Virtual environment teil20b (virtuelle Umgebung teil20b)

teil20b/lib/python3.11/site-packages/numpy/lib/__init__.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""
**Note:** almost all functions in the ``numpy.lib`` namespace
are also present in the main ``numpy`` namespace.  Please use the
functions as ``np.<funcname>`` where possible.

``numpy.lib`` is mostly a space for implementing functions that don't
belong in core or in another NumPy submodule with a clear purpose
(e.g. ``random``, ``fft``, ``linalg``, ``ma``).

Most of it contains basic functions that are used by several submodules
and are useful to have in the main namespace.

"""

from numpy.version import version as __version__

# Public submodules
# Note: recfunctions and (maybe) format are public too, but not imported
from . import mixins
from . import scimath as emath

# Private submodules
# load module names. See https://github.com/networkx/networkx/issues/5838
from . import type_check
from . import index_tricks
from . import function_base
from . import nanfunctions
from . import shape_base
from . import stride_tricks
from . import twodim_base
from . import ufunclike
from . import histograms
from . import polynomial
from . import utils
from . import arraysetops
from . import npyio
from . import arrayterator
from . import arraypad
from . import _version

from .type_check import *
from .index_tricks import *
from .function_base import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *
from .histograms import *

from .polynomial import *
from .utils import *
from .arraysetops import *
from .npyio import *
from .arrayterator import Arrayterator
from .arraypad import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain

__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += nanfunctions.__all__
__all__ += histograms.__all__

from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester


def __getattr__(attr):
    # Warn for deprecated attributes
    import math
    import warnings

    if attr == 'math':
        warnings.warn(
            "`np.lib.math` is a deprecated alias for the standard library "
            "`math` module (Deprecated NumPy 1.25). Replace usages of "
            "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
        return math
    else:
        raise AttributeError("module {!r} has no attribute "
                             "{!r}".format(__name__, attr))
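
The `__getattr__` hook above relies on PEP 562, which lets a module intercept
failed attribute lookups; that is what turns `np.lib.math` into a lazily
emitted DeprecationWarning. A minimal standalone sketch of the same pattern
(the module name `mylib` and its alias table are hypothetical, not part of
NumPy):

    # mylib.py -- hypothetical module using PEP 562 for lazy deprecation
    import math
    import warnings

    _DEPRECATED = {'math': math}  # deprecated alias -> replacement object

    def __getattr__(attr):
        # Called only when normal module attribute lookup fails.
        if attr in _DEPRECATED:
            warnings.warn(
                f"`mylib.{attr}` is a deprecated alias; use the target directly",
                DeprecationWarning, stacklevel=2)
            return _DEPRECATED[attr]
        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")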

teil20b/lib/python3.11/site-packages/numpy/lib/__init__.pyi (new file, 245 lines)
@@ -0,0 +1,245 @@
import math as math
from typing import Any

from numpy._pytesttester import PytestTester

from numpy import (
    ndenumerate as ndenumerate,
    ndindex as ndindex,
)

from numpy.version import version

from numpy.lib import (
    format as format,
    mixins as mixins,
    scimath as scimath,
    stride_tricks as stride_tricks,
)

from numpy.lib._version import (
    NumpyVersion as NumpyVersion,
)

from numpy.lib.arraypad import (
    pad as pad,
)

from numpy.lib.arraysetops import (
    ediff1d as ediff1d,
    intersect1d as intersect1d,
    setxor1d as setxor1d,
    union1d as union1d,
    setdiff1d as setdiff1d,
    unique as unique,
    in1d as in1d,
    isin as isin,
)

from numpy.lib.arrayterator import (
    Arrayterator as Arrayterator,
)

from numpy.lib.function_base import (
    select as select,
    piecewise as piecewise,
    trim_zeros as trim_zeros,
    copy as copy,
    iterable as iterable,
    percentile as percentile,
    diff as diff,
    gradient as gradient,
    angle as angle,
    unwrap as unwrap,
    sort_complex as sort_complex,
    disp as disp,
    flip as flip,
    rot90 as rot90,
    extract as extract,
    place as place,
    vectorize as vectorize,
    asarray_chkfinite as asarray_chkfinite,
    average as average,
    bincount as bincount,
    digitize as digitize,
    cov as cov,
    corrcoef as corrcoef,
    median as median,
    sinc as sinc,
    hamming as hamming,
    hanning as hanning,
    bartlett as bartlett,
    blackman as blackman,
    kaiser as kaiser,
    trapz as trapz,
    i0 as i0,
    add_newdoc as add_newdoc,
    add_docstring as add_docstring,
    meshgrid as meshgrid,
    delete as delete,
    insert as insert,
    append as append,
    interp as interp,
    add_newdoc_ufunc as add_newdoc_ufunc,
    quantile as quantile,
)

from numpy.lib.histograms import (
    histogram_bin_edges as histogram_bin_edges,
    histogram as histogram,
    histogramdd as histogramdd,
)

from numpy.lib.index_tricks import (
    ravel_multi_index as ravel_multi_index,
    unravel_index as unravel_index,
    mgrid as mgrid,
    ogrid as ogrid,
    r_ as r_,
    c_ as c_,
    s_ as s_,
    index_exp as index_exp,
    ix_ as ix_,
    fill_diagonal as fill_diagonal,
    diag_indices as diag_indices,
    diag_indices_from as diag_indices_from,
)

from numpy.lib.nanfunctions import (
    nansum as nansum,
    nanmax as nanmax,
    nanmin as nanmin,
    nanargmax as nanargmax,
    nanargmin as nanargmin,
    nanmean as nanmean,
    nanmedian as nanmedian,
    nanpercentile as nanpercentile,
    nanvar as nanvar,
    nanstd as nanstd,
    nanprod as nanprod,
    nancumsum as nancumsum,
    nancumprod as nancumprod,
    nanquantile as nanquantile,
)

from numpy.lib.npyio import (
    savetxt as savetxt,
    loadtxt as loadtxt,
    genfromtxt as genfromtxt,
    recfromtxt as recfromtxt,
    recfromcsv as recfromcsv,
    load as load,
    save as save,
    savez as savez,
    savez_compressed as savez_compressed,
    packbits as packbits,
    unpackbits as unpackbits,
    fromregex as fromregex,
    DataSource as DataSource,
)

from numpy.lib.polynomial import (
    poly as poly,
    roots as roots,
    polyint as polyint,
    polyder as polyder,
    polyadd as polyadd,
    polysub as polysub,
    polymul as polymul,
    polydiv as polydiv,
    polyval as polyval,
    polyfit as polyfit,
    RankWarning as RankWarning,
    poly1d as poly1d,
)

from numpy.lib.shape_base import (
    column_stack as column_stack,
    row_stack as row_stack,
    dstack as dstack,
    array_split as array_split,
    split as split,
    hsplit as hsplit,
    vsplit as vsplit,
    dsplit as dsplit,
    apply_over_axes as apply_over_axes,
    expand_dims as expand_dims,
    apply_along_axis as apply_along_axis,
    kron as kron,
    tile as tile,
    get_array_wrap as get_array_wrap,
    take_along_axis as take_along_axis,
    put_along_axis as put_along_axis,
)

from numpy.lib.stride_tricks import (
    broadcast_to as broadcast_to,
    broadcast_arrays as broadcast_arrays,
    broadcast_shapes as broadcast_shapes,
)

from numpy.lib.twodim_base import (
    diag as diag,
    diagflat as diagflat,
    eye as eye,
    fliplr as fliplr,
    flipud as flipud,
    tri as tri,
    triu as triu,
    tril as tril,
    vander as vander,
    histogram2d as histogram2d,
    mask_indices as mask_indices,
    tril_indices as tril_indices,
    tril_indices_from as tril_indices_from,
    triu_indices as triu_indices,
    triu_indices_from as triu_indices_from,
)

from numpy.lib.type_check import (
    mintypecode as mintypecode,
    asfarray as asfarray,
    real as real,
    imag as imag,
    iscomplex as iscomplex,
    isreal as isreal,
    iscomplexobj as iscomplexobj,
    isrealobj as isrealobj,
    nan_to_num as nan_to_num,
    real_if_close as real_if_close,
    typename as typename,
    common_type as common_type,
)

from numpy.lib.ufunclike import (
    fix as fix,
    isposinf as isposinf,
    isneginf as isneginf,
)

from numpy.lib.utils import (
    issubclass_ as issubclass_,
    issubsctype as issubsctype,
    issubdtype as issubdtype,
    deprecate as deprecate,
    deprecate_with_doc as deprecate_with_doc,
    get_include as get_include,
    info as info,
    source as source,
    who as who,
    lookfor as lookfor,
    byte_bounds as byte_bounds,
    safe_eval as safe_eval,
    show_runtime as show_runtime,
)

from numpy.core.multiarray import (
    tracemalloc_domain as tracemalloc_domain,
)

__all__: list[str]
__path__: list[str]
test: PytestTester

__version__ = version
emath = scimath
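
Every import in the stub above uses the redundant-looking `name as name`
form. In `.pyi` files that spelling is significant: by the stub-file
conventions introduced with PEP 484 and followed by type checkers, a bare
`from m import name` is treated as private to the stub, while
`from m import name as name` marks `name` as an explicit re-export. A
minimal sketch (package and symbol names hypothetical):

    # pkg/__init__.pyi -- hypothetical stub
    from pkg._impl import helper as helper  # re-exported: `pkg.helper` is public
    from pkg._impl import _hidden           # stub-private, not re-exported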
25 binary files not shown.

teil20b/lib/python3.11/site-packages/numpy/lib/_datasource.py (new file, 704 lines)
@@ -0,0 +1,704 @@
"""A file interface for handling local and remote data files.

The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details.  Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.

DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.

DataSource files can originate locally or remotely:

- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'

DataSource files can also be compressed or uncompressed.  Currently only
gzip, bz2 and xz are supported.

Example::

    >>> # Create a DataSource, use os.curdir (default) for local storage.
    >>> from numpy import DataSource
    >>> ds = DataSource()
    >>>
    >>> # Open a remote file.
    >>> # DataSource downloads the file, stores it locally in:
    >>> #     './www.google.com/index.html'
    >>> # opens the file and returns a file object.
    >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
    >>>
    >>> # Use the file as you normally would
    >>> fp.read() # doctest: +SKIP
    >>> fp.close() # doctest: +SKIP

"""
import os
import io

from .._utils import set_module


_open = open


def _check_mode(mode, encoding, newline):
    """Check mode and that encoding and newline are compatible.

    Parameters
    ----------
    mode : str
        File open mode.
    encoding : str
        File encoding.
    newline : str
        Newline for text files.

    """
    if "t" in mode:
        if "b" in mode:
            raise ValueError("Invalid mode: %r" % (mode,))
    else:
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")

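# A doctest-style sketch of _check_mode (hypothetical calls; text mode
# accepts an encoding, binary mode does not):
#
# >>> _check_mode('rt', 'utf-8', None)   # fine: text mode with encoding
# >>> _check_mode('rb', 'utf-8', None)
# Traceback (most recent call last):
#     ...
# ValueError: Argument 'encoding' not supported in binary mode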

# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed

# TODO: .zip support, .tar support?
class _FileOpeners:
    """
    Container for different methods to open (un-)compressed files.

    `_FileOpeners` contains a dictionary that holds one method for each
    supported file format. Attribute lookup is implemented in such a way
    that an instance of `_FileOpeners` itself can be indexed with the keys
    of that dictionary. Currently uncompressed files as well as files
    compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.

    Notes
    -----
    `_file_openers`, an instance of `_FileOpeners`, is made available for
    use in the `_datasource` module.

    Examples
    --------
    >>> import gzip
    >>> np.lib._datasource._file_openers.keys()
    [None, '.bz2', '.gz', '.xz', '.lzma']
    >>> np.lib._datasource._file_openers['.gz'] is gzip.open
    True

    """

    def __init__(self):
        self._loaded = False
        self._file_openers = {None: io.open}

    def _load(self):
        if self._loaded:
            return

        try:
            import bz2
            self._file_openers[".bz2"] = bz2.open
        except ImportError:
            pass

        try:
            import gzip
            self._file_openers[".gz"] = gzip.open
        except ImportError:
            pass

        try:
            import lzma
            self._file_openers[".xz"] = lzma.open
            self._file_openers[".lzma"] = lzma.open
        except (ImportError, AttributeError):
            # There are incompatible backports of lzma that do not have the
            # lzma.open attribute, so catch that as well as ImportError.
            pass

        self._loaded = True

    def keys(self):
        """
        Return the keys of currently supported file openers.

        Parameters
        ----------
        None

        Returns
        -------
        keys : list
            The keys are None for uncompressed files and the file extension
            strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
            methods.

        """
        self._load()
        return list(self._file_openers.keys())

    def __getitem__(self, key):
        self._load()
        return self._file_openers[key]

_file_openers = _FileOpeners()
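# Design note: the compression modules are imported on first lookup rather
# than at `import numpy` time.  A doctest-style sketch (assumes gzip is
# available):
#
# >>> opener = _file_openers['.gz']   # first subscript triggers _load()
# >>> import gzip
# >>> opener is gzip.open
# True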

def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
    """
    Open `path` with `mode` and return the file object.

    If ``path`` is a URL, it will be downloaded, stored in the
    `DataSource` `destpath` directory and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
        append. Available modes depend on the type of object specified by
        path. Default is 'r'.
    destpath : str, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.
    encoding : {None, str}, optional
        Open text file with given encoding. The default encoding will be
        what `io.open` uses.
    newline : {None, str}, optional
        Newline to use when reading text file.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    This is a convenience function that instantiates a `DataSource` and
    returns the file object from ``DataSource.open(path)``.

    """

    ds = DataSource(destpath)
    return ds.open(path, mode, encoding=encoding, newline=newline)

@set_module('numpy')
class DataSource:
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed. DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = np.DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = np.DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/'
        >>> gfile = ds.open('http://www.google.com/')
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/index.html'

        >>> ds = np.DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/.../home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories
        if hasattr(self, '_istmpdest') and self._istmpdest:
            import shutil

            shutil.rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file
        extension.

        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""

        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}

        """

        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a list containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names
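
    # A doctest-style sketch of the candidate-name expansion (hypothetical
    # instance; key order reflects which compression modules were imported):
    #
    # >>> ds = DataSource()
    # >>> ds._possible_names('data.txt')
    # ['data.txt', 'data.txt.bz2', 'data.txt.gz', 'data.txt.xz', 'data.txt.lzma']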

    def _isurl(self, path):
        """Test if path is a net location.  Tests the scheme and netloc."""

        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        """
        # We import these here because importing them is slow and
        # a significant fraction of numpy's total import time.
        import shutil
        from urllib.request import urlopen

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            with urlopen(path) as openedurl:
                with _open(upath, 'wb') as f:
                    shutil.copyfileobj(openedurl, f)
        else:
            shutil.copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is a URL, _findfile will cache a local copy and return the
        path to the cached file.  If path is a local file, _findfile will
        return a path to that local file.

        The search will include possible compressed versions of the file
        and return the first occurrence found.

        """

        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is a URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse

        # TODO:  This should be more robust.  Handles case where path includes
        #        the destpath, but not other sub-paths.  Failing case:
        #        path = /home/guido/datafile.txt
        #        destpath = /home/alex/
        #        upath = self.abspath(path)
        #        upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path
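
    # A doctest-style sketch of the sanitiser stripping separators and
    # parent-directory references (POSIX behaviour assumed):
    #
    # >>> DataSource()._sanitize_relative_path('../../etc/passwd')
    # 'etc/passwd'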

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is a URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """

        # First test for local path
        if os.path.exists(path):
            return True

        # We import this here because importing urllib is slow and
        # a significant fraction of numpy's total import time.
        from urllib.request import urlopen
        from urllib.error import URLError

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                del netfile
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object.

        If `path` is a URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `io.open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """

        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # bz2 does not support '+' in mode strings; drop it.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode,
                                      encoding=encoding, newline=newline)
        else:
            raise FileNotFoundError(f"{path} not found.")

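# A round-trip sketch of the class above (network access and file names are
# hypothetical):
#
# >>> ds = DataSource(None)                        # cache into a temp dir
# >>> fp = ds.open('http://example.com/data.txt')  # download, then open
# >>> data = fp.read()
# >>> fp.close()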

class Repository(DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource objects share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles.  Use `Repository` when you will
    be working with multiple files from one base URL.  Initialize
    `Repository` with the base URL, then refer to each file by its filename
    only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        splitpath = path.split(self._baseurl, 2)
        if len(splitpath) == 1:
            result = os.path.join(self._baseurl, path)
        else:
            result = path    # path contains baseurl already
        return result

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is a URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is a URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is a URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.  This may, but does not have to,
            include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `io.open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode,
                               encoding=encoding, newline=newline)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                  "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
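
`Repository` is the convenient form when many files hang off one base URL;
a brief usage sketch (the base URL, file names, and network access are
hypothetical assumptions, not part of the module):

    from numpy.lib._datasource import Repository

    repos = Repository('http://example.com/data', destpath=None)  # temp cache
    for fname in ('a.csv', 'b.csv'):     # resolved against the base URL
        with repos.open(fname) as fp:
            print(fname, fp.readline())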

teil20b/lib/python3.11/site-packages/numpy/lib/_iotools.py (new file, 897 lines)
@@ -0,0 +1,897 @@
"""A collection of functions designed to help I/O with ASCII files.

"""
__docformat__ = "restructuredtext en"

import numpy as np
import numpy.core.numeric as nx
from numpy.compat import asbytes, asunicode


def _decode_line(line, encoding=None):
    """Decode bytes from binary input streams.

    Defaults to decoding from 'latin1'. That differs from the behavior of
    np.compat.asunicode that decodes from 'ascii'.

    Parameters
    ----------
    line : str or bytes
        Line to be decoded.
    encoding : str
        Encoding used to decode `line`.

    Returns
    -------
    decoded_line : str

    """
    if type(line) is bytes:
        if encoding is None:
            encoding = "latin1"
        line = line.decode(encoding)

    return line
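
# A doctest-style sketch of the default decoding (bytes assumed latin1):
#
# >>> _decode_line(b'caf\xe9')
# 'café'
# >>> _decode_line('already text')
# 'already text'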


def _is_string_like(obj):
    """
    Check whether obj behaves like a string.
    """
    try:
        obj + ''
    except (TypeError, ValueError):
        return False
    return True


def _is_bytes_like(obj):
    """
    Check whether obj behaves like a bytes object.
    """
    try:
        obj + b''
    except (TypeError, ValueError):
        return False
    return True


def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False

    """
    for name in ndtype.names or ():
        if ndtype[name].names is not None:
            return True
    return False


def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default
        is False.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('S4'),
     dtype('float64'),
     dtype('float64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64')]

    """
    names = ndtype.names
    if names is None:
        if flatten_base:
            return [ndtype.base] * int(np.prod(ndtype.shape))
        return [ndtype.base]
    else:
        types = []
        for field in names:
            info = ndtype.fields[field]
            flat_dt = flatten_dtype(info[0], flatten_base)
            types.extend(flat_dt)
        return types


class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.

    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.

    """

    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.

        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.

        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.

        """
        return lambda input: [_.strip() for _ in method(input)]

    def __init__(self, delimiter=None, comments='#', autostrip=True,
                 encoding=None):
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)

        self.comments = comments

        # Delimiter is a character
        if (delimiter is None) or isinstance(delimiter, str):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                    self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
        self.encoding = encoding

    def _delimited_splitter(self, line):
        """Chop off comments, strip, and split at delimiter. """
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(" \r\n")
        if not line:
            return []
        return line.split(self.delimiter)

    def _fixedwidth_splitter(self, line):
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip("\r\n")
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]

    def _variablewidth_splitter(self, line):
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]

    def __call__(self, line):
        return self._handyman(_decode_line(line, self.encoding))
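
# A doctest-style usage sketch of LineSplitter (inputs hypothetical):
#
# >>> split = LineSplitter(delimiter=',')
# >>> split("1, 2, 3  # trailing comment")
# ['1', '2', '3']
# >>> LineSplitter(delimiter=(3, 3))("abcdef")
# ['abc', 'def']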


class NameValidator:
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list have an underscore character appended.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created.  The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : str, optional
        Character(s) used in replacement of white spaces. Default is '_'.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ('file_', 'field2', 'with_space', 'CaSe')

    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive=False)
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')

    """

    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
            raise ValueError(msg)

        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : tuple of str
            The validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.

        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        if isinstance(names, str):
            names = [names, ]
        if nbfields is not None:
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables ...
        validatednames = []
        seen = dict()
        nbempty = 0

        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)

    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)


def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case-insensitive)

    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False

    """
    value = value.upper()
    if value == 'TRUE':
        return True
    elif value == 'FALSE':
        return False
    else:
        raise ValueError("Invalid boolean")


class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string values.

    """
    pass


class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked converter.

    """
    pass


class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.

    """
    pass


class StringConverter:
    """
    Factory class for function transforming a string into another object
    (int, float).

    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.

    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.

    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a basic
        function and a default value for missing data. For example, when
        `dtype` is float, the `func` attribute is set to `float` and the
        default value to `np.nan`.  If a function, this function is used to
        convert a string to another object. In this case, it is recommended
        to give an associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : {None, sequence of str}, optional
        ``None`` or sequence of strings indicating a missing value. If
        ``None``, then missing values are indicated by empty entries. The
        default is ``None``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.

    """
    _mapper = [(nx.bool_, str2bool, False),
               (nx.int_, int, -1),]

    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.int_ is nx.int32.
    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))

    _mapper.extend([(nx.float64, float, nx.nan),
                    (nx.complex128, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    # If a non-default dtype is passed, fall back to generic
                    # ones (should only be used for the converter)
                    (nx.integer, int, -1),
                    (nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),
                    # Last, try with the string types (must be last, because
                    # `_mapper[-1]` is used as default in some cases)
                    (nx.str_, asunicode, '???'),
                    (nx.bytes_, asbytes, '???'),
                    ])

    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype

    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type

    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""

        # This is a bit annoying. We want to return the "general" type in
        # most cases (ie. "string" rather than "S10"), but we want to return
        # the specific type for datetime64 (ie. "datetime64[us]" rather than
        # "datetime64").
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type

    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function and
        its corresponding default.

        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper.  The corresponding type is estimated from the dtype of the
        default value.

        Parameters
        ----------
        func : var
            Function, or sequence of functions

        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single function
        if hasattr(func, '__call__'):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                # Pad the defaults so each function gets one (extend, not
                # append, so we add items rather than a nested list).
                default.extend([None] * (len(func) - len(default)))
            for fct, dft in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))

    @classmethod
    def _find_map_entry(cls, dtype):
        # if a converter for the specific dtype is available use that
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if dtype.type == deftype:
                return i, (deftype, func, default_def)

        # otherwise find an inexact match
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if np.issubdtype(dtype.type, deftype):
                return i, (deftype, func, default_def)

        raise LookupError

    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                dtype = np.dtype(dtype_or_func)
            except TypeError:
                # dtype_or_func must be a function, then
                if not hasattr(dtype_or_func, '__call__'):
                    errmsg = ("The input argument `dtype` is neither a"
                              " function nor a dtype (got '%s' instead)")
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to
                # None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                dtype = self._getdtype(default)

            # find the best match in our mapper
            try:
                self._status, (_, func, default_def) = self._find_map_entry(dtype)
            except LookupError:
                # no match
                self.default = default
                _, func, _ = self._mapper[-1]
                self._status = 0
            else:
                # use the found default only if we did not already have one
                if default is None:
                    self.default = default_def
                else:
                    self.default = default

            # If the input was a dtype, set the function to the last we saw
            if self.func is None:
                self.func = func

            # If the status is 1 (int), change the function to
            # something more robust.
            if self.func == self._mapper[1][1]:
                if issubclass(dtype.type, np.uint64):
                    self.func = np.uint64
                elif issubclass(dtype.type, np.int64):
                    self.func = np.int64
                else:
                    self.func = lambda x: int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = {''}
        else:
            if isinstance(missing_values, str):
                missing_values = missing_values.split(",")
            self.missing_values = set(list(missing_values) + [''])

        self._callingfunction = self._strict_call
        self.type = self._dtypeortype(dtype)
        self._checked = False
        self._initial_default = default

    def _loose_call(self, value):
        try:
            return self.func(value)
        except ValueError:
            return self.default

    def _strict_call(self, value):
        try:

            # We check if we can convert the value using the current function
            new_value = self.func(value)

            # In addition to having to check whether func can convert the
            # value, we also have to make sure that we don't get overflow
            # errors for integers.
            if self.func is int:
                try:
                    np.array(value, dtype=self.type)
                except OverflowError:
                    raise ValueError

            # We're still here so we can now return the new value
            return new_value

        except ValueError:
            if value.strip() in self.missing_values:
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)

    def __call__(self, value):
        return self._callingfunction(value)
def _do_upgrade(self):
|
||||
# Raise an exception if we locked the converter...
|
||||
if self._locked:
|
||||
errmsg = "Converter is locked and cannot be upgraded"
|
||||
raise ConverterLockError(errmsg)
|
||||
_statusmax = len(self._mapper)
|
||||
# Complains if we try to upgrade by the maximum
|
||||
_status = self._status
|
||||
if _status == _statusmax:
|
||||
errmsg = "Could not find a valid conversion function"
|
||||
raise ConverterError(errmsg)
|
||||
elif _status < _statusmax - 1:
|
||||
_status += 1
|
||||
self.type, self.func, default = self._mapper[_status]
|
||||
self._status = _status
|
||||
if self._initial_default is not None:
|
||||
self.default = self._initial_default
|
||||
else:
|
||||
self.default = default
|
||||
|
||||
def upgrade(self, value):
|
||||
"""
|
||||
Find the best converter for a given string, and return the result.
|
||||
|
||||
The supplied string `value` is converted by testing different
|
||||
converters in order. First the `func` method of the
|
||||
`StringConverter` instance is tried, if this fails other available
|
||||
converters are tried. The order in which these other converters
|
||||
are tried is determined by the `_status` attribute of the instance.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
value : str
|
||||
The string to convert.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : any
|
||||
The result of converting `value` with the appropriate converter.
|
||||
|
||||
"""
|
||||
self._checked = True
|
||||
try:
|
||||
return self._strict_call(value)
|
||||
except ValueError:
|
||||
self._do_upgrade()
|
||||
return self.upgrade(value)
|
||||
|
||||
def iterupgrade(self, value):
|
||||
self._checked = True
|
||||
if not hasattr(value, '__iter__'):
|
||||
value = (value,)
|
||||
_strict_call = self._strict_call
|
||||
try:
|
||||
for _m in value:
|
||||
_strict_call(_m)
|
||||
except ValueError:
|
||||
self._do_upgrade()
|
||||
self.iterupgrade(value)
|
||||
|
||||
def update(self, func, default=None, testing_value=None,
|
||||
missing_values='', locked=False):
|
||||
"""
|
||||
Set StringConverter attributes directly.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
func : function
|
||||
Conversion function.
|
||||
default : any, optional
|
||||
Value to return by default, that is, when the string to be
|
||||
converted is flagged as missing. If not given,
|
||||
`StringConverter` tries to supply a reasonable default value.
|
||||
testing_value : str, optional
|
||||
A string representing a standard input value of the converter.
|
||||
This string is used to help defining a reasonable default
|
||||
value.
|
||||
missing_values : {sequence of str, None}, optional
|
||||
Sequence of strings indicating a missing value. If ``None``, then
|
||||
the existing `missing_values` are cleared. The default is `''`.
|
||||
locked : bool, optional
|
||||
Whether the StringConverter should be locked to prevent
|
||||
automatic upgrade or not. Default is False.
|
||||
|
||||
Notes
|
||||
-----
|
||||
`update` takes the same parameters as the constructor of
|
||||
`StringConverter`, except that `func` does not accept a `dtype`
|
||||
whereas `dtype_or_func` in the constructor does.
|
||||
|
||||
"""
|
||||
self.func = func
|
||||
self._locked = locked
|
||||
|
||||
# Don't reset the default to None if we can avoid it
|
||||
if default is not None:
|
||||
self.default = default
|
||||
self.type = self._dtypeortype(self._getdtype(default))
|
||||
else:
|
||||
try:
|
||||
tester = func(testing_value or '1')
|
||||
except (TypeError, ValueError):
|
||||
tester = None
|
||||
self.type = self._dtypeortype(self._getdtype(tester))
|
||||
|
||||
# Add the missing values to the existing set or clear it.
|
||||
if missing_values is None:
|
||||
# Clear all missing values even though the ctor initializes it to
|
||||
# set(['']) when the argument is None.
|
||||
self.missing_values = set()
|
||||
else:
|
||||
if not np.iterable(missing_values):
|
||||
missing_values = [missing_values]
|
||||
if not all(isinstance(v, str) for v in missing_values):
|
||||
raise TypeError("missing_values must be strings or unicode")
|
||||
self.missing_values.update(missing_values)
|
||||
|
||||
|
||||
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
|
||||
"""
|
||||
Convenience function to create a `np.dtype` object.
|
||||
|
||||
The function processes the input `dtype` and matches it with the given
|
||||
names.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ndtype : var
|
||||
Definition of the dtype. Can be any string or dictionary recognized
|
||||
by the `np.dtype` function, or a sequence of types.
|
||||
names : str or sequence, optional
|
||||
Sequence of strings to use as field names for a structured dtype.
|
||||
For convenience, `names` can be a string of a comma-separated list
|
||||
of names.
|
||||
defaultfmt : str, optional
|
||||
Format string used to define missing names, such as ``"f%i"``
|
||||
(default) or ``"fields_%02i"``.
|
||||
validationargs : optional
|
||||
A series of optional arguments used to initialize a
|
||||
`NameValidator`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np.lib._iotools.easy_dtype(float)
|
||||
dtype('float64')
|
||||
>>> np.lib._iotools.easy_dtype("i4, f8")
|
||||
dtype([('f0', '<i4'), ('f1', '<f8')])
|
||||
>>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
|
||||
dtype([('field_000', '<i4'), ('field_001', '<f8')])
|
||||
|
||||
>>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
|
||||
dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
|
||||
>>> np.lib._iotools.easy_dtype(float, names="a,b,c")
|
||||
dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
|
||||
|
||||
"""
|
||||
try:
|
||||
ndtype = np.dtype(ndtype)
|
||||
except TypeError:
|
||||
validate = NameValidator(**validationargs)
|
||||
nbfields = len(ndtype)
|
||||
if names is None:
|
||||
names = [''] * len(ndtype)
|
||||
elif isinstance(names, str):
|
||||
names = names.split(",")
|
||||
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
|
||||
ndtype = np.dtype(dict(formats=ndtype, names=names))
|
||||
else:
|
||||
# Explicit names
|
||||
if names is not None:
|
||||
validate = NameValidator(**validationargs)
|
||||
if isinstance(names, str):
|
||||
names = names.split(",")
|
||||
# Simple dtype: repeat to match the nb of names
|
||||
if ndtype.names is None:
|
||||
formats = tuple([ndtype.type] * len(names))
|
||||
names = validate(names, defaultfmt=defaultfmt)
|
||||
ndtype = np.dtype(list(zip(names, formats)))
|
||||
# Structured dtype: just validate the names as needed
|
||||
else:
|
||||
ndtype.names = validate(names, nbfields=len(ndtype.names),
|
||||
defaultfmt=defaultfmt)
|
||||
# No implicit names
|
||||
elif ndtype.names is not None:
|
||||
validate = NameValidator(**validationargs)
|
||||
# Default initial names : should we change the format ?
|
||||
numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
|
||||
if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
|
||||
ndtype.names = validate([''] * len(ndtype.names),
|
||||
defaultfmt=defaultfmt)
|
||||
# Explicit initial names : just validate
|
||||
else:
|
||||
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
|
||||
return ndtype
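
A minimal usage sketch of the two utilities above (illustrative only; `numpy.lib._iotools` is a private module, so this import path is not a stable API):

    import numpy as np
    from numpy.lib._iotools import StringConverter, easy_dtype

    # StringConverter starts with the narrowest converter (bool) and
    # upgrade() walks the internal _mapper until a conversion succeeds.
    conv = StringConverter()
    print(conv.upgrade('1'))      # 1    -- upgraded from bool to int
    print(conv.upgrade('1.5'))    # 1.5  -- upgraded again, int to float

    # easy_dtype combines a dtype spec with field names in one call.
    print(easy_dtype("i4, f8", names="a,b"))
    # dtype([('a', '<i4'), ('b', '<f8')])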
155
teil20b/lib/python3.11/site-packages/numpy/lib/_version.py
Normal file
@@ -0,0 +1,155 @@
"""Utility to compare (NumPy) version strings.

The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.

"""
import re


__all__ = ['NumpyVersion']


class NumpyVersion():
    """Parse and compare numpy version strings.

    NumPy has the following versioning scheme (numbers given are examples;
    they can be > 9 in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance. Note that all development versions of the same
    (pre-)release compare equal.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    vstring : str
        NumPy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    >>> # skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
    Traceback (most recent call last):
        ...
    ValueError: Not a valid numpy version string

    """

    def __init__(self, vstring):
        self.vstring = vstring
        ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
        if not ver_main:
            raise ValueError("Not a valid numpy version string")

        self.version = ver_main.group()
        self.major, self.minor, self.bugfix = [int(x) for x in
                                               self.version.split('.')]
        if len(vstring) == ver_main.end():
            self.pre_release = 'final'
        else:
            alpha = re.match(r'a\d', vstring[ver_main.end():])
            beta = re.match(r'b\d', vstring[ver_main.end():])
            rc = re.match(r'rc\d', vstring[ver_main.end():])
            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
            if pre_rel:
                self.pre_release = pre_rel[0].group()
            else:
                self.pre_release = ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix"""
        if self.major == other.major:
            if self.minor == other.minor:
                if self.bugfix == other.bugfix:
                    vercmp = 0
                elif self.bugfix > other.bugfix:
                    vercmp = 1
                else:
                    vercmp = -1
            elif self.minor > other.minor:
                vercmp = 1
            else:
                vercmp = -1
        elif self.major > other.major:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final."""
        if self.pre_release == other.pre_release:
            vercmp = 0
        elif self.pre_release == 'final':
            vercmp = 1
        elif other.pre_release == 'final':
            vercmp = -1
        elif self.pre_release > other.pre_release:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare(self, other):
        if not isinstance(other, (str, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, str):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp == 0:
            # Same x.y.z version, check for alpha/beta/rc
            vercmp = self._compare_pre_release(other)
            if vercmp == 0:
                # Same version and same pre-release, check if dev version
                if self.is_devversion is other.is_devversion:
                    vercmp = 0
                elif self.is_devversion:
                    vercmp = -1
                else:
                    vercmp = 1

        return vercmp

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return "NumpyVersion(%s)" % self.vstring
17
teil20b/lib/python3.11/site-packages/numpy/lib/_version.pyi
Normal file
@@ -0,0 +1,17 @@
__all__: list[str]

class NumpyVersion:
    vstring: str
    version: str
    major: int
    minor: int
    bugfix: int
    pre_release: str
    is_devversion: bool
    def __init__(self, vstring: str) -> None: ...
    def __lt__(self, other: str | NumpyVersion) -> bool: ...
    def __le__(self, other: str | NumpyVersion) -> bool: ...
    def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __gt__(self, other: str | NumpyVersion) -> bool: ...
    def __ge__(self, other: str | NumpyVersion) -> bool: ...
882
teil20b/lib/python3.11/site-packages/numpy/lib/arraypad.py
Normal file
@@ -0,0 +1,882 @@
"""
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.

"""
import numpy as np
from numpy.core.overrides import array_function_dispatch
from numpy.lib.index_tricks import ndindex


__all__ = ['pad']


###############################################################################
# Private utility functions.


def _round_if_needed(arr, dtype):
    """
    Rounds arr inplace if destination dtype is integer.

    Parameters
    ----------
    arr : ndarray
        Input array.
    dtype : dtype
        The dtype of the destination array.
    """
    if np.issubdtype(dtype, np.integer):
        arr.round(out=arr)


def _slice_at_axis(sl, axis):
    """
    Construct tuple of slices to slice an array in the given dimension.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied. All other dimensions are left
        "unsliced".

    Returns
    -------
    sl : tuple of slices
        A tuple with slices matching `shape` in length.

    Examples
    --------
    >>> _slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), (...,))
    """
    return (slice(None),) * axis + (sl,) + (...,)


def _view_roi(array, original_area_slice, axis):
    """
    Get a view of the current region of interest during iterative padding.

    When padding multiple dimensions iteratively corner values are
    unnecessarily overwritten multiple times. This function reduces the
    working area for the first dimensions so that corners are excluded.

    Parameters
    ----------
    array : ndarray
        The array with the region of interest.
    original_area_slice : tuple of slices
        Denotes the area with original values of the unpadded array.
    axis : int
        The currently padded dimension assuming that `axis` is padded before
        `axis` + 1.

    Returns
    -------
    roi : ndarray
        The region of interest of the original `array`.
    """
    axis += 1
    sl = (slice(None),) * axis + original_area_slice[axis:]
    return array[sl]


def _pad_simple(array, pad_width, fill_value=None):
    """
    Pad array on all sides with either a single value or undefined values.

    Parameters
    ----------
    array : ndarray
        Array to grow.
    pad_width : sequence of tuple[int, int]
        Pad width on both sides for each dimension in `arr`.
    fill_value : scalar, optional
        If provided the padded area is filled with this value, otherwise
        the pad area is left undefined.

    Returns
    -------
    padded : ndarray
        The padded array with the same dtype as `array`. Its order will
        default to C-style if `array` is not F-contiguous.
    original_area_slice : tuple
        A tuple of slices pointing to the area of the original array.
    """
    # Allocate grown array
    new_shape = tuple(
        left + size + right
        for size, (left, right) in zip(array.shape, pad_width)
    )
    order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
    padded = np.empty(new_shape, dtype=array.dtype, order=order)

    if fill_value is not None:
        padded.fill(fill_value)

    # Copy old array into correct space
    original_area_slice = tuple(
        slice(left, left + size)
        for size, (left, right) in zip(array.shape, pad_width)
    )
    padded[original_area_slice] = array

    return padded, original_area_slice


def _set_pad_area(padded, axis, width_pair, value_pair):
    """
    Set empty-padded area in given dimension.

    Parameters
    ----------
    padded : ndarray
        Array with the pad area which is modified inplace.
    axis : int
        Dimension with the pad area to set.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    value_pair : tuple of scalars or ndarrays
        Values inserted into the pad area on each side. It must match or be
        broadcastable to the shape of `arr`.
    """
    left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
    padded[left_slice] = value_pair[0]

    right_slice = _slice_at_axis(
        slice(padded.shape[axis] - width_pair[1], None), axis)
    padded[right_slice] = value_pair[1]


def _get_edges(padded, axis, width_pair):
    """
    Retrieve edge values from empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the edges are considered.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.

    Returns
    -------
    left_edge, right_edge : ndarray
        Edge values of the valid area in `padded` in the given dimension. Its
        shape will always match `padded` except for the dimension given by
        `axis` which will have a length of 1.
    """
    left_index = width_pair[0]
    left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
    left_edge = padded[left_slice]

    right_index = padded.shape[axis] - width_pair[1]
    right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
    right_edge = padded[right_slice]

    return left_edge, right_edge


def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """
    Construct linear ramps for empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the ramps are constructed.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    end_value_pair : (scalar, scalar)
        End values for the linear ramps which form the edge of the fully
        padded array. These values are included in the linear ramps.

    Returns
    -------
    left_ramp, right_ramp : ndarray
        Linear ramps to set on both sides of `padded`.
    """
    edge_pair = _get_edges(padded, axis, width_pair)

    left_ramp, right_ramp = (
        np.linspace(
            start=end_value,
            stop=edge.squeeze(axis),  # Dimension is replaced by linspace
            num=width,
            endpoint=False,
            dtype=padded.dtype,
            axis=axis
        )
        for end_value, edge, width in zip(
            end_value_pair, edge_pair, width_pair
        )
    )

    # Reverse linear space in appropriate dimension
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]

    return left_ramp, right_ramp


def _get_stats(padded, axis, width_pair, length_pair, stat_func):
    """
    Calculate statistic for the empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the statistic is calculated.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    length_pair : 2-element sequence of None or int
        Gives the number of values in valid area from each side that is
        taken into account when calculating the statistic. If None the
        entire valid area in `padded` is considered.
    stat_func : function
        Function to compute statistic. The expected signature is
        ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.

    Returns
    -------
    left_stat, right_stat : ndarray
        Calculated statistic for both sides of `padded`.
    """
    # Calculate indices of the edges of the area with original values
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    # as well as its length
    max_length = right_index - left_index

    # Limit stat_lengths to max_length
    left_length, right_length = length_pair
    if left_length is None or max_length < left_length:
        left_length = max_length
    if right_length is None or max_length < right_length:
        right_length = max_length

    if (left_length == 0 or right_length == 0) \
            and stat_func in {np.amax, np.amin}:
        # amax and amin can't operate on an empty array,
        # raise a more descriptive warning here instead of the default one
        raise ValueError("stat_length of 0 yields no value for padding")

    # Calculate statistic for the left side
    left_slice = _slice_at_axis(
        slice(left_index, left_index + left_length), axis)
    left_chunk = padded[left_slice]
    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
    _round_if_needed(left_stat, padded.dtype)

    if left_length == right_length == max_length:
        # return early as right_stat must be identical to left_stat
        return left_stat, left_stat

    # Calculate statistic for the right side
    right_slice = _slice_at_axis(
        slice(right_index - right_length, right_index), axis)
    right_chunk = padded[right_slice]
    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
    _round_if_needed(right_stat, padded.dtype)

    return left_stat, right_stat


def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
    """
    Pad `axis` of `arr` with reflection.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    include_edge : bool
        If true, edge value is included in reflection, otherwise the edge
        value forms the symmetric axis to the reflection.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad

    if include_edge:
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk

    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk

        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length

    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to
        # start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis)
            right_chunk = 2 * padded[edge_slice] - right_chunk

        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length

    return left_pad, right_pad


def _set_wrap_both(padded, axis, width_pair, original_period):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    original_period : int
        Original length of data on `axis` of `arr`.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad
    # Avoid wrapping with only a subset of the original area by ensuring
    # period can only be a multiple of the original area's length.
    period = period // original_period * original_period

    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0

    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area.
        slice_end = left_pad + period
        slice_start = slice_end - min(period, left_pad)
        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        right_chunk = padded[right_slice]

        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk

    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area.
        slice_start = -right_pad - period
        slice_end = slice_start + min(period, right_pad)
        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        left_chunk = padded[left_slice]

        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk

    return new_left_pad, new_right_pad


def _as_pairs(x, ndim, as_index=False):
    """
    Broadcast `x` to an array with the shape (`ndim`, 2).

    A helper function for `pad` that prepares and validates arguments like
    `pad_width` for iteration in pairs.

    Parameters
    ----------
    x : {None, scalar, array-like}
        The object to broadcast to the shape (`ndim`, 2).
    ndim : int
        Number of pairs the broadcasted `x` will have.
    as_index : bool, optional
        If `x` is not None, try to round each element of `x` to an integer
        (dtype `np.intp`) and ensure every element is positive.

    Returns
    -------
    pairs : nested iterables, shape (`ndim`, 2)
        The broadcasted version of `x`.

    Raises
    ------
    ValueError
        If `as_index` is True and `x` contains negative elements.
        Or if `x` is not broadcastable to the shape (`ndim`, 2).
    """
    if x is None:
        # Pass through None as a special case, otherwise np.round(x) fails
        # with an AttributeError
        return ((None, None),) * ndim

    x = np.array(x)
    if as_index:
        x = np.round(x).astype(np.intp, copy=False)

    if x.ndim < 3:
        # Optimization: Possibly use faster paths for cases where `x` has
        # only 1 or 2 elements. `np.broadcast_to` could handle these as well
        # but is currently slower

        if x.size == 1:
            # x was supplied as a single value
            x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
            if as_index and x < 0:
                raise ValueError("index can't contain negative values")
            return ((x[0], x[0]),) * ndim

        if x.size == 2 and x.shape != (2, 1):
            # x was supplied with a single value for each side
            # but except case when each dimension has a single value
            # which should be broadcasted to a pair,
            # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
            x = x.ravel()  # Ensure x[0], x[1] works
            if as_index and (x[0] < 0 or x[1] < 0):
                raise ValueError("index can't contain negative values")
            return ((x[0], x[1]),) * ndim

    if as_index and x.min() < 0:
        raise ValueError("index can't contain negative values")

    # Converting the array with `tolist` seems to improve performance
    # when iterating and indexing the result (see usage in `pad`)
    return np.broadcast_to(x, (ndim, 2)).tolist()
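
# Illustration of the broadcasting above (hypothetical REPL session,
# not part of the original module):
#
#   >>> _as_pairs(3, ndim=2, as_index=True)
#   ((3, 3), (3, 3))
#   >>> _as_pairs((1, 2), ndim=3)
#   ((1, 2), (1, 2), (1, 2))
#   >>> _as_pairs([[1], [2]], ndim=2)   # one value per dimension
#   [[1, 1], [2, 2]]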


def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
    return (array,)


###############################################################################
# Public functions


@array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
    """
    Pad an array.

    Parameters
    ----------
    array : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
        for each axis.
        ``(before, after)`` or ``((before, after),)`` yields same before
        and after pad for each axis.
        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
        for all axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.

        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            Pads with the linear ramp between end_value and the
            array edge value.
        'maximum'
            Pads with the maximum value of all or part of the
            vector along each axis.
        'mean'
            Pads with the mean value of all or part of the
            vector along each axis.
        'median'
            Pads with the median value of all or part of the
            vector along each axis.
        'minimum'
            Pads with the minimum value of all or part of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            Pads with the wrap of the vector along the axis.
            The first values are used to pad the end and the
            end values are used to pad the beginning.
        'empty'
            Pads with undefined values.

            .. versionadded:: 1.17

        <function>
            Padding function, see Notes.
    stat_length : sequence or int, optional
        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
        values at edge of each axis used to calculate the statistic value.

        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
        lengths for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after statistic lengths for each axis.

        ``(stat_length,)`` or ``int`` is a shortcut for
        ``before = after = statistic`` length for all axes.

        Default is ``None``, to use the entire axis.
    constant_values : sequence or scalar, optional
        Used in 'constant'. The values to set the padded values for each
        axis.

        ``((before_1, after_1), ... (before_N, after_N))`` unique pad
        constants for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after constants for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    end_values : sequence or scalar, optional
        Used in 'linear_ramp'. The values used for the ending value of the
        linear_ramp and that will form the edge of the padded array.

        ``((before_1, after_1), ... (before_N, after_N))`` unique end values
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after end values for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'. The 'even' style is the
        default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    .. versionadded:: 1.7.0

    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes. This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should modify a rank 1 array in-place. It
    has the following signature::

        padding_func(vector, iaxis_pad_width, iaxis, kwargs)

    where

    vector : ndarray
        A rank 1 array already padded with zeros. Padded values are
        vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
    iaxis_pad_width : tuple
        A 2-tuple of ints, iaxis_pad_width[0] represents the number of
        values padded at the beginning of vector where
        iaxis_pad_width[1] represents the number of values padded at
        the end of vector.
    iaxis : int
        The axis currently being calculated.
    kwargs : dict
        Any keyword arguments the function requires.

    Examples
    --------
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, ..., 6, 6, 6])

    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])

    >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])

    >>> np.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])

    >>> np.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> np.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])

    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])

    >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])

    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])

    >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])

    >>> np.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])

    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    >>> a = np.arange(6)
    >>> a = a.reshape((2, 3))
    >>> np.pad(a, 2, pad_with)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    >>> np.pad(a, 2, pad_with, padder=100)
    array([[100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100,   0,   1,   2, 100, 100],
           [100, 100,   3,   4,   5, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100]])
    """
    array = np.asarray(array)
    pad_width = np.asarray(pad_width)

    if not pad_width.dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')

    # Broadcast to shape (array.ndim, 2)
    pad_width = _as_pairs(pad_width, array.ndim, as_index=True)

    if callable(mode):
        # Old behavior: Use user-supplied function with np.apply_along_axis
        function = mode
        # Create a new zero padded array
        padded, _ = _pad_simple(array, pad_width, fill_value=0)
        # And apply along each axis

        for axis in range(padded.ndim):
            # Iterate using ndindex as in apply_along_axis, but assuming that
            # function operates inplace on the padded array.

            # view with the iteration axis at the end
            view = np.moveaxis(padded, axis, -1)

            # compute indices for the iteration axes, and append a trailing
            # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
            inds = ndindex(view.shape[:-1])
            inds = (ind + (Ellipsis,) for ind in inds)
            for ind in inds:
                function(view[ind], pad_width[axis], axis, kwargs)

        return padded

    # Make sure that no unsupported keywords were passed for the current mode
    allowed_kwargs = {
        'empty': [], 'edge': [], 'wrap': [],
        'constant': ['constant_values'],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    try:
        unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
    except KeyError:
        raise ValueError("mode '{}' is not supported".format(mode)) from None
    if unsupported_kwargs:
        raise ValueError("unsupported keyword arguments for mode '{}': {}"
                         .format(mode, unsupported_kwargs))

    stat_functions = {"maximum": np.amax, "minimum": np.amin,
                      "mean": np.mean, "median": np.median}

    # Create array with final shape and original values
    # (padded area is undefined)
    padded, original_area_slice = _pad_simple(array, pad_width)
    # And prepare iteration over all dimensions
    # (zipping may be more readable than using enumerate)
    axes = range(padded.ndim)

    if mode == "constant":
        values = kwargs.get("constant_values", 0)
        values = _as_pairs(values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, values):
            roi = _view_roi(padded, original_area_slice, axis)
            _set_pad_area(roi, axis, width_pair, value_pair)

    elif mode == "empty":
        pass  # Do nothing as _pad_simple already returned the correct result

    elif array.size == 0:
        # Only modes "constant" and "empty" can extend empty axes, all other
        # modes depend on `array` not being empty
        # -> ensure every empty axis is only "padded with 0"
        for axis, width_pair in zip(axes, pad_width):
            if array.shape[axis] == 0 and any(width_pair):
                raise ValueError(
                    "can't extend empty axis {} using modes other than "
                    "'constant' or 'empty'".format(axis)
                )
        # passed, don't need to do anything more as _pad_simple already
        # returned the correct result

    elif mode == "edge":
        for axis, width_pair in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            edge_pair = _get_edges(roi, axis, width_pair)
            _set_pad_area(roi, axis, width_pair, edge_pair)

    elif mode == "linear_ramp":
        end_values = kwargs.get("end_values", 0)
        end_values = _as_pairs(end_values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
            roi = _view_roi(padded, original_area_slice, axis)
            ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
            _set_pad_area(roi, axis, width_pair, ramp_pair)

    elif mode in stat_functions:
        func = stat_functions[mode]
        length = kwargs.get("stat_length", None)
        length = _as_pairs(length, padded.ndim, as_index=True)
        for axis, width_pair, length_pair in zip(axes, pad_width, length):
            roi = _view_roi(padded, original_area_slice, axis)
            stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
            _set_pad_area(roi, axis, width_pair, stat_pair)

    elif mode in {"reflect", "symmetric"}:
        method = kwargs.get("reflect_type", "even")
        include_edge = True if mode == "symmetric" else False
        for axis, (left_index, right_index) in zip(axes, pad_width):
            if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                edge_pair = _get_edges(padded, axis, (left_index, right_index))
                _set_pad_area(
                    padded, axis, (left_index, right_index), edge_pair)
                continue

            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with reflected
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_reflect_both(
                    roi, axis, (left_index, right_index),
                    method, include_edge
                )

    elif mode == "wrap":
        for axis, (left_index, right_index) in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            original_period = padded.shape[axis] - right_index - left_index
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with wrapped
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_wrap_both(
                    roi, axis, (left_index, right_index), original_period)

    return padded
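
A small check (illustrative only) of the iterative 'reflect' and 'wrap' paths above: when the requested pad is wider than the input, `_set_reflect_both` and `_set_wrap_both` run more than once until the pad area is filled:

    import numpy as np

    a = np.array([1, 2, 3])

    print(np.pad(a, (0, 7), mode='reflect'))
    # [1 2 3 2 1 2 3 2 1 2]

    print(np.pad(a, (0, 7), mode='wrap'))
    # [1 2 3 1 2 3 1 2 3 1]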
85
teil20b/lib/python3.11/site-packages/numpy/lib/arraypad.pyi
Normal file
@@ -0,0 +1,85 @@
from typing import (
    Literal as L,
    Any,
    overload,
    TypeVar,
    Protocol,
)

from numpy import generic

from numpy._typing import (
    ArrayLike,
    NDArray,
    _ArrayLikeInt,
    _ArrayLike,
)

_SCT = TypeVar("_SCT", bound=generic)

class _ModeFunc(Protocol):
    def __call__(
        self,
        vector: NDArray[Any],
        iaxis_pad_width: tuple[int, int],
        iaxis: int,
        kwargs: dict[str, Any],
        /,
    ) -> None: ...

_ModeKind = L[
    "constant",
    "edge",
    "linear_ramp",
    "maximum",
    "mean",
    "median",
    "minimum",
    "reflect",
    "symmetric",
    "wrap",
    "empty",
]

__all__: list[str]

# TODO: In practice each keyword argument is exclusive to one or more
# specific modes. Consider adding more overloads to express this in the
# future.

# Expand `**kwargs` into explicit keyword-only arguments
@overload
def pad(
    array: _ArrayLike[_SCT],
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: None | _ArrayLikeInt = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[_SCT]: ...
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: None | _ArrayLikeInt = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[Any]: ...
@overload
def pad(
    array: _ArrayLike[_SCT],
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[_SCT]: ...
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[Any]: ...
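
How these overloads resolve under a static type checker such as mypy (a sketch; the exact revealed types depend on the checker and the numpy version):

    import numpy as np

    x = np.zeros(3, dtype=np.float32)
    y = np.pad(x, 2)          # first overload: float32 element type preserved
    z = np.pad([1, 2, 3], 2)  # second overload: falls back to NDArray[Any]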
981
teil20b/lib/python3.11/site-packages/numpy/lib/arraysetops.py
Normal file
@@ -0,0 +1,981 @@
"""
Set operations for arrays based on sorting.

Notes
-----

For floating point arrays, inaccurate results may appear due to usual
round-off and floating point comparison issues.

Speed could be gained in some operations by an implementation of
`numpy.sort`, that can provide directly the permutation vectors, thus
avoiding calls to `numpy.argsort`.

Original author: Robert Cimrman

"""
import functools

import numpy as np
from numpy.core import overrides


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


__all__ = [
    'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
    'in1d', 'isin'
]


def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
    return (ary, to_end, to_begin)


@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        If necessary, will be flattened before the differences are taken.
    to_end : array_like, optional
        Number(s) to append at the end of the returned differences.
    to_begin : array_like, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ediff1d : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    See Also
    --------
    diff, gradient

    Notes
    -----
    When applied to masked arrays, this function drops the mask information
    if the `to_begin` and/or `to_end` parameters are used.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1,  2,  3, -7])

    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99,   1,   2, ...,  -7,  88,  99])

    The returned array is always 1D.

    >>> y = [[1, 2, 4], [1, 6, 24]]
    >>> np.ediff1d(y)
    array([ 1,  2, -3,  5, 18])

    """
    # force a 1d array
    ary = np.asanyarray(ary).ravel()

    # enforce that the dtype of `ary` is used for the output
    dtype_req = ary.dtype

    # fast track default case
    if to_begin is None and to_end is None:
        return ary[1:] - ary[:-1]

    if to_begin is None:
        l_begin = 0
    else:
        to_begin = np.asanyarray(to_begin)
        if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
            raise TypeError("dtype of `to_begin` must be compatible "
                            "with input `ary` under the `same_kind` rule.")

        to_begin = to_begin.ravel()
        l_begin = len(to_begin)

    if to_end is None:
        l_end = 0
    else:
        to_end = np.asanyarray(to_end)
        if not np.can_cast(to_end, dtype_req, casting="same_kind"):
            raise TypeError("dtype of `to_end` must be compatible "
                            "with input `ary` under the `same_kind` rule.")

        to_end = to_end.ravel()
        l_end = len(to_end)

    # do the calculation in place and copy to_begin and to_end
    l_diff = max(len(ary) - 1, 0)
    result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
    result = ary.__array_wrap__(result)
    if l_begin > 0:
        result[:l_begin] = to_begin
    if l_end > 0:
        result[l_begin + l_diff:] = to_end
    np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
    return result
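
# Illustration of the `same_kind` casting guard above (hypothetical REPL
# session): appending a float to an integer input is rejected rather than
# silently truncated:
#
#   >>> np.ediff1d(np.array([1, 2, 4]), to_end=3.5)
#   Traceback (most recent call last):
#       ...
#   TypeError: dtype of `to_end` must be compatible with input `ary` under
#   the `same_kind` rule.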
|
||||
|
||||
|
||||
def _unpack_tuple(x):
|
||||
""" Unpacks one-element tuples for use as return values """
|
||||
if len(x) == 1:
|
||||
return x[0]
|
||||
else:
|
||||
return x
|
||||
|
||||
|
||||
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
|
||||
return_counts=None, axis=None, *, equal_nan=None):
|
||||
return (ar,)
|
||||
|
||||
|
||||
@array_function_dispatch(_unique_dispatcher)
|
||||
def unique(ar, return_index=False, return_inverse=False,
|
||||
return_counts=False, axis=None, *, equal_nan=True):
|
||||
"""
|
||||
Find the unique elements of an array.
|
||||
|
||||
Returns the sorted unique elements of an array. There are three optional
|
||||
outputs in addition to the unique elements:
|
||||
|
||||
* the indices of the input array that give the unique values
|
||||
* the indices of the unique array that reconstruct the input array
|
||||
    * the number of times each unique value comes up in the input array

    Parameters
    ----------
    ar : array_like
        Input array. Unless `axis` is specified, this will be flattened if it
        is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified axis,
        if provided, or in the flattened array) that result in the unique array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array (for the specified
        axis, if provided) that can be used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `ar`.
    axis : int or None, optional
        The axis to operate on. If None, `ar` will be flattened. If an integer,
        the subarrays indexed by the given axis will be flattened and treated
        as the elements of a 1-D array with the dimension of the given axis,
        see the notes for more details. Object arrays or structured arrays
        that contain objects are not supported if the `axis` kwarg is used. The
        default is None.

        .. versionadded:: 1.13.0

    equal_nan : bool, optional
        If True, collapses multiple NaN values in the return array into one.

        .. versionadded:: 1.24

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

        .. versionadded:: 1.9.0

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.
    repeat : Repeat elements of an array.

    Notes
    -----
    When an axis is specified the subarrays indexed by the axis are sorted.
    This is done by making the specified axis the first dimension of the array
    (move the axis to the first dimension to keep the order of the other axes)
    and then flattening the subarrays in C order. The flattened subarrays are
    then viewed as a structured type with each element given a label, with the
    effect that we end up with a 1-D array of structured types that can be
    treated in the same way as any other 1-D array. The result is that the
    flattened subarrays are sorted in lexicographic order starting with the
    first element.

    .. versionchanged:: 1.21
        If nan values are in the input array, a single nan is put
        to the end of the sorted unique values.

        Also for complex arrays all NaN values are considered equivalent
        (no matter whether the NaN is in the real or imaginary part).
        As the representative for the returned array the smallest one in the
        lexicographical order is chosen - see np.sort for how the
        lexicographical order is defined for complex arrays.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([[1, 1], [2, 3]])
    >>> np.unique(a)
    array([1, 2, 3])

    Return the unique rows of a 2D array

    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1, 0, 0], [2, 3, 4]])

    Return the indices of the original array that give the unique values:

    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array(['a', 'b', 'c'], dtype='<U1')
    >>> indices
    array([0, 1, 3])
    >>> a[indices]
    array(['a', 'b', 'c'], dtype='<U1')

    Reconstruct the input array from the unique values and inverse:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u
    array([1, 2, 3, 4, 6])
    >>> indices
    array([0, 1, 4, 3, 1, 2, 1])
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])

    Reconstruct the input values from the unique values and counts:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> values, counts = np.unique(a, return_counts=True)
    >>> values
    array([1, 2, 3, 4, 6])
    >>> counts
    array([1, 3, 1, 1, 1])
    >>> np.repeat(values, counts)
    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved

    """
    ar = np.asanyarray(ar)
    if axis is None:
        ret = _unique1d(ar, return_index, return_inverse, return_counts,
                        equal_nan=equal_nan)
        return _unpack_tuple(ret)

    # axis was specified and not None
    try:
        ar = np.moveaxis(ar, axis, 0)
    except np.AxisError:
        # this removes the "axis1" or "axis2" prefix from the error message
        raise np.AxisError(axis, ar.ndim) from None

    # Must reshape to a contiguous 2D array for this to work...
    orig_shape, orig_dtype = ar.shape, ar.dtype
    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
    ar = np.ascontiguousarray(ar)
    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]

    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
    # data type with `m` fields where each field has the data type of `ar`.
    # In the following, we create the array `consolidated`, which has
    # shape `(n,)` with data type `dtype`.
    try:
        if ar.shape[1] > 0:
            consolidated = ar.view(dtype)
        else:
            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
            # a data type with itemsize 0, and the call `ar.view(dtype)` will
            # fail. Instead, we'll use `np.empty` to explicitly create the
            # array with shape `(len(ar),)`. Because `dtype` in this case has
            # itemsize 0, the total size of the result is still 0 bytes.
            consolidated = np.empty(len(ar), dtype=dtype)
    except TypeError as e:
        # There's no good way to do this for object arrays, etc...
        msg = 'The axis argument to unique is not supported for dtype {dt}'
        raise TypeError(msg.format(dt=ar.dtype)) from e

    def reshape_uniq(uniq):
        n = len(uniq)
        uniq = uniq.view(orig_dtype)
        uniq = uniq.reshape(n, *orig_shape[1:])
        uniq = np.moveaxis(uniq, 0, axis)
        return uniq

    output = _unique1d(consolidated, return_index,
                       return_inverse, return_counts, equal_nan=equal_nan)
    output = (reshape_uniq(output[0]),) + output[1:]
    return _unpack_tuple(output)


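# Illustrative sketch (not part of the original module): the structured-view
# trick described in the Notes of `unique` can be reproduced by hand. Each
# row of a C-contiguous 2-D array is reinterpreted as one structured scalar,
# so a plain 1-D sort/compare then operates on whole rows at once.
#
#     rows = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])  # example input
#     fields = [(f'f{i}', rows.dtype) for i in range(rows.shape[1])]
#     consolidated = np.ascontiguousarray(rows).view(fields)
#     # each row is now one structured element (the view has shape (3, 1));
#     # deduplicating this 1-D view is what np.unique(rows, axis=0) does

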
def _unique1d(ar, return_index=False, return_inverse=False,
              return_counts=False, *, equal_nan=True):
    """
    Find the unique elements of an array, ignoring shape.
    """
    ar = np.asanyarray(ar).flatten()

    optional_indices = return_index or return_inverse

    if optional_indices:
        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
        aux = ar[perm]
    else:
        ar.sort()
        aux = ar
    mask = np.empty(aux.shape, dtype=np.bool_)
    mask[:1] = True
    if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
            np.isnan(aux[-1])):
        if aux.dtype.kind == "c":  # for complex all NaNs are considered equivalent
            aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
        else:
            aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
        if aux_firstnan > 0:
            mask[1:aux_firstnan] = (
                aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
        mask[aux_firstnan] = True
        mask[aux_firstnan + 1:] = False
    else:
        mask[1:] = aux[1:] != aux[:-1]

    ret = (aux[mask],)
    if return_index:
        ret += (perm[mask],)
    if return_inverse:
        imask = np.cumsum(mask) - 1
        inv_idx = np.empty(mask.shape, dtype=np.intp)
        inv_idx[perm] = imask
        ret += (inv_idx,)
    if return_counts:
        idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
        ret += (np.diff(idx),)
    return ret


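# Illustrative sketch (not part of the original module): the core of
# `_unique1d` is the sorted-neighbour mask. After sorting, duplicates are
# adjacent, so an element is "first of its kind" exactly when it differs
# from its left neighbour.
#
#     a = np.array([2, 1, 2, 3, 1])          # example input
#     aux = np.sort(a)                       # [1, 1, 2, 2, 3]
#     mask = np.empty(aux.shape, dtype=bool)
#     mask[:1] = True
#     mask[1:] = aux[1:] != aux[:-1]         # [True, False, True, False, True]
#     assert (aux[mask] == np.unique(a)).all()

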
def _intersect1d_dispatcher(
        ar1, ar2, assume_unique=None, return_indices=None):
    return (ar1, ar2)


@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. Will be flattened if not already 1D.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
        unique, incorrect results and out-of-bounds indices could result.
        Default is False.
    return_indices : bool
        If True, the indices which correspond to the intersection of the two
        arrays are returned. The first instance of a value is used if there are
        multiple. Default is False.

        .. versionadded:: 1.15.0

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.
    comm1 : ndarray
        The indices of the first occurrences of the common values in `ar1`.
        Only provided if `return_indices` is True.
    comm2 : ndarray
        The indices of the first occurrences of the common values in `ar2`.
        Only provided if `return_indices` is True.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])

    To intersect more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([3])

    To return the indices of the values common to the input arrays
    along with the intersected values:

    >>> x = np.array([1, 1, 2, 3, 4])
    >>> y = np.array([2, 1, 4, 6])
    >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
    >>> x_ind, y_ind
    (array([0, 2, 4]), array([1, 0, 2]))
    >>> xy, x[x_ind], y[y_ind]
    (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))

    """
    ar1 = np.asanyarray(ar1)
    ar2 = np.asanyarray(ar2)

    if not assume_unique:
        if return_indices:
            ar1, ind1 = unique(ar1, return_index=True)
            ar2, ind2 = unique(ar2, return_index=True)
        else:
            ar1 = unique(ar1)
            ar2 = unique(ar2)
    else:
        ar1 = ar1.ravel()
        ar2 = ar2.ravel()

    aux = np.concatenate((ar1, ar2))
    if return_indices:
        aux_sort_indices = np.argsort(aux, kind='mergesort')
        aux = aux[aux_sort_indices]
    else:
        aux.sort()

    mask = aux[1:] == aux[:-1]
    int1d = aux[:-1][mask]

    if return_indices:
        ar1_indices = aux_sort_indices[:-1][mask]
        ar2_indices = aux_sort_indices[1:][mask] - ar1.size
        if not assume_unique:
            ar1_indices = ind1[ar1_indices]
            ar2_indices = ind2[ar2_indices]

        return int1d, ar1_indices, ar2_indices
    else:
        return int1d


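# Illustrative sketch (not part of the original module): with both inputs
# deduplicated, a value is common to `ar1` and `ar2` exactly when it appears
# twice in the sorted concatenation, i.e. where two adjacent sorted entries
# are equal.
#
#     ar1, ar2 = np.array([1, 3, 4]), np.array([1, 2, 3])   # already unique
#     aux = np.sort(np.concatenate((ar1, ar2)))             # [1, 1, 2, 3, 3, 4]
#     common = aux[:-1][aux[1:] == aux[:-1]]                # [1, 3]

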
def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
    return (ar1, ar2)


@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of the
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    setxor1d : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a, b)
    array([1, 4, 5, 7])

    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)

    aux = np.concatenate((ar1, ar2))
    if aux.size == 0:
        return aux

    aux.sort()
    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
    return aux[flag[1:] & flag[:-1]]


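# Illustrative sketch (not part of the original module): in `setxor1d` the
# sentinel-padded `flag` array marks boundaries between runs of equal values
# in the sorted concatenation. An element forms a run of length one (i.e. it
# is in exactly one input) when it has a boundary on *both* sides.
#
#     aux = np.array([1, 2, 2, 3, 3, 4, 5, 5, 7])   # sorted concatenation
#     flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
#     assert (aux[flag[1:] & flag[:-1]] == np.array([1, 4, 7])).all()

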
def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
                     kind=None):
    return (ar1, ar2)


@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
    """
    Test whether each element of a 1-D array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    We recommend using :func:`isin` instead of `in1d` for new code.

    Parameters
    ----------
    ar1 : (M,) array_like
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `ar1` is in `ar2` and True otherwise).
        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``np.invert(in1d(a, b))``.
    kind : {None, 'sort', 'table'}, optional
        The algorithm to use. This will not affect the final result,
        but will affect the speed and memory use. The default, None,
        will select automatically based on memory considerations.

        * If 'sort', will use a mergesort-based approach. This will have
          a memory usage of roughly 6 times the sum of the sizes of
          `ar1` and `ar2`, not accounting for size of dtypes.
        * If 'table', will use a lookup table approach similar
          to a counting sort. This is only available for boolean and
          integer arrays. This will have a memory usage of the
          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
          has no effect when the 'table' option is used.
        * If None, will automatically choose 'table' if
          the required memory allocation is less than or equal to
          6 times the sum of the sizes of `ar1` and `ar2`,
          otherwise will use 'sort'. This is done to not use
          a large amount of memory by default, even though
          'table' may be faster in most cases. If 'table' is chosen,
          `assume_unique` will have no effect.

        .. versionadded:: 1.8.0

    Returns
    -------
    in1d : (M,) ndarray, bool
        The values `ar1[in1d]` are in `ar2`.

    See Also
    --------
    isin                  : Version of this function that preserves the
                            shape of ar1.
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.
    However, this idea fails if `ar2` is a set, or similar (non-sequence)
    container: As ``ar2`` is converted to an array, in those cases
    ``asarray(ar2)`` is an object array rather than the expected array of
    contained values.

    Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
    following relationship is true:
    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
    but may use greater memory. The default value for `kind` will
    be automatically selected based only on memory usage, so one may
    manually set ``kind='table'`` if memory constraints can be relaxed.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> states = [0, 2]
    >>> mask = np.in1d(test, states)
    >>> mask
    array([ True, False,  True, False,  True])
    >>> test[mask]
    array([0, 2, 0])
    >>> mask = np.in1d(test, states, invert=True)
    >>> mask
    array([False,  True, False,  True, False])
    >>> test[mask]
    array([1, 5])
    """
    # Ravel both arrays, behavior for the first array could be different
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()

    # Ensure that iteration through object arrays yields size-1 arrays
    if ar2.dtype == object:
        ar2 = ar2.reshape(-1, 1)

    if kind not in {None, 'sort', 'table'}:
        raise ValueError(
            f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")

    # Can use the table method if all arrays are integers or boolean:
    is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
    use_table_method = is_int_arrays and kind in {None, 'table'}

    if use_table_method:
        if ar2.size == 0:
            if invert:
                return np.ones_like(ar1, dtype=bool)
            else:
                return np.zeros_like(ar1, dtype=bool)

        # Convert booleans to uint8 so we can use the fast integer algorithm
        if ar1.dtype == bool:
            ar1 = ar1.astype(np.uint8)
        if ar2.dtype == bool:
            ar2 = ar2.astype(np.uint8)

        ar2_min = np.min(ar2)
        ar2_max = np.max(ar2)

        ar2_range = int(ar2_max) - int(ar2_min)

        # Constraints on whether we can actually use the table method:
        # 1. Assert memory usage is not too large
        below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
        # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
        range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
        # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
        if ar1.size > 0:
            ar1_min = np.min(ar1)
            ar1_max = np.max(ar1)

            # After masking, the range of ar1 is guaranteed to be
            # within the range of ar2:
            ar1_upper = min(int(ar1_max), int(ar2_max))
            ar1_lower = max(int(ar1_min), int(ar2_min))

            range_safe_from_overflow &= all((
                ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
                ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
            ))

        # Optimal performance is for approximately
        # log10(size) > (log10(range) - 2.27) / 0.927.
        # However, here we set the requirement that by default
        # the intermediate array can only be 6x
        # the combined memory allocation of the original
        # arrays. See discussion on
        # https://github.com/numpy/numpy/pull/12065.

        if (
            range_safe_from_overflow and
            (below_memory_constraint or kind == 'table')
        ):

            if invert:
                outgoing_array = np.ones_like(ar1, dtype=bool)
            else:
                outgoing_array = np.zeros_like(ar1, dtype=bool)

            # Make elements 1 where the integer exists in ar2
            if invert:
                isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
                isin_helper_ar[ar2 - ar2_min] = 0
            else:
                isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
                isin_helper_ar[ar2 - ar2_min] = 1

            # Mask out elements we know won't work
            basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
            outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
                                                        ar2_min]

            return outgoing_array
        elif kind == 'table':  # not range_safe_from_overflow
            raise RuntimeError(
                "You have specified kind='table', "
                "but the range of values in `ar2` or `ar1` exceed the "
                "maximum integer of the datatype. "
                "Please set `kind` to None or 'sort'."
            )
    elif kind == 'table':
        raise ValueError(
            "The 'table' method is only "
            "supported for boolean or integer arrays. "
            "Please select 'sort' or None for kind."
        )

    # Check if one of the arrays may contain arbitrary objects
    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject

    # This code is run when
    # a) the first condition is true, making the code significantly faster
    # b) the second condition is true (i.e. `ar1` or `ar2` may contain
    #    arbitrary objects), since then sorting is not guaranteed to work
    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for a in ar2:
                mask &= (ar1 != a)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for a in ar2:
                mask |= (ar1 == a)
        return mask

    # Otherwise use sorting
    if not assume_unique:
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    ar = np.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = np.concatenate((bool_ar, [invert]))
    ret = np.empty(ar.shape, dtype=bool)
    ret[order] = flag

    if assume_unique:
        return ret[:len(ar1)]
    else:
        return ret[rev_idx]


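# Illustrative sketch (not part of the original module): the 'table' kind
# above boils down to a boolean lookup table indexed by value offset, as in
# a counting sort.
#
#     ar1 = np.array([0, 3, 7, 9])             # example inputs
#     ar2 = np.array([3, 4, 9])
#     lo, hi = int(ar2.min()), int(ar2.max())
#     table = np.zeros(hi - lo + 1, dtype=bool)
#     table[ar2 - lo] = True                    # mark every value in ar2
#     in_range = (ar1 >= lo) & (ar1 <= hi)
#     result = np.zeros(ar1.shape, dtype=bool)
#     result[in_range] = table[ar1[in_range] - lo]
#     assert (result == np.in1d(ar1, ar2)).all()

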
def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
                     *, kind=None):
    return (element, test_elements)


@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False, *,
         kind=None):
    """
    Calculates ``element in test_elements``, broadcasting over `element` only.
    Returns a boolean array of the same shape as `element` that is True
    where an element of `element` is in `test_elements` and False otherwise.

    Parameters
    ----------
    element : array_like
        Input array.
    test_elements : array_like
        The values against which to test each value of `element`.
        This argument is flattened if it is an array or array_like.
        See notes for behavior with non-array-like parameters.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted, as if
        calculating `element not in test_elements`. Default is False.
        ``np.isin(a, b, invert=True)`` is equivalent to (but faster
        than) ``np.invert(np.isin(a, b))``.
    kind : {None, 'sort', 'table'}, optional
        The algorithm to use. This will not affect the final result,
        but will affect the speed and memory use. The default, None,
        will select automatically based on memory considerations.

        * If 'sort', will use a mergesort-based approach. This will have
          a memory usage of roughly 6 times the sum of the sizes of
          `ar1` and `ar2`, not accounting for size of dtypes.
        * If 'table', will use a lookup table approach similar
          to a counting sort. This is only available for boolean and
          integer arrays. This will have a memory usage of the
          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
          has no effect when the 'table' option is used.
        * If None, will automatically choose 'table' if
          the required memory allocation is less than or equal to
          6 times the sum of the sizes of `ar1` and `ar2`,
          otherwise will use 'sort'. This is done to not use
          a large amount of memory by default, even though
          'table' may be faster in most cases. If 'table' is chosen,
          `assume_unique` will have no effect.

    Returns
    -------
    isin : ndarray, bool
        Has the same shape as `element`. The values `element[isin]`
        are in `test_elements`.

    See Also
    --------
    in1d                  : Flattened version of this function.
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Notes
    -----

    `isin` is an element-wise function version of the python keyword `in`.
    ``isin(a, b)`` is roughly equivalent to
    ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.

    `element` and `test_elements` are converted to arrays if they are not
    already. If `test_elements` is a set (or other non-sequence collection)
    it will be converted to an object array with one element, rather than an
    array of the values contained in `test_elements`. This is a consequence
    of the `array` constructor's way of handling non-sequence collections.
    Converting the set to a list usually gives the desired behavior.

    Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
    following relationship is true:
    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
    but may use greater memory. The default value for `kind` will
    be automatically selected based only on memory usage, so one may
    manually set ``kind='table'`` if memory constraints can be relaxed.

    .. versionadded:: 1.13.0

    Examples
    --------
    >>> element = 2*np.arange(4).reshape((2, 2))
    >>> element
    array([[0, 2],
           [4, 6]])
    >>> test_elements = [1, 2, 4, 8]
    >>> mask = np.isin(element, test_elements)
    >>> mask
    array([[False,  True],
           [ True, False]])
    >>> element[mask]
    array([2, 4])

    The indices of the matched values can be obtained with `nonzero`:

    >>> np.nonzero(mask)
    (array([0, 1]), array([1, 0]))

    The test can also be inverted:

    >>> mask = np.isin(element, test_elements, invert=True)
    >>> mask
    array([[ True, False],
           [False,  True]])
    >>> element[mask]
    array([0, 6])

    Because of how `array` handles sets, the following does not
    work as expected:

    >>> test_set = {1, 2, 4, 8}
    >>> np.isin(element, test_set)
    array([[False, False],
           [False, False]])

    Casting the set to a list gives the expected result:

    >>> np.isin(element, list(test_set))
    array([[False,  True],
           [ True, False]])
    """
    element = np.asarray(element)
    return in1d(element, test_elements, assume_unique=assume_unique,
                invert=invert, kind=kind).reshape(element.shape)


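# Illustrative sketch (not part of the original module): `isin` is exactly
# the flattened `in1d` result folded back to the input's shape.
#
#     element = np.array([[0, 2], [4, 6]])
#     flat = np.in1d(element, [1, 2, 4, 8])        # shape (4,)
#     assert (flat.reshape(element.shape)
#             == np.isin(element, [1, 2, 4, 8])).all()

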
def _union1d_dispatcher(ar1, ar2):
    return (ar1, ar2)


@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])

    To find the union of more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])
    """
    return unique(np.concatenate((ar1, ar2), axis=None))


def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
    return (ar1, ar2)


@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    setdiff1d : ndarray
        1D array of values in `ar1` that are not in `ar2`. The result
        is sorted when `assume_unique=False`, but otherwise only sorted
        if the input is sorted.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])

    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
360
teil20b/lib/python3.11/site-packages/numpy/lib/arraysetops.pyi
Normal file
@@ -0,0 +1,360 @@
from typing import (
    Literal as L,
    Any,
    TypeVar,
    overload,
    SupportsIndex,
)

from numpy import (
    generic,
    number,
    bool_,
    ushort,
    ubyte,
    uintc,
    uint,
    ulonglong,
    short,
    int8,
    byte,
    intc,
    int_,
    intp,
    longlong,
    half,
    single,
    double,
    longdouble,
    csingle,
    cdouble,
    clongdouble,
    timedelta64,
    datetime64,
    object_,
    str_,
    bytes_,
    void,
)

from numpy._typing import (
    ArrayLike,
    NDArray,
    _ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeDT64_co,
    _ArrayLikeTD64_co,
    _ArrayLikeObject_co,
    _ArrayLikeNumber_co,
)

_SCT = TypeVar("_SCT", bound=generic)
_NumberType = TypeVar("_NumberType", bound=number[Any])

# Explicitly set all allowed values to prevent accidental castings to
# abstract dtypes (their common super-type).
#
# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`)
# which could result in, for example, `int64` and `float64` producing a
# `number[_64Bit]` array
_SCTNoCast = TypeVar(
    "_SCTNoCast",
    bool_,
    ushort,
    ubyte,
    uintc,
    uint,
    ulonglong,
    short,
    byte,
    intc,
    int_,
    longlong,
    half,
    single,
    double,
    longdouble,
    csingle,
    cdouble,
    clongdouble,
    timedelta64,
    datetime64,
    object_,
    str_,
    bytes_,
    void,
)

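# Illustrative sketch (not part of the original stub): under the `_SCTNoCast`
# constraint a type checker binds both arguments to one concrete scalar type
# instead of widening to a common abstract super-type, e.g.
#
#     x: NDArray[int_]
#     y: NDArray[double]
#     reveal_type(setdiff1d(x, x))  # NDArray[int_], via _SCTNoCast
#     reveal_type(setdiff1d(x, y))  # falls back to NDArray[Any] rather than
#                                   # casting both to an abstract number type
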
__all__: list[str]

@overload
def ediff1d(
    ary: _ArrayLikeBool_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[int8]: ...
@overload
def ediff1d(
    ary: _ArrayLike[_NumberType],
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[_NumberType]: ...
@overload
def ediff1d(
    ary: _ArrayLikeNumber_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[Any]: ...
@overload
def ediff1d(
    ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[timedelta64]: ...
@overload
def ediff1d(
    ary: _ArrayLikeObject_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[object_]: ...

@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> NDArray[Any]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...

@overload
def intersect1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
    return_indices: L[False] = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
    return_indices: L[False] = ...,
) -> NDArray[Any]: ...
@overload
def intersect1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
    return_indices: L[True] = ...,
) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
@overload
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
    return_indices: L[True] = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...

@overload
def setxor1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def setxor1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
) -> NDArray[Any]: ...

def in1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
    invert: bool = ...,
) -> NDArray[bool_]: ...

def isin(
    element: ArrayLike,
    test_elements: ArrayLike,
    assume_unique: bool = ...,
    invert: bool = ...,
) -> NDArray[bool_]: ...

@overload
def union1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
) -> NDArray[_SCTNoCast]: ...
@overload
def union1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
) -> NDArray[Any]: ...

@overload
def setdiff1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def setdiff1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
) -> NDArray[Any]: ...
219
teil20b/lib/python3.11/site-packages/numpy/lib/arrayterator.py
Normal file
@@ -0,0 +1,219 @@
"""
|
||||
A buffered iterator for big arrays.
|
||||
|
||||
This module solves the problem of iterating over a big file-based array
|
||||
without having to read it into memory. The `Arrayterator` class wraps
|
||||
an array object, and when iterated it will return sub-arrays with at most
|
||||
a user-specified number of elements.
|
||||
|
||||
"""
|
||||
from operator import mul
|
||||
from functools import reduce
|
||||
|
||||
__all__ = ['Arrayterator']
|
||||
|
||||
|
||||
class Arrayterator:
|
||||
"""
|
||||
Buffered iterator for big arrays.
|
||||
|
||||
`Arrayterator` creates a buffered iterator for reading big arrays in small
|
||||
contiguous blocks. The class is useful for objects stored in the
|
||||
file system. It allows iteration over the object *without* reading
|
||||
everything in memory; instead, small blocks are read and iterated over.
|
||||
|
||||
`Arrayterator` can be used with any object that supports multidimensional
|
||||
slices. This includes NumPy arrays, but also variables from
|
||||
Scientific.IO.NetCDF or pynetcdf for example.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
var : array_like
|
||||
The object to iterate over.
|
||||
buf_size : int, optional
|
||||
The buffer size. If `buf_size` is supplied, the maximum amount of
|
||||
data that will be read into memory is `buf_size` elements.
|
||||
Default is None, which will read as many element as possible
|
||||
into memory.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
var
|
||||
buf_size
|
||||
start
|
||||
stop
|
||||
step
|
||||
shape
|
||||
flat
|
||||
|
||||
See Also
|
||||
--------
|
||||
ndenumerate : Multidimensional array iterator.
|
||||
flatiter : Flat array iterator.
|
||||
memmap : Create a memory-map to an array stored in a binary file on disk.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The algorithm works by first finding a "running dimension", along which
|
||||
the blocks will be extracted. Given an array of dimensions
|
||||
``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
|
||||
first dimension will be used. If, on the other hand,
|
||||
``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
|
||||
Blocks are extracted along this dimension, and when the last block is
|
||||
returned the process continues from the next dimension, until all
|
||||
elements have been read.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
|
||||
>>> a_itor = np.lib.Arrayterator(a, 2)
|
||||
>>> a_itor.shape
|
||||
(3, 4, 5, 6)
|
||||
|
||||
Now we can iterate over ``a_itor``, and it will return arrays of size
|
||||
two. Since `buf_size` was smaller than any dimension, the first
|
||||
dimension will be iterated over first:
|
||||
|
||||
>>> for subarr in a_itor:
|
||||
... if not subarr.all():
|
||||
... print(subarr, subarr.shape) # doctest: +SKIP
|
||||
>>> # [[[[0 1]]]] (1, 1, 1, 2)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, var, buf_size=None):
|
||||
self.var = var
|
||||
self.buf_size = buf_size
|
||||
|
||||
self.start = [0 for dim in var.shape]
|
||||
self.stop = [dim for dim in var.shape]
|
||||
self.step = [1 for dim in var.shape]
|
||||
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self.var, attr)
|
||||
|
||||
def __getitem__(self, index):
|
||||
"""
|
||||
Return a new arrayterator.
|
||||
|
||||
"""
|
||||
# Fix index, handling ellipsis and incomplete slices.
|
||||
if not isinstance(index, tuple):
|
||||
index = (index,)
|
||||
fixed = []
|
||||
length, dims = len(index), self.ndim
|
||||
for slice_ in index:
|
||||
if slice_ is Ellipsis:
|
||||
fixed.extend([slice(None)] * (dims-length+1))
|
||||
length = len(fixed)
|
||||
elif isinstance(slice_, int):
|
||||
fixed.append(slice(slice_, slice_+1, 1))
|
||||
else:
|
||||
fixed.append(slice_)
|
||||
index = tuple(fixed)
|
||||
if len(index) < dims:
|
||||
index += (slice(None),) * (dims-len(index))
|
||||
|
||||
# Return a new arrayterator object.
|
||||
out = self.__class__(self.var, self.buf_size)
|
||||
for i, (start, stop, step, slice_) in enumerate(
|
||||
zip(self.start, self.stop, self.step, index)):
|
||||
out.start[i] = start + (slice_.start or 0)
|
||||
out.step[i] = step * (slice_.step or 1)
|
||||
out.stop[i] = start + (slice_.stop or stop-start)
|
||||
out.stop[i] = min(stop, out.stop[i])
|
||||
return out
|
||||
|
||||
def __array__(self):
|
||||
"""
|
||||
Return corresponding data.
|
||||
|
||||
"""
|
||||
slice_ = tuple(slice(*t) for t in zip(
|
||||
self.start, self.stop, self.step))
|
||||
return self.var[slice_]
|
||||
|
||||
@property
|
||||
def flat(self):
|
||||
"""
|
||||
A 1-D flat iterator for Arrayterator objects.
|
||||
|
||||
This iterator returns elements of the array to be iterated over in
|
||||
`Arrayterator` one by one. It is similar to `flatiter`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
Arrayterator
|
||||
flatiter
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
|
||||
>>> a_itor = np.lib.Arrayterator(a, 2)
|
||||
|
||||
>>> for subarr in a_itor.flat:
|
||||
... if not subarr:
|
||||
... print(subarr, type(subarr))
|
||||
...
|
||||
0 <class 'numpy.int64'>
|
||||
|
||||
"""
|
||||
for block in self:
|
||||
yield from block.flat
|
||||
|
||||
@property
|
||||
def shape(self):
|
||||
"""
|
||||
The shape of the array to be iterated over.
|
||||
|
||||
For an example, see `Arrayterator`.
|
||||
|
||||
"""
|
||||
return tuple(((stop-start-1)//step+1) for start, stop, step in
|
||||
zip(self.start, self.stop, self.step))
|
||||
|
||||
def __iter__(self):
|
||||
# Skip arrays with degenerate dimensions
|
||||
if [dim for dim in self.shape if dim <= 0]:
|
||||
return
|
||||
|
||||
start = self.start[:]
|
||||
stop = self.stop[:]
|
||||
step = self.step[:]
|
||||
ndims = self.var.ndim
|
||||
|
||||
while True:
|
||||
count = self.buf_size or reduce(mul, self.shape)
|
||||
|
||||
# iterate over each dimension, looking for the
|
||||
# running dimension (ie, the dimension along which
|
||||
# the blocks will be built from)
|
||||
rundim = 0
|
||||
for i in range(ndims-1, -1, -1):
|
||||
# if count is zero we ran out of elements to read
|
||||
# along higher dimensions, so we read only a single position
|
||||
if count == 0:
|
||||
stop[i] = start[i]+1
|
||||
elif count <= self.shape[i]:
|
||||
# limit along this dimension
|
||||
stop[i] = start[i] + count*step[i]
|
||||
rundim = i
|
||||
else:
|
||||
# read everything along this dimension
|
||||
stop[i] = self.stop[i]
|
||||
stop[i] = min(self.stop[i], stop[i])
|
||||
count = count//self.shape[i]
|
||||
|
||||
# yield a block
|
||||
slice_ = tuple(slice(*t) for t in zip(start, stop, step))
|
||||
yield self.var[slice_]
|
||||
|
||||
# Update start position, taking care of overflow to
|
||||
# other dimensions
|
||||
start[rundim] = stop[rundim] # start where we stopped
|
||||
for i in range(ndims-1, 0, -1):
|
||||
if start[i] >= self.stop[i]:
|
||||
start[i] = self.start[i]
|
||||
start[i-1] += self.step[i-1]
|
||||
if start[0] >= self.stop[0]:
|
||||
return
|
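
# Illustrative sketch (not part of the original module): with ``buf_size=6``
# on a ``(3, 4)`` array, a rectangular block can hold one whole row
# (4 elements) but not two, so iteration yields three ``(1, 4)`` blocks.
#
#     a = np.arange(12).reshape(3, 4)   # assumes numpy is imported as np
#     for block in Arrayterator(a, 6):
#         print(block.shape)            # prints (1, 4) three times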
@@ -0,0 +1,49 @@
from collections.abc import Generator
from typing import (
    Any,
    TypeVar,
    Union,
    overload,
)

from numpy import ndarray, dtype, generic
from numpy._typing import DTypeLike

# TODO: Set a shape bound once we've got proper shape support
_Shape = TypeVar("_Shape", bound=Any)
_DType = TypeVar("_DType", bound=dtype[Any])
_ScalarType = TypeVar("_ScalarType", bound=generic)

_Index = Union[
    Union[ellipsis, int, slice],
    tuple[Union[ellipsis, int, slice], ...],
]

__all__: list[str]

# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
# but its ``__getattr__`` method does wrap around the former and thus has
# access to all its methods

class Arrayterator(ndarray[_Shape, _DType]):
    var: ndarray[_Shape, _DType]  # type: ignore[assignment]
    buf_size: None | int
    start: list[int]
    stop: list[int]
    step: list[int]

    @property  # type: ignore[misc]
    def shape(self) -> tuple[int, ...]: ...
    @property
    def flat(  # type: ignore[override]
        self: ndarray[Any, dtype[_ScalarType]]
    ) -> Generator[_ScalarType, None, None]: ...
    def __init__(
        self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
    ) -> None: ...
    @overload
    def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
    @overload
    def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
    def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
    def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
976
teil20b/lib/python3.11/site-packages/numpy/lib/format.py
Normal file
@@ -0,0 +1,976 @@
"""
|
||||
Binary serialization
|
||||
|
||||
NPY format
|
||||
==========
|
||||
|
||||
A simple format for saving numpy arrays to disk with the full
|
||||
information about them.
|
||||
|
||||
The ``.npy`` format is the standard binary file format in NumPy for
|
||||
persisting a *single* arbitrary NumPy array on disk. The format stores all
|
||||
of the shape and dtype information necessary to reconstruct the array
|
||||
correctly even on another machine with a different architecture.
|
||||
The format is designed to be as simple as possible while achieving
|
||||
its limited goals.
|
||||
|
||||
The ``.npz`` format is the standard format for persisting *multiple* NumPy
|
||||
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
|
||||
files, one for each array.
|
||||
|
||||
Capabilities
|
||||
------------
|
||||
|
||||
- Can represent all NumPy arrays including nested record arrays and
|
||||
object arrays.
|
||||
|
||||
- Represents the data in its native binary form.
|
||||
|
||||
- Supports Fortran-contiguous arrays directly.
|
||||
|
||||
- Stores all of the necessary information to reconstruct the array
|
||||
including shape and dtype on a machine of a different
|
||||
architecture. Both little-endian and big-endian arrays are
|
||||
supported, and a file with little-endian numbers will yield
|
||||
a little-endian array on any machine reading the file. The
|
||||
types are described in terms of their actual sizes. For example,
|
||||
if a machine with a 64-bit C "long int" writes out an array with
|
||||
"long ints", a reading machine with 32-bit C "long ints" will yield
|
||||
an array with 64-bit integers.
|
||||
|
||||
- Is straightforward to reverse engineer. Datasets often live longer than
|
||||
the programs that created them. A competent developer should be
|
||||
able to create a solution in their preferred programming language to
|
||||
read most ``.npy`` files that they have been given without much
|
||||
documentation.
|
||||
|
||||
- Allows memory-mapping of the data. See `open_memmap`.
|
||||
|
||||
- Can be read from a filelike stream object instead of an actual file.
|
||||
|
||||
- Stores object arrays, i.e. arrays containing elements that are arbitrary
|
||||
Python objects. Files with object arrays are not to be mmapable, but
|
||||
can be read and written to disk.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
|
||||
Subclasses will be accepted for writing, but only the array data will
|
||||
be written out. A regular numpy.ndarray object will be created
|
||||
upon reading the file.
|
||||
|
||||
.. warning::
|
||||
|
||||
Due to limitations in the interpretation of structured dtypes, dtypes
|
||||
with fields with empty names will have the names replaced by 'f0', 'f1',
|
||||
etc. Such arrays will not round-trip through the format entirely
|
||||
accurately. The data is intact; only the field names will differ. We are
|
||||
working on a fix for this. This fix will not require a change in the
|
||||
file format. The arrays with such structures can still be saved and
|
||||
restored, and the correct dtype may be restored by using the
|
||||
``loadedarray.view(correct_dtype)`` method.
|
||||
|
||||
File extensions
|
||||
---------------
|
||||
|
||||
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
|
||||
in this format. This is by no means a requirement; applications may wish
|
||||
to use these file formats but use an extension specific to the
|
||||
application. In the absence of an obvious alternative, however,
|
||||
we suggest using ``.npy`` and ``.npz``.
|
||||
|
||||
Version numbering
|
||||
-----------------
|
||||
|
||||
The version numbering of these formats is independent of NumPy version
|
||||
numbering. If the format is upgraded, the code in `numpy.io` will still
|
||||
be able to read and write Version 1.0 files.
|
||||
|
||||
Format Version 1.0
|
||||
------------------
|
||||
|
||||
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
|
||||
|
||||
The next 1 byte is an unsigned byte: the major version number of the file
|
||||
format, e.g. ``\\x01``.
|
||||
|
||||
The next 1 byte is an unsigned byte: the minor version number of the file
|
||||
format, e.g. ``\\x00``. Note: the version of the file format is not tied
|
||||
to the version of the numpy package.
|
||||
|
||||
The next 2 bytes form a little-endian unsigned short int: the length of
|
||||
the header data HEADER_LEN.
|
||||
|
||||
The next HEADER_LEN bytes form the header data describing the array's
|
||||
format. It is an ASCII string which contains a Python literal expression
|
||||
of a dictionary. It is terminated by a newline (``\\n``) and padded with
|
||||
spaces (``\\x20``) to make the total of
|
||||
``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
|
||||
by 64 for alignment purposes.
|
||||
|
||||
The dictionary contains three keys:
|
||||
|
||||
"descr" : dtype.descr
|
||||
An object that can be passed as an argument to the `numpy.dtype`
|
||||
constructor to create the array's dtype.
|
||||
"fortran_order" : bool
|
||||
Whether the array data is Fortran-contiguous or not. Since
|
||||
Fortran-contiguous arrays are a common form of non-C-contiguity,
|
||||
we allow them to be written directly to disk for efficiency.
|
||||
"shape" : tuple of int
|
||||
The shape of the array.
|
||||
|
||||
For repeatability and readability, the dictionary keys are sorted in
|
||||
alphabetic order. This is for convenience only. A writer SHOULD implement
|
||||
this if possible. A reader MUST NOT depend on this.
|
||||
|
||||
Following the header comes the array data. If the dtype contains Python
|
||||
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
|
||||
pickle of the array. Otherwise the data is the contiguous (either C-
|
||||
or Fortran-, depending on ``fortran_order``) bytes of the array.
|
||||
Consumers can figure out the number of bytes by multiplying the number
|
||||
of elements given by the shape (noting that ``shape=()`` means there is
|
||||
1 element) by ``dtype.itemsize``.
|
||||
|
||||
Format Version 2.0
|
||||
------------------
|
||||
|
||||
The version 1.0 format only allowed the array header to have a total size of
|
||||
65535 bytes. This can be exceeded by structured arrays with a large number of
|
||||
columns. The version 2.0 format extends the header size to 4 GiB.
|
||||
`numpy.save` will automatically save in 2.0 format if the data requires it,
|
||||
else it will always use the more compatible 1.0 format.
|
||||
|
||||
The description of the fourth element of the header therefore has become:
|
||||
"The next 4 bytes form a little-endian unsigned int: the length of the header
|
||||
data HEADER_LEN."
|
||||
|
||||
Format Version 3.0
|
||||
------------------
|
||||
|
||||
This version replaces the ASCII string (which in practice was latin1) with
|
||||
a utf8-encoded string, so supports structured types with any unicode field
|
||||
names.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The ``.npy`` format, including motivation for creating it and a comparison of
|
||||
alternatives, is described in the
|
||||
:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
|
||||
evolved with time and this document is more current.
|
||||
|
||||
"""
|
||||
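# Illustrative sketch (not part of the original module): the version 1.0
# layout described above can be checked directly against a real file, using
# only public NumPy and stdlib APIs.
#
#     import io
#     import numpy as np
#
#     buf = io.BytesIO()
#     np.save(buf, np.arange(6, dtype='<i8').reshape(2, 3))
#     raw = buf.getvalue()
#     assert raw[:6] == b'\x93NUMPY'           # magic string
#     major, minor = raw[6], raw[7]            # format version, e.g. (1, 0)
#     header_len = int.from_bytes(raw[8:10], 'little')
#     header = raw[10:10 + header_len].decode('latin1')
#     # header is a Python dict literal such as:
#     # "{'descr': '<i8', 'fortran_order': False, 'shape': (2, 3), }"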
import numpy
import warnings
from numpy.lib.utils import safe_eval, drop_metadata
from numpy.compat import (
    isfileobj, os_fspath, pickle
    )


__all__ = []


EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
# allow growth within the address space of a 64 bit machine along one axis
GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype

# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
_header_size_info = {
    (1, 0): ('<H', 'latin1'),
    (2, 0): ('<I', 'latin1'),
    (3, 0): ('<I', 'utf8'),
}

# Python's literal_eval is not actually safe for large inputs, since parsing
# may become slow or even cause interpreter crashes.
# This is an arbitrary, low limit which should make it safe in practice.
_MAX_HEADER_SIZE = 10000

def _check_version(version):
    if version not in [(1, 0), (2, 0), (3, 0), None]:
        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
        raise ValueError(msg % (version,))

def magic(major, minor):
    """ Return the magic string for the given file format version.

    Parameters
    ----------
    major : int in [0, 255]
    minor : int in [0, 255]

    Returns
    -------
    magic : str

    Raises
    ------
    ValueError if the version cannot be formatted.
    """
    if major < 0 or major > 255:
        raise ValueError("major version must be 0 <= major < 256")
    if minor < 0 or minor > 255:
        raise ValueError("minor version must be 0 <= minor < 256")
    return MAGIC_PREFIX + bytes([major, minor])

def read_magic(fp):
    """ Read the magic string to get the version of the file format.

    Parameters
    ----------
    fp : filelike object

    Returns
    -------
    major : int
    minor : int
    """
    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
    if magic_str[:-2] != MAGIC_PREFIX:
        msg = "the magic string is not correct; expected %r, got %r"
        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
    major, minor = magic_str[-2:]
    return major, minor


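# Illustrative round-trip sketch (not part of the original module): `magic`
# and `read_magic` are inverses over a file-like object.
#
#     import io
#     fp = io.BytesIO(magic(1, 0))
#     assert read_magic(fp) == (1, 0)

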
def dtype_to_descr(dtype):
|
||||
"""
|
||||
Get a serializable descriptor from the dtype.
|
||||
|
||||
The .descr attribute of a dtype object cannot be round-tripped through
|
||||
the dtype() constructor. Simple types, like dtype('float32'), have
|
||||
a descr which looks like a record array with one field with '' as
|
||||
a name. The dtype() constructor interprets this as a request to give
|
||||
a default name. Instead, we construct descriptor that can be passed to
|
||||
dtype().
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dtype : dtype
|
||||
The dtype of the array that will be written to disk.
|
||||
|
||||
Returns
|
||||
-------
|
||||
descr : object
|
||||
An object that can be passed to `numpy.dtype()` in order to
|
||||
replicate the input dtype.
|
||||
|
||||
"""
|
||||
# NOTE: that drop_metadata may not return the right dtype e.g. for user
|
||||
# dtypes. In that case our code below would fail the same, though.
|
||||
new_dtype = drop_metadata(dtype)
|
||||
if new_dtype is not dtype:
|
||||
warnings.warn("metadata on a dtype is not saved to an npy/npz. "
|
||||
"Use another format (such as pickle) to store it.",
|
||||
UserWarning, stacklevel=2)
|
||||
if dtype.names is not None:
|
||||
# This is a record array. The .descr is fine. XXX: parts of the
|
||||
# record array with an empty name, like padding bytes, still get
|
||||
# fiddled with. This needs to be fixed in the C implementation of
|
||||
# dtype().
|
||||
return dtype.descr
|
||||
else:
|
||||
return dtype.str


def descr_to_dtype(descr):
    """
    Returns a dtype based off the given description.

    This is essentially the reverse of `dtype_to_descr()`.  It will remove
    the valueless padding fields created by simple dtypes like
    dtype('float32'), and then convert the description to its corresponding
    dtype.

    Parameters
    ----------
    descr : object
        The object retrieved by dtype.descr.  Can be passed to
        `numpy.dtype()` in order to replicate the input dtype.

    Returns
    -------
    dtype : dtype
        The dtype constructed by the description.

    """
    if isinstance(descr, str):
        # No padding removal needed
        return numpy.dtype(descr)
    elif isinstance(descr, tuple):
        # subtype, will always have a shape descr[1]
        dt = descr_to_dtype(descr[0])
        return numpy.dtype((dt, descr[1]))

    titles = []
    names = []
    formats = []
    offsets = []
    offset = 0
    for field in descr:
        if len(field) == 2:
            name, descr_str = field
            dt = descr_to_dtype(descr_str)
        else:
            name, descr_str, shape = field
            dt = numpy.dtype((descr_to_dtype(descr_str), shape))

        # Ignore padding bytes, which will be void bytes with '' as name.
        # (Once support for blank names is removed, only ``name == ''``
        # needs to be checked.)
        is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
        if not is_pad:
            title, name = name if isinstance(name, tuple) else (None, name)
            titles.append(title)
            names.append(name)
            formats.append(dt)
            offsets.append(offset)
        offset += dt.itemsize

    return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
                        'offsets': offsets, 'itemsize': offset})
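
# Illustrative sketch (not part of the original module): the two helpers are
# inverses for dtypes the format supports:
#
#     >>> dt = numpy.dtype([('x', '<f4'), ('y', '<i8')])
#     >>> descr_to_dtype(dtype_to_descr(dt)) == dt
#     True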


def header_data_from_array_1_0(array):
    """ Get the dictionary of header metadata from a numpy.ndarray.

    Parameters
    ----------
    array : numpy.ndarray

    Returns
    -------
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    """
    d = {'shape': array.shape}
    if array.flags.c_contiguous:
        d['fortran_order'] = False
    elif array.flags.f_contiguous:
        d['fortran_order'] = True
    else:
        # Totally non-contiguous data.  We will have to make it C-contiguous
        # before writing.  Note that we need to test for C_CONTIGUOUS first
        # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
        d['fortran_order'] = False

    d['descr'] = dtype_to_descr(array.dtype)
    return d
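
# Illustrative sketch (not part of the original module): for a small
# C-contiguous array the resulting header dict looks like this on a
# little-endian machine:
#
#     >>> header_data_from_array_1_0(numpy.zeros((2, 3)))
#     {'shape': (2, 3), 'fortran_order': False, 'descr': '<f8'}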


def _wrap_header(header, version):
    """
    Takes a stringified header, and attaches the prefix and padding to it
    """
    import struct
    assert version is not None
    fmt, encoding = _header_size_info[version]
    header = header.encode(encoding)
    hlen = len(header) + 1
    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
    try:
        header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
    except struct.error:
        msg = "Header length {} too big for version={}".format(hlen, version)
        raise ValueError(msg) from None

    # Pad the header with spaces and a final newline such that the magic
    # string, the header-length short and the header are aligned on an
    # ARRAY_ALIGN byte boundary.  This supports memory mapping of dtypes
    # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
    # offset must be page-aligned (i.e. the beginning of the file).
    return header_prefix + header + b' '*padlen + b'\n'


def _wrap_header_guess_version(header):
    """
    Like `_wrap_header`, but chooses an appropriate version given the contents
    """
    try:
        return _wrap_header(header, (1, 0))
    except ValueError:
        pass

    try:
        ret = _wrap_header(header, (2, 0))
    except UnicodeEncodeError:
        pass
    else:
        warnings.warn("Stored array in format 2.0. It can only be "
                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
        return ret

    header = _wrap_header(header, (3, 0))
    warnings.warn("Stored array in format 3.0. It can only be "
                  "read by NumPy >= 1.17", UserWarning, stacklevel=2)
    return header


def _write_array_header(fp, d, version=None):
    """ Write the header for an array.

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    version : tuple or None
        None means use the oldest version that works.  Providing an explicit
        version will raise a ValueError if the format does not allow saving
        this data.  Default: None
    """
    header = ["{"]
    for key, value in sorted(d.items()):
        # Need to use repr here, since we eval these when reading
        header.append("'%s': %s, " % (key, repr(value)))
    header.append("}")
    header = "".join(header)

    # Add some spare space so that the array header can be modified in-place
    # when changing the array size, e.g. when growing it by appending data at
    # the end.
    shape = d['shape']
    header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
        shape[-1 if d['fortran_order'] else 0]
    ))) if len(shape) > 0 else 0)

    if version is None:
        header = _wrap_header_guess_version(header)
    else:
        header = _wrap_header(header, version)
    fp.write(header)


def write_array_header_1_0(fp, d):
    """ Write the header for an array using the 1.0 format.

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    _write_array_header(fp, d, (1, 0))


def write_array_header_2_0(fp, d):
    """ Write the header for an array using the 2.0 format.
    The 2.0 format allows storing very large structured arrays.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    _write_array_header(fp, d, (2, 0))


def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
    """
    Read an array header from a filelike object using the 1.0 file format
    version.

    This will leave the file object located just after the header.

    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.

    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous.  Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.

    Raises
    ------
    ValueError
        If the data is invalid.

    """
    return _read_array_header(
            fp, version=(1, 0), max_header_size=max_header_size)


def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
    """
    Read an array header from a filelike object using the 2.0 file format
    version.

    This will leave the file object located just after the header.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.

    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous.  Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.

    Raises
    ------
    ValueError
        If the data is invalid.

    """
    return _read_array_header(
            fp, version=(2, 0), max_header_size=max_header_size)


def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers.  Needed to allow npz
    headers produced in Python 2 to be read in Python 3.

    Parameters
    ----------
    s : string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    from io import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(s).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens)
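
# Illustrative sketch (not part of the original module): headers written by
# Python 2 used long literals such as ``10L``; the filter drops the suffix so
# the header parses under Python 3 (untokenize may reflow whitespace):
#
#     >>> _filter_header("{'shape': (10L, 3L)}")
#     "{'shape': (10 , 3 )}"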


def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
    """
    see read_array_header_1_0
    """
    # Read an unsigned, little-endian short int which has the length of the
    # header.
    import struct
    hinfo = _header_size_info.get(version)
    if hinfo is None:
        raise ValueError("Invalid version {!r}".format(version))
    hlength_type, encoding = hinfo

    hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
    header_length = struct.unpack(hlength_type, hlength_str)[0]
    header = _read_bytes(fp, header_length, "array header")
    header = header.decode(encoding)
    if len(header) > max_header_size:
        raise ValueError(
            f"Header info length ({len(header)}) is large and may not be safe "
            "to load securely.\n"
            "To allow loading, adjust `max_header_size` or fully trust "
            "the `.npy` file using `allow_pickle=True`.\n"
            "For safety against large resource use or crashes, sandboxing "
            "may be necessary.")

    # The header is a pretty-printed string representation of a literal
    # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
    # boundary.  The keys are strings.
    #   "shape" : tuple of int
    #   "fortran_order" : bool
    #   "descr" : dtype.descr
    # Versions (2, 0) and (1, 0) could have been created by a Python 2
    # implementation before header filtering was implemented.
    #
    # For performance reasons, we try without _filter_header first though
    try:
        d = safe_eval(header)
    except SyntaxError as e:
        if version <= (2, 0):
            header = _filter_header(header)
            try:
                d = safe_eval(header)
            except SyntaxError as e2:
                msg = "Cannot parse header: {!r}"
                raise ValueError(msg.format(header)) from e2
            else:
                warnings.warn(
                    "Reading `.npy` or `.npz` file required additional "
                    "header parsing as it was created on Python 2.  Save the "
                    "file again to speed up loading and avoid this warning.",
                    UserWarning, stacklevel=4)
        else:
            msg = "Cannot parse header: {!r}"
            raise ValueError(msg.format(header)) from e
    if not isinstance(d, dict):
        msg = "Header is not a dictionary: {!r}"
        raise ValueError(msg.format(d))

    if EXPECTED_KEYS != d.keys():
        keys = sorted(d.keys())
        msg = "Header does not contain the correct keys: {!r}"
        raise ValueError(msg.format(keys))

    # Sanity-check the values.
    if (not isinstance(d['shape'], tuple) or
            not all(isinstance(x, int) for x in d['shape'])):
        msg = "shape is not valid: {!r}"
        raise ValueError(msg.format(d['shape']))
    if not isinstance(d['fortran_order'], bool):
        msg = "fortran_order is not a valid bool: {!r}"
        raise ValueError(msg.format(d['fortran_order']))
    try:
        dtype = descr_to_dtype(d['descr'])
    except TypeError as e:
        msg = "descr is not a valid dtype descriptor: {!r}"
        raise ValueError(msg.format(d['descr'])) from e

    return d['shape'], d['fortran_order'], dtype


def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
    """
    Write an array to an NPY file, including a header.

    If the array is neither C-contiguous nor Fortran-contiguous AND the
    file_like object is not a real file object, this function will have to
    copy data in memory.

    Parameters
    ----------
    fp : file_like object
        An open, writable file object, or similar object with a
        ``.write()`` method.
    array : ndarray
        The array to write to disk.
    version : (int, int) or None, optional
        The version number of the format.  None means use the oldest
        supported version that is able to store the data.  Default: None
    allow_pickle : bool, optional
        Whether to allow writing pickled data.  Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass to pickle.dump, excluding
        'protocol'.  These are only useful when pickling object arrays to
        a format that Python 2 can read.

    Raises
    ------
    ValueError
        If the array cannot be persisted.  This includes the case of
        allow_pickle=False and array being an object array.
    Various other errors
        If the array contains Python objects as part of its dtype, the
        process of pickling them may raise various errors if the objects
        are not picklable.

    """
    _check_version(version)
    _write_array_header(fp, header_data_from_array_1_0(array), version)

    if array.itemsize == 0:
        buffersize = 0
    else:
        # Set buffer size to 16 MiB to hide the Python loop overhead.
        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)

    if array.dtype.hasobject:
        # We contain Python objects so we cannot write out the data
        # directly.  Instead, we will pickle it out
        if not allow_pickle:
            raise ValueError("Object arrays cannot be saved when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        pickle.dump(array, fp, protocol=3, **pickle_kwargs)
    elif array.flags.f_contiguous and not array.flags.c_contiguous:
        if isfileobj(fp):
            array.T.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='F'):
                fp.write(chunk.tobytes('C'))
    else:
        if isfileobj(fp):
            array.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='C'):
                fp.write(chunk.tobytes('C'))


def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
               max_header_size=_MAX_HEADER_SIZE):
    """
    Read an array from an NPY file.

    Parameters
    ----------
    fp : file_like object
        If this is not a real file object, then this may take extra memory
        and time.
    allow_pickle : bool, optional
        Whether to allow reading pickled data.  Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict
        Additional keyword arguments to pass to pickle.load.  These are only
        useful when loading object arrays saved on Python 2 when using
        Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Returns
    -------
    array : ndarray
        The array from the data on disk.

    Raises
    ------
    ValueError
        If the data is invalid, or allow_pickle=False and the file contains
        an object array.

    """
    if allow_pickle:
        # Effectively ignore max_header_size, since `allow_pickle` indicates
        # that the input is fully trusted.
        max_header_size = 2**64

    version = read_magic(fp)
    _check_version(version)
    shape, fortran_order, dtype = _read_array_header(
            fp, version, max_header_size=max_header_size)
    if len(shape) == 0:
        count = 1
    else:
        count = numpy.multiply.reduce(shape, dtype=numpy.int64)

    # Now read the actual data.
    if dtype.hasobject:
        # The array contained Python objects.  We need to unpickle the data.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be loaded when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        try:
            array = pickle.load(fp, **pickle_kwargs)
        except UnicodeError as err:
            # Friendlier error message
            raise UnicodeError("Unpickling a python object failed: %r\n"
                               "You may need to pass the encoding= option "
                               "to numpy.load" % (err,)) from err
    else:
        if isfileobj(fp):
            # We can use the fast fromfile() function.
            array = numpy.fromfile(fp, dtype=dtype, count=count)
        else:
            # This is not a real file.  We have to read it the
            # memory-intensive way.
            # crc32 module fails on reads greater than 2 ** 32 bytes,
            # breaking large reads from gzip streams.  Chunk reads to
            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
            # of the read.  In non-chunked case count < max_read_count, so
            # only one read is performed.

            # Use np.ndarray instead of np.empty since the latter does
            # not correctly instantiate zero-width string dtypes; see
            # https://github.com/numpy/numpy/pull/6430
            array = numpy.ndarray(count, dtype=dtype)

            if dtype.itemsize > 0:
                # If dtype.itemsize == 0 then there's nothing more to read
                max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)

                for i in range(0, count, max_read_count):
                    read_count = min(max_read_count, count - i)
                    read_size = int(read_count * dtype.itemsize)
                    data = _read_bytes(fp, read_size, "array data")
                    array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
                                                             count=read_count)

        if fortran_order:
            array.shape = shape[::-1]
            array = array.transpose()
        else:
            array.shape = shape

    return array
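
# Illustrative sketch (not part of the original module): write_array and
# read_array round-trip through any binary file-like object:
#
#     >>> import io
#     >>> buf = io.BytesIO()
#     >>> a = numpy.arange(6).reshape(2, 3)
#     >>> write_array(buf, a)
#     >>> _ = buf.seek(0)
#     >>> numpy.array_equal(read_array(buf), a)
#     True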


def open_memmap(filename, mode='r+', dtype=None, shape=None,
                fortran_order=False, version=None, *,
                max_header_size=_MAX_HEADER_SIZE):
    """
    Open a .npy file as a memory-mapped array.

    This may be used to read an existing file or create a new one.

    Parameters
    ----------
    filename : str or path-like
        The name of the file on disk.  This may *not* be a file-like
        object.
    mode : str, optional
        The mode in which to open the file; the default is 'r+'.  In
        addition to the standard file modes, 'c' is also accepted to mean
        "copy on write."  See `memmap` for the available mode strings.
    dtype : data-type, optional
        The data type of the array if we are creating a new file in "write"
        mode; otherwise, `dtype` is ignored.  The default value is None,
        which results in a data-type of `float64`.
    shape : tuple of int
        The shape of the array if we are creating a new file in "write"
        mode, in which case this parameter is required.  Otherwise, this
        parameter is ignored and is thus optional.
    fortran_order : bool, optional
        Whether the array should be Fortran-contiguous (True) or
        C-contiguous (False, the default) if we are creating a new file in
        "write" mode.
    version : tuple of int (major, minor) or None
        If the mode is a "write" mode, then this is the version of the file
        format used to create the file.  None means use the oldest
        supported version that is able to store the data.  Default: None
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.

    Returns
    -------
    marray : memmap
        The memory-mapped array.

    Raises
    ------
    ValueError
        If the data or the mode is invalid.
    OSError
        If the file is not found or cannot be opened correctly.

    See Also
    --------
    numpy.memmap

    """
    if isfileobj(filename):
        raise ValueError("Filename must be a string or a path-like object."
                         "  Memmap cannot use existing file handles.")

    if 'w' in mode:
        # We are creating the file, not reading it.
        # Check if we ought to create the file.
        _check_version(version)
        # Ensure that the given dtype is an authentic dtype object rather
        # than just something that can be interpreted as a dtype object.
        dtype = numpy.dtype(dtype)
        if dtype.hasobject:
            msg = "Array can't be memory-mapped: Python objects in dtype."
            raise ValueError(msg)
        d = dict(
            descr=dtype_to_descr(dtype),
            fortran_order=fortran_order,
            shape=shape,
        )
        # If we got here, then it should be safe to create the file.
        with open(os_fspath(filename), mode+'b') as fp:
            _write_array_header(fp, d, version)
            offset = fp.tell()
    else:
        # Read the header of the file first.
        with open(os_fspath(filename), 'rb') as fp:
            version = read_magic(fp)
            _check_version(version)

            shape, fortran_order, dtype = _read_array_header(
                    fp, version, max_header_size=max_header_size)
            if dtype.hasobject:
                msg = "Array can't be memory-mapped: Python objects in dtype."
                raise ValueError(msg)
            offset = fp.tell()

    if fortran_order:
        order = 'F'
    else:
        order = 'C'

    # We need to change a write-only mode to a read-write mode since we've
    # already written data to the file.
    if mode == 'w+':
        mode = 'r+'

    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
                          mode=mode, offset=offset)

    return marray
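
# Illustrative sketch (not part of the original module); the path below is
# hypothetical:
#
#     >>> m = open_memmap('/tmp/example.npy', mode='w+',
#     ...                 dtype='<i4', shape=(3, 4))
#     >>> m[:] = 7
#     >>> m.flush()
#     >>> int(open_memmap('/tmp/example.npy', mode='r')[0, 0])
#     7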


def _read_bytes(fp, size, error_template="ran out of data"):
    """
    Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
    Non-blocking objects only supported if they derive from io objects.

    Required as e.g. ZipExtFile in Python 2.6 can return less data than
    requested.
    """
    data = bytes()
    while True:
        # io files (default in python3) return None or raise on
        # would-block, python2 file will truncate, probably nothing can be
        # done about that.  note that regular files can't be non-blocking
        try:
            r = fp.read(size - len(data))
            data += r
            if len(r) == 0 or len(data) == size:
                break
        except BlockingIOError:
            pass
    if len(data) != size:
        msg = "EOF: reading %s, expected %d bytes got %d"
        raise ValueError(msg % (error_template, size, len(data)))
    else:
        return data
22
teil20b/lib/python3.11/site-packages/numpy/lib/format.pyi
Normal file
@@ -0,0 +1,22 @@
from typing import Any, Literal, Final

__all__: list[str]

EXPECTED_KEYS: Final[set[str]]
MAGIC_PREFIX: Final[bytes]
MAGIC_LEN: Literal[8]
ARRAY_ALIGN: Literal[64]
BUFFER_SIZE: Literal[262144]  # 2**18

def magic(major, minor): ...
def read_magic(fp): ...
def dtype_to_descr(dtype): ...
def descr_to_dtype(descr): ...
def header_data_from_array_1_0(array): ...
def write_array_header_1_0(fp, d): ...
def write_array_header_2_0(fp, d): ...
def read_array_header_1_0(fp): ...
def read_array_header_2_0(fp): ...
def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
5732
teil20b/lib/python3.11/site-packages/numpy/lib/function_base.py
Normal file
File diff suppressed because it is too large
697
teil20b/lib/python3.11/site-packages/numpy/lib/function_base.pyi
Normal file
@@ -0,0 +1,697 @@
import sys
from collections.abc import Sequence, Iterator, Callable, Iterable
from typing import (
    Literal as L,
    Any,
    TypeVar,
    overload,
    Protocol,
    SupportsIndex,
    SupportsInt,
)

if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:
    from typing_extensions import TypeGuard

from numpy import (
    vectorize as vectorize,
    ufunc,
    generic,
    floating,
    complexfloating,
    intp,
    float64,
    complex128,
    timedelta64,
    datetime64,
    object_,
    _OrderKACF,
)

from numpy._typing import (
    NDArray,
    ArrayLike,
    DTypeLike,
    _ShapeLike,
    _ScalarLike_co,
    _DTypeLike,
    _ArrayLike,
    _ArrayLikeInt_co,
    _ArrayLikeFloat_co,
    _ArrayLikeComplex_co,
    _ArrayLikeTD64_co,
    _ArrayLikeDT64_co,
    _ArrayLikeObject_co,
    _FloatLike_co,
    _ComplexLike_co,
)

from numpy.core.function_base import (
    add_newdoc as add_newdoc,
)

from numpy.core.multiarray import (
    add_docstring as add_docstring,
    bincount as bincount,
)

from numpy.core.umath import _add_newdoc_ufunc

_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])

_2Tuple = tuple[_T, _T]

class _TrimZerosSequence(Protocol[_T_co]):
    def __len__(self) -> int: ...
    def __getitem__(self, key: slice, /) -> _T_co: ...
    def __iter__(self) -> Iterator[Any]: ...

class _SupportsWriteFlush(Protocol):
    def write(self, s: str, /) -> object: ...
    def flush(self) -> object: ...

__all__: list[str]

# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...

@overload
def rot90(
    m: _ArrayLike[_SCT],
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[_SCT]: ...
@overload
def rot90(
    m: ArrayLike,
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[Any]: ...

@overload
def flip(m: _SCT, axis: None = ...) -> _SCT: ...
@overload
def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
@overload
def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
@overload
def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...

def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...

@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    weights: None | _ArrayLikeFloat_co = ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    weights: None | _ArrayLikeComplex_co = ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def average(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    weights: None | Any = ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> Any: ...
@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    weights: None | _ArrayLikeFloat_co = ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[floating[Any]]: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    weights: None | _ArrayLikeComplex_co = ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[complexfloating[Any, Any]]: ...
@overload
def average(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    weights: None | Any = ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[Any]: ...
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    weights: None | Any = ...,
    returned: L[False] = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    weights: None | Any = ...,
    returned: L[True] = ...,
    keepdims: bool = ...,
) -> _2Tuple[Any]: ...

@overload
def asarray_chkfinite(
    a: _ArrayLike[_SCT],
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray_chkfinite(
    a: object,
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: _DTypeLike[_SCT],
    order: _OrderKACF = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: DTypeLike,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...

# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
# xref python/mypy#8645
@overload
def piecewise(
    x: _ArrayLike[_SCT],
    condlist: ArrayLike,
    funclist: Sequence[Any | Callable[..., Any]],
    *args: Any,
    **kw: Any,
) -> NDArray[_SCT]: ...
@overload
def piecewise(
    x: ArrayLike,
    condlist: ArrayLike,
    funclist: Sequence[Any | Callable[..., Any]],
    *args: Any,
    **kw: Any,
) -> NDArray[Any]: ...

def select(
    condlist: Sequence[ArrayLike],
    choicelist: Sequence[ArrayLike],
    default: ArrayLike = ...,
) -> NDArray[Any]: ...

@overload
def copy(
    a: _ArrayType,
    order: _OrderKACF,
    subok: L[True],
) -> _ArrayType: ...
@overload
def copy(
    a: _ArrayType,
    order: _OrderKACF = ...,
    *,
    subok: L[True],
) -> _ArrayType: ...
@overload
def copy(
    a: _ArrayLike[_SCT],
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[_SCT]: ...
@overload
def copy(
    a: ArrayLike,
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[Any]: ...

def gradient(
    f: ArrayLike,
    *varargs: ArrayLike,
    axis: None | _ShapeLike = ...,
    edge_order: L[1, 2] = ...,
) -> Any: ...

@overload
def diff(
    a: _T,
    n: L[0],
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> _T: ...
@overload
def diff(
    a: ArrayLike,
    n: int = ...,
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> NDArray[Any]: ...

@overload
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeFloat_co,
    left: None | _FloatLike_co = ...,
    right: None | _FloatLike_co = ...,
    period: None | _FloatLike_co = ...,
) -> NDArray[float64]: ...
@overload
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeComplex_co,
    left: None | _ComplexLike_co = ...,
    right: None | _ComplexLike_co = ...,
    period: None | _FloatLike_co = ...,
) -> NDArray[complex128]: ...

@overload
def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
@overload
def angle(z: object_, deg: bool = ...) -> Any: ...
@overload
def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
@overload
def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...

@overload
def unwrap(
    p: _ArrayLikeFloat_co,
    discont: None | float = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[floating[Any]]: ...
@overload
def unwrap(
    p: _ArrayLikeObject_co,
    discont: None | float = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[object_]: ...

def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...

def trim_zeros(
    filt: _TrimZerosSequence[_T],
    trim: L["f", "b", "fb", "bf"] = ...,
) -> _T: ...

@overload
def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
@overload
def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...

def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...

def disp(
    mesg: object,
    device: None | _SupportsWriteFlush = ...,
    linefeed: bool = ...,
) -> None: ...

@overload
def cov(
    m: _ArrayLikeFloat_co,
    y: None | _ArrayLikeFloat_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: None = ...,
) -> NDArray[floating[Any]]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: None = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: _DTypeLike[_SCT],
) -> NDArray[_SCT]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: DTypeLike,
) -> NDArray[Any]: ...

# NOTE `bias` and `ddof` have been deprecated
@overload
def corrcoef(
    m: _ArrayLikeFloat_co,
    y: None | _ArrayLikeFloat_co = ...,
    rowvar: bool = ...,
    *,
    dtype: None = ...,
) -> NDArray[floating[Any]]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: None = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: _DTypeLike[_SCT],
) -> NDArray[_SCT]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: DTypeLike,
) -> NDArray[Any]: ...

def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...

def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...

def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...

def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...

def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...

def kaiser(
    M: _FloatLike_co,
    beta: _FloatLike_co,
) -> NDArray[floating[Any]]: ...

@overload
def sinc(x: _FloatLike_co) -> floating[Any]: ...
@overload
def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

# NOTE: Deprecated
# def msort(a: ArrayLike) -> NDArray[Any]: ...

@overload
def median(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def median(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def median(
    a: _ArrayLikeTD64_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> timedelta64: ...
@overload
def median(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    out: _ArrayType = ...,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> _ArrayType: ...

_MethodKind = L[
    "inverted_cdf",
    "averaged_inverted_cdf",
    "closest_observation",
    "interpolated_inverted_cdf",
    "hazen",
    "weibull",
    "linear",
    "median_unbiased",
    "normal_unbiased",
    "lower",
    "higher",
    "midpoint",
    "nearest",
]

@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> timedelta64: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> datetime64: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> Any: ...
@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> NDArray[floating[Any]]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> NDArray[timedelta64]: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> NDArray[datetime64]: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
) -> NDArray[object_]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None | _ShapeLike = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None | _ShapeLike = ...,
    out: _ArrayType = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
) -> _ArrayType: ...

# NOTE: Not an alias, but they do have identical signatures
# (that we can reuse)
quantile = percentile
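
# Illustrative sketch (not part of the original stub file): at runtime the
# two functions differ only in the scale of `q` (0-100 for percentile,
# 0-1 for quantile):
#
#     >>> import numpy as np
#     >>> a = np.array([1.0, 2.0, 3.0, 4.0])
#     >>> float(np.percentile(a, 75)) == float(np.quantile(a, 0.75))
#     True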

# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
def trapz(
    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> Any: ...

def meshgrid(
    *xi: ArrayLike,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: L["xy", "ij"] = ...,
) -> list[NDArray[Any]]: ...

@overload
def delete(
    arr: _ArrayLike[_SCT],
    obj: slice | _ArrayLikeInt_co,
    axis: None | SupportsIndex = ...,
) -> NDArray[_SCT]: ...
@overload
def delete(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...

@overload
def insert(
    arr: _ArrayLike[_SCT],
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[_SCT]: ...
@overload
def insert(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...

def append(
    arr: ArrayLike,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...

@overload
def digitize(
    x: _FloatLike_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> intp: ...
@overload
def digitize(
    x: _ArrayLikeFloat_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> NDArray[intp]: ...
1072
teil20b/lib/python3.11/site-packages/numpy/lib/histograms.py
Normal file
File diff suppressed because it is too large
47
teil20b/lib/python3.11/site-packages/numpy/lib/histograms.pyi
Normal file
@@ -0,0 +1,47 @@
from collections.abc import Sequence
from typing import (
    Literal as L,
    Any,
    SupportsIndex,
)

from numpy._typing import (
    NDArray,
    ArrayLike,
)

_BinKind = L[
    "stone",
    "auto",
    "doane",
    "fd",
    "rice",
    "scott",
    "sqrt",
    "sturges",
]

__all__: list[str]

def histogram_bin_edges(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: None | tuple[float, float] = ...,
    weights: None | ArrayLike = ...,
) -> NDArray[Any]: ...

def histogram(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: None | tuple[float, float] = ...,
    density: bool = ...,
    weights: None | ArrayLike = ...,
) -> tuple[NDArray[Any], NDArray[Any]]: ...

def histogramdd(
    sample: ArrayLike,
    bins: SupportsIndex | ArrayLike = ...,
    range: Sequence[tuple[float, float]] = ...,
    density: None | bool = ...,
    weights: None | ArrayLike = ...,
) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
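
# Illustrative sketch (not part of the original stub file): the runtime
# behaviour these signatures describe:
#
#     >>> import numpy as np
#     >>> counts, edges = np.histogram([1, 2, 1], bins=2, range=(1.0, 2.0))
#     >>> counts
#     array([2, 1])
#     >>> len(edges)
#     3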
1046
teil20b/lib/python3.11/site-packages/numpy/lib/index_tricks.py
Normal file
File diff suppressed because it is too large
162
teil20b/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi
Normal file
@@ -0,0 +1,162 @@
from collections.abc import Sequence
from typing import (
    Any,
    TypeVar,
    Generic,
    overload,
    Literal,
    SupportsIndex,
)

from numpy import (
    # Circumvent a naming conflict with `AxisConcatenator.matrix`
    matrix as _Matrix,
    ndenumerate as ndenumerate,
    ndindex as ndindex,
    ndarray,
    dtype,
    integer,
    str_,
    bytes_,
    bool_,
    int_,
    float_,
    complex_,
    intp,
    _OrderCF,
    _ModeKind,
)
from numpy._typing import (
    # Arrays
    ArrayLike,
    _NestedSequence,
    _FiniteNestedSequence,
    NDArray,
    _ArrayLikeInt,

    # DTypes
    DTypeLike,
    _SupportsDType,

    # Shapes
    _ShapeLike,
)

from numpy.core.multiarray import (
    unravel_index as unravel_index,
    ravel_multi_index as ravel_multi_index,
)

_T = TypeVar("_T")
_DType = TypeVar("_DType", bound=dtype[Any])
_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])

__all__: list[str]

@overload
def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
@overload
def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
@overload
def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
@overload
def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
@overload
def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
@overload
def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
@overload
def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...

class nd_grid(Generic[_BoolType]):
    sparse: _BoolType
    def __init__(self, sparse: _BoolType = ...) -> None: ...
    @overload
    def __getitem__(
        self: nd_grid[Literal[False]],
        key: slice | Sequence[slice],
    ) -> NDArray[Any]: ...
    @overload
    def __getitem__(
        self: nd_grid[Literal[True]],
        key: slice | Sequence[slice],
    ) -> list[NDArray[Any]]: ...

class MGridClass(nd_grid[Literal[False]]):
    def __init__(self) -> None: ...

mgrid: MGridClass

class OGridClass(nd_grid[Literal[True]]):
    def __init__(self) -> None: ...

ogrid: OGridClass

class AxisConcatenator:
    axis: int
    matrix: bool
    ndmin: int
    trans1d: int
    def __init__(
        self,
        axis: int = ...,
        matrix: bool = ...,
        ndmin: int = ...,
        trans1d: int = ...,
    ) -> None: ...
    @staticmethod
    @overload
    def concatenate(  # type: ignore[misc]
        *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
    ) -> NDArray[Any]: ...
    @staticmethod
    @overload
    def concatenate(
        *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
    ) -> _ArrayType: ...
    @staticmethod
    def makemat(
        data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
    ) -> _Matrix[Any, Any]: ...

    # TODO: Sort out this `__getitem__` method
    def __getitem__(self, key: Any) -> Any: ...

class RClass(AxisConcatenator):
    axis: Literal[0]
    matrix: Literal[False]
    ndmin: Literal[1]
    trans1d: Literal[-1]
    def __init__(self) -> None: ...

r_: RClass

class CClass(AxisConcatenator):
    axis: Literal[-1]
    matrix: Literal[False]
    ndmin: Literal[2]
    trans1d: Literal[0]
    def __init__(self) -> None: ...

c_: CClass
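
# Illustrative sketch (not part of the original stub file): at runtime r_
# concatenates along the first axis, while c_ stacks 1-D inputs as columns:
#
#     >>> import numpy as np
#     >>> np.r_[np.array([1, 2]), 0, np.array([3])]
#     array([1, 2, 0, 3])
#     >>> np.c_[np.array([1, 2]), np.array([3, 4])]
#     array([[1, 3],
#            [2, 4]])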

class IndexExpression(Generic[_BoolType]):
    maketuple: _BoolType
    def __init__(self, maketuple: _BoolType) -> None: ...
    @overload
    def __getitem__(self, item: _TupType) -> _TupType: ...  # type: ignore[misc]
    @overload
    def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
    @overload
    def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...

index_exp: IndexExpression[Literal[True]]
s_: IndexExpression[Literal[False]]

def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...

# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
177
teil20b/lib/python3.11/site-packages/numpy/lib/mixins.py
Normal file
@@ -0,0 +1,177 @@
"""Mixin classes for custom array types that don't inherit from ndarray."""
from numpy.core import umath as um


__all__ = ['NDArrayOperatorsMixin']


def _disables_array_ufunc(obj):
    """True when __array_ufunc__ is set to None."""
    try:
        return obj.__array_ufunc__ is None
    except AttributeError:
        return False


def _binary_method(ufunc, name):
    """Implement a forward binary method with a ufunc, e.g., __add__."""
    def func(self, other):
        if _disables_array_ufunc(other):
            return NotImplemented
        return ufunc(self, other)
    func.__name__ = '__{}__'.format(name)
    return func


def _reflected_binary_method(ufunc, name):
    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
    def func(self, other):
        if _disables_array_ufunc(other):
            return NotImplemented
        return ufunc(other, self)
    func.__name__ = '__r{}__'.format(name)
    return func


def _inplace_binary_method(ufunc, name):
    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
    def func(self, other):
        return ufunc(self, other, out=(self,))
    func.__name__ = '__i{}__'.format(name)
    return func


def _numeric_methods(ufunc, name):
    """Implement forward, reflected and inplace binary methods with a ufunc."""
    return (_binary_method(ufunc, name),
            _reflected_binary_method(ufunc, name),
            _inplace_binary_method(ufunc, name))


def _unary_method(ufunc, name):
    """Implement a unary special method with a ufunc."""
    def func(self):
        return ufunc(self)
    func.__name__ = '__{}__'.format(name)
    return func


class NDArrayOperatorsMixin:
    """Mixin defining all operator special methods using __array_ufunc__.

    This class implements the special methods for almost all of Python's
    builtin operators defined in the `operator` module, including comparisons
    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
    deferring to the ``__array_ufunc__`` method, which subclasses must
    implement.

    It is useful for writing classes that do not inherit from `numpy.ndarray`,
    but that should support arithmetic and numpy universal functions like
    arrays as described in `A Mechanism for Overriding Ufuncs
    <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.

    As a trivial example, consider this implementation of an ``ArrayLike``
    class that simply wraps a NumPy array and ensures that the result of any
    arithmetic operation is also an ``ArrayLike`` object::

        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
            def __init__(self, value):
                self.value = np.asarray(value)

            # One might also consider adding the built-in list type to this
            # list, to support operations like np.add(array_like, list)
            _HANDLED_TYPES = (np.ndarray, numbers.Number)

            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                out = kwargs.get('out', ())
                for x in inputs + out:
                    # Only support operations with instances of _HANDLED_TYPES.
                    # Use ArrayLike instead of type(self) for isinstance to
                    # allow subclasses that don't override __array_ufunc__ to
                    # handle ArrayLike objects.
                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
                        return NotImplemented

                # Defer to the implementation of the ufunc on unwrapped values.
                inputs = tuple(x.value if isinstance(x, ArrayLike) else x
                               for x in inputs)
                if out:
                    kwargs['out'] = tuple(
                        x.value if isinstance(x, ArrayLike) else x
                        for x in out)
                result = getattr(ufunc, method)(*inputs, **kwargs)

                if type(result) is tuple:
                    # multiple return values
                    return tuple(type(self)(x) for x in result)
                elif method == 'at':
                    # no return value
                    return None
                else:
                    # one return value
                    return type(self)(result)

            def __repr__(self):
                return '%s(%r)' % (type(self).__name__, self.value)

    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
    the result is always another ``ArrayLike``:

        >>> x = ArrayLike([1, 2, 3])
        >>> x - 1
        ArrayLike(array([0, 1, 2]))
        >>> 1 - x
        ArrayLike(array([ 0, -1, -2]))
        >>> np.arange(3) - x
        ArrayLike(array([-1, -1, -1]))
        >>> x - np.arange(3)
        ArrayLike(array([1, 1, 1]))

    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
    with arbitrary, unrecognized types. This ensures that interactions with
    ArrayLike preserve a well-defined casting hierarchy.

    .. versionadded:: 1.13
    """
    __slots__ = ()
    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
    # overrides NEP.

    # comparisons don't have reflected and in-place versions
    __lt__ = _binary_method(um.less, 'lt')
    __le__ = _binary_method(um.less_equal, 'le')
    __eq__ = _binary_method(um.equal, 'eq')
    __ne__ = _binary_method(um.not_equal, 'ne')
    __gt__ = _binary_method(um.greater, 'gt')
    __ge__ = _binary_method(um.greater_equal, 'ge')

    # numeric methods
    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
        um.matmul, 'matmul')
    # Python 3 does not use __div__, __rdiv__, or __idiv__
    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
        um.true_divide, 'truediv')
    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
        um.floor_divide, 'floordiv')
    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
    __divmod__ = _binary_method(um.divmod, 'divmod')
    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
    # __idivmod__ does not exist
    # TODO: handle the optional third argument for __pow__?
    __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
    __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
        um.left_shift, 'lshift')
    __rshift__, __rrshift__, __irshift__ = _numeric_methods(
        um.right_shift, 'rshift')
    __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
    __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
    __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')

    # unary methods
    __neg__ = _unary_method(um.negative, 'neg')
    __pos__ = _unary_method(um.positive, 'pos')
    __abs__ = _unary_method(um.absolute, 'abs')
    __invert__ = _unary_method(um.invert, 'invert')
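A minimal runnable sketch of how the mixin above is meant to be used; the class name `Wrapped` and its simplified dispatch are illustrative only, not part of this file:

import numbers
import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    # Hypothetical demo class: the mixin supplies every operator, and this
    # single hook unwraps operands, applies the ufunc, and re-wraps the result.
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        if not all(isinstance(x, (np.ndarray, numbers.Number, Wrapped))
                   for x in inputs):
            return NotImplemented
        inputs = tuple(x.value if isinstance(x, Wrapped) else x
                       for x in inputs)
        return type(self)(getattr(ufunc, method)(*inputs, **kwargs))

w = Wrapped([1.0, 2.0, 3.0])
print((w + 1).value)  # [2. 3. 4.] -- __add__ comes from the mixin
print((-w).value)     # [-1. -2. -3.] -- so does __neg__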
74
teil20b/lib/python3.11/site-packages/numpy/lib/mixins.pyi
Normal file
@@ -0,0 +1,74 @@
from abc import ABCMeta, abstractmethod
from typing import Literal as L, Any

from numpy import ufunc

__all__: list[str]

# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
# even though it's reliant on subclasses implementing `__array_ufunc__`

# NOTE: The accepted input- and output-types of the various dunders are
# completely dependent on how `__array_ufunc__` is implemented.
# As such, only limited type safety can be provided here.

class NDArrayOperatorsMixin(metaclass=ABCMeta):
    @abstractmethod
    def __array_ufunc__(
        self,
        ufunc: ufunc,
        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
        *inputs: Any,
        **kwargs: Any,
    ) -> Any: ...
    def __lt__(self, other: Any) -> Any: ...
    def __le__(self, other: Any) -> Any: ...
    def __eq__(self, other: Any) -> Any: ...
    def __ne__(self, other: Any) -> Any: ...
    def __gt__(self, other: Any) -> Any: ...
    def __ge__(self, other: Any) -> Any: ...
    def __add__(self, other: Any) -> Any: ...
    def __radd__(self, other: Any) -> Any: ...
    def __iadd__(self, other: Any) -> Any: ...
    def __sub__(self, other: Any) -> Any: ...
    def __rsub__(self, other: Any) -> Any: ...
    def __isub__(self, other: Any) -> Any: ...
    def __mul__(self, other: Any) -> Any: ...
    def __rmul__(self, other: Any) -> Any: ...
    def __imul__(self, other: Any) -> Any: ...
    def __matmul__(self, other: Any) -> Any: ...
    def __rmatmul__(self, other: Any) -> Any: ...
    def __imatmul__(self, other: Any) -> Any: ...
    def __truediv__(self, other: Any) -> Any: ...
    def __rtruediv__(self, other: Any) -> Any: ...
    def __itruediv__(self, other: Any) -> Any: ...
    def __floordiv__(self, other: Any) -> Any: ...
    def __rfloordiv__(self, other: Any) -> Any: ...
    def __ifloordiv__(self, other: Any) -> Any: ...
    def __mod__(self, other: Any) -> Any: ...
    def __rmod__(self, other: Any) -> Any: ...
    def __imod__(self, other: Any) -> Any: ...
    def __divmod__(self, other: Any) -> Any: ...
    def __rdivmod__(self, other: Any) -> Any: ...
    def __pow__(self, other: Any) -> Any: ...
    def __rpow__(self, other: Any) -> Any: ...
    def __ipow__(self, other: Any) -> Any: ...
    def __lshift__(self, other: Any) -> Any: ...
    def __rlshift__(self, other: Any) -> Any: ...
    def __ilshift__(self, other: Any) -> Any: ...
    def __rshift__(self, other: Any) -> Any: ...
    def __rrshift__(self, other: Any) -> Any: ...
    def __irshift__(self, other: Any) -> Any: ...
    def __and__(self, other: Any) -> Any: ...
    def __rand__(self, other: Any) -> Any: ...
    def __iand__(self, other: Any) -> Any: ...
    def __xor__(self, other: Any) -> Any: ...
    def __rxor__(self, other: Any) -> Any: ...
    def __ixor__(self, other: Any) -> Any: ...
    def __or__(self, other: Any) -> Any: ...
    def __ror__(self, other: Any) -> Any: ...
    def __ior__(self, other: Any) -> Any: ...
    def __neg__(self) -> Any: ...
    def __pos__(self) -> Any: ...
    def __abs__(self) -> Any: ...
    def __invert__(self) -> Any: ...
1887
teil20b/lib/python3.11/site-packages/numpy/lib/nanfunctions.py
Normal file
File diff suppressed because it is too large
38
teil20b/lib/python3.11/site-packages/numpy/lib/nanfunctions.pyi
Normal file
@@ -0,0 +1,38 @@
from numpy.core.fromnumeric import (
    amin,
    amax,
    argmin,
    argmax,
    sum,
    prod,
    cumsum,
    cumprod,
    mean,
    var,
    std
)

from numpy.lib.function_base import (
    median,
    percentile,
    quantile,
)

__all__: list[str]

# NOTE: In reality these functions are not aliases but distinct functions
# with identical signatures.
nanmin = amin
nanmax = amax
nanargmin = argmin
nanargmax = argmax
nansum = sum
nanprod = prod
nancumsum = cumsum
nancumprod = cumprod
nanmean = mean
nanvar = var
nanstd = std
nanmedian = median
nanpercentile = percentile
nanquantile = quantile
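For orientation, a small runtime sketch of the difference these nan-variants make (plain NumPy; nothing here is part of the stub itself):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.mean(a))     # nan -- the plain reduction propagates the NaN
print(np.nanmean(a))  # 2.0 -- the nan-variant ignores it
print(np.nanmax(a))   # 3.0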
2547
teil20b/lib/python3.11/site-packages/numpy/lib/npyio.py
Normal file
File diff suppressed because it is too large
330
teil20b/lib/python3.11/site-packages/numpy/lib/npyio.pyi
Normal file
@@ -0,0 +1,330 @@
import os
import sys
import zipfile
import types
from re import Pattern
from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
from typing import (
    Literal as L,
    Any,
    TypeVar,
    Generic,
    IO,
    overload,
    Protocol,
)

from numpy import (
    DataSource as DataSource,
    ndarray,
    recarray,
    dtype,
    generic,
    float64,
    void,
    record,
)

from numpy.ma.mrecords import MaskedRecords
from numpy._typing import (
    ArrayLike,
    DTypeLike,
    NDArray,
    _DTypeLike,
    _SupportsArrayFunc,
)

from numpy.core.multiarray import (
    packbits as packbits,
    unpackbits as unpackbits,
)

_T = TypeVar("_T")
_T_contra = TypeVar("_T_contra", contravariant=True)
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)

class _SupportsGetItem(Protocol[_T_contra, _T_co]):
    def __getitem__(self, key: _T_contra, /) -> _T_co: ...

class _SupportsRead(Protocol[_CharType_co]):
    def read(self) -> _CharType_co: ...

class _SupportsReadSeek(Protocol[_CharType_co]):
    def read(self, n: int, /) -> _CharType_co: ...
    def seek(self, offset: int, whence: int, /) -> object: ...

class _SupportsWrite(Protocol[_CharType_contra]):
    def write(self, s: _CharType_contra, /) -> object: ...

__all__: list[str]

class BagObj(Generic[_T_co]):
    def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
    def __getattribute__(self, key: str) -> _T_co: ...
    def __dir__(self) -> list[str]: ...

class NpzFile(Mapping[str, NDArray[Any]]):
    zip: zipfile.ZipFile
    fid: None | IO[str]
    files: list[str]
    allow_pickle: bool
    pickle_kwargs: None | Mapping[str, Any]
    _MAX_REPR_ARRAY_COUNT: int
    # Represent `f` as a mutable property so we can access the type of `self`
    @property
    def f(self: _T) -> BagObj[_T]: ...
    @f.setter
    def f(self: _T, value: BagObj[_T]) -> None: ...
    def __init__(
        self,
        fid: IO[str],
        own_fid: bool = ...,
        allow_pickle: bool = ...,
        pickle_kwargs: None | Mapping[str, Any] = ...,
    ) -> None: ...
    def __enter__(self: _T) -> _T: ...
    def __exit__(
        self,
        exc_type: None | type[BaseException],
        exc_value: None | BaseException,
        traceback: None | types.TracebackType,
        /,
    ) -> None: ...
    def close(self) -> None: ...
    def __del__(self) -> None: ...
    def __iter__(self) -> Iterator[str]: ...
    def __len__(self) -> int: ...
    def __getitem__(self, key: str) -> NDArray[Any]: ...
    def __contains__(self, key: str) -> bool: ...
    def __repr__(self) -> str: ...

# NOTE: Returns a `NpzFile` if file is a zip file;
# returns an `ndarray`/`memmap` otherwise
def load(
    file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
    mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
    encoding: L["ASCII", "latin1", "bytes"] = ...,
) -> Any: ...

def save(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    arr: ArrayLike,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
) -> None: ...

def savez(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...

def savez_compressed(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...

# File-like objects only have to implement `__iter__` and,
# optionally, `encoding`
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[float64]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...

def savetxt(
    fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
    X: ArrayLike,
    fmt: str | Sequence[str] = ...,
    delimiter: str = ...,
    newline: str = ...,
    header: str = ...,
    footer: str = ...,
    comments: str = ...,
    encoding: None | str = ...,
) -> None: ...

@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: _DTypeLike[_SCT],
    encoding: None | str = ...
) -> NDArray[_SCT]: ...
@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: DTypeLike,
    encoding: None | str = ...
) -> NDArray[Any]: ...

@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...

@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...

@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...
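A round-trip sketch of the text I/O typed above; the file name demo.txt is purely illustrative:

import numpy as np

data = np.array([[1.5, 2.0], [3.0, 4.5]])
np.savetxt("demo.txt", data, fmt="%.2f", delimiter=",", header="a,b")
# loadtxt's default comments='#' skips the "# a,b" header savetxt wrote.
back = np.loadtxt("demo.txt", delimiter=",")
print(np.allclose(data, back))  # True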
1453
teil20b/lib/python3.11/site-packages/numpy/lib/polynomial.py
Normal file
File diff suppressed because it is too large
303
teil20b/lib/python3.11/site-packages/numpy/lib/polynomial.pyi
Normal file
@@ -0,0 +1,303 @@
from typing import (
    Literal as L,
    overload,
    Any,
    SupportsInt,
    SupportsIndex,
    TypeVar,
    NoReturn,
)

from numpy import (
    RankWarning as RankWarning,
    poly1d as poly1d,
    unsignedinteger,
    signedinteger,
    floating,
    complexfloating,
    bool_,
    int32,
    int64,
    float64,
    complex128,
    object_,
)

from numpy._typing import (
    NDArray,
    ArrayLike,
    _ArrayLikeBool_co,
    _ArrayLikeUInt_co,
    _ArrayLikeInt_co,
    _ArrayLikeFloat_co,
    _ArrayLikeComplex_co,
    _ArrayLikeObject_co,
)

_T = TypeVar("_T")

_2Tup = tuple[_T, _T]
_5Tup = tuple[
    _T,
    NDArray[float64],
    NDArray[int32],
    NDArray[float64],
    NDArray[float64],
]

__all__: list[str]

def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...

# Returns either a float or complex array depending on the input values.
# See `np.linalg.eigvals`.
def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...

@overload
def polyint(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
) -> poly1d: ...
@overload
def polyint(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyint(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyint(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeObject_co = ...,
) -> NDArray[object_]: ...

@overload
def polyder(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
) -> poly1d: ...
@overload
def polyder(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyder(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyder(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[object_]: ...

@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[float64]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[complex128]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[complex128]]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[complex128]]: ...

@overload
def polyval(
    p: _ArrayLikeBool_co,
    x: _ArrayLikeBool_co,
) -> NDArray[int64]: ...
@overload
def polyval(
    p: _ArrayLikeUInt_co,
    x: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeInt_co,
    x: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeFloat_co,
    x: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeComplex_co,
    x: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyval(
    p: _ArrayLikeObject_co,
    x: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

@overload
def polyadd(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NDArray[bool_]: ...
@overload
def polyadd(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

@overload
def polysub(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NoReturn: ...
@overload
def polysub(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...

# NOTE: Not an alias, but they do have the same signature (that we can reuse)
polymul = polyadd

@overload
def polydiv(
    u: poly1d,
    v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    v: poly1d,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeFloat_co,
    v: _ArrayLikeFloat_co,
) -> _2Tup[NDArray[floating[Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co,
    v: _ArrayLikeComplex_co,
) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeObject_co,
    v: _ArrayLikeObject_co,
) -> _2Tup[NDArray[Any]]: ...
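A short sketch matching the polyfit/polyval overloads above (real input, so the float64 branch applies):

import numpy as np

x = np.linspace(0.0, 1.0, 20)
y = 2.0 * x**2 - 3.0 * x + 1.0
coeffs = np.polyfit(x, y, deg=2)  # NDArray[float64] per the first overload
print(np.round(coeffs, 6))                    # [ 2. -3.  1.]
print(np.allclose(np.polyval(coeffs, x), y))  # True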
1673
teil20b/lib/python3.11/site-packages/numpy/lib/recfunctions.py
Normal file
File diff suppressed because it is too large
625
teil20b/lib/python3.11/site-packages/numpy/lib/scimath.py
Normal file
@@ -0,0 +1,625 @@
"""
Wrapper functions for more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.

For example, for functions like `log` with branch cuts, the versions in this
module provide the mathematically valid answers in the complex plane::

  >>> import math
  >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
  True

Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.

Functions
---------

.. autosummary::
   :toctree: generated/

   sqrt
   log
   log2
   logn
   log10
   power
   arccos
   arcsin
   arctanh

"""
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.core.overrides import array_function_dispatch
from numpy.lib.type_check import isreal


__all__ = [
    'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
    'arctanh'
]


_ln2 = nx.log(2.0)


def _tocomplex(arr):
    """Convert its input `arr` to a complex array.

    The input is returned as a complex array of the smallest type that will fit
    the original data: types like single, byte, short, etc. become csingle,
    while others become cdouble.

    A copy of the input is always made.

    Parameters
    ----------
    arr : array

    Returns
    -------
    array
        An array with the same input data as the input but in complex form.

    Examples
    --------

    First, consider an input of type short:

    >>> a = np.array([1,2,3],np.short)

    >>> ac = np.lib.scimath._tocomplex(a); ac
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)

    >>> ac.dtype
    dtype('complex64')

    If the input is of type double, the output is correspondingly of the
    complex double type as well:

    >>> b = np.array([1,2,3],np.double)

    >>> bc = np.lib.scimath._tocomplex(b); bc
    array([1.+0.j, 2.+0.j, 3.+0.j])

    >>> bc.dtype
    dtype('complex128')

    Note that even if the input was complex to begin with, a copy is still
    made, since the astype() method always copies:

    >>> c = np.array([1,2,3],np.csingle)

    >>> cc = np.lib.scimath._tocomplex(c); cc
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)

    >>> c *= 2; c
    array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)

    >>> cc
    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
    """
    if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
                                   nt.ushort, nt.csingle)):
        return arr.astype(nt.csingle)
    else:
        return arr.astype(nt.cdouble)


def _fix_real_lt_zero(x):
    """Convert `x` to complex if it has real, negative components.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> np.lib.scimath._fix_real_lt_zero([1,2])
    array([1, 2])

    >>> np.lib.scimath._fix_real_lt_zero([-1,2])
    array([-1.+0.j,  2.+0.j])

    """
    x = asarray(x)
    if any(isreal(x) & (x < 0)):
        x = _tocomplex(x)
    return x


def _fix_int_lt_zero(x):
    """Convert `x` to double if it has real, negative components.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> np.lib.scimath._fix_int_lt_zero([1,2])
    array([1, 2])

    >>> np.lib.scimath._fix_int_lt_zero([-1,2])
    array([-1.,  2.])
    """
    x = asarray(x)
    if any(isreal(x) & (x < 0)):
        x = x * 1.0
    return x


def _fix_real_abs_gt_1(x):
    """Convert `x` to complex if it has real components x_i with abs(x_i)>1.

    Otherwise, output is just the array version of the input (via asarray).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    array

    Examples
    --------
    >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
    array([0, 1])

    >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
    array([0.+0.j, 2.+0.j])
    """
    x = asarray(x)
    if any(isreal(x) & (abs(x) > 1)):
        x = _tocomplex(x)
    return x


def _unary_dispatcher(x):
    return (x,)


@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
    """
    Compute the square root of x.

    For negative input elements, a complex value is returned
    (unlike `numpy.sqrt` which returns NaN).

    Parameters
    ----------
    x : array_like
       The input value(s).

    Returns
    -------
    out : ndarray or scalar
       The square root of `x`. If `x` was a scalar, so is `out`,
       otherwise an array is returned.

    See Also
    --------
    numpy.sqrt

    Examples
    --------
    For real, non-negative inputs this works just like `numpy.sqrt`:

    >>> np.emath.sqrt(1)
    1.0
    >>> np.emath.sqrt([1, 4])
    array([1., 2.])

    But it automatically handles negative inputs:

    >>> np.emath.sqrt(-1)
    1j
    >>> np.emath.sqrt([-1,4])
    array([0.+1.j, 2.+0.j])

    Different results are expected because floating point 0.0 and -0.0 are
    distinct.

    For more control, explicitly use complex() as follows:

    >>> np.emath.sqrt(complex(-4.0, 0.0))
    2j
    >>> np.emath.sqrt(complex(-4.0, -0.0))
    -2j
    """
    x = _fix_real_lt_zero(x)
    return nx.sqrt(x)


@array_function_dispatch(_unary_dispatcher)
def log(x):
    """
    Compute the natural logarithm of `x`.

    Return the "principal value" (for a description of this, see `numpy.log`)
    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
    complex principal value is returned.

    Parameters
    ----------
    x : array_like
       The value(s) whose log is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The log of the `x` value(s). If `x` was a scalar, so is `out`,
       otherwise an array is returned.

    See Also
    --------
    numpy.log

    Notes
    -----
    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
    (note, however, that otherwise `numpy.log` and this `log` are identical,
    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
    notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------
    >>> np.emath.log(np.exp(1))
    1.0

    Negative arguments are handled "correctly" (recall that
    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):

    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
    True

    """
    x = _fix_real_lt_zero(x)
    return nx.log(x)


@array_function_dispatch(_unary_dispatcher)
def log10(x):
    """
    Compute the logarithm base 10 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
    returns ``inf``). Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like or scalar
       The value(s) whose log base 10 is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
       otherwise an array object is returned.

    See Also
    --------
    numpy.log10

    Notes
    -----
    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
    (note, however, that otherwise `numpy.log10` and this `log10` are
    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
    and, notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------

    (We set the printing precision so the example can be auto-tested)

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log10(10**1)
    1.0

    >>> np.emath.log10([-10**1, -10**2, 10**2])
    array([1.+1.3644j, 2.+1.3644j, 2.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    return nx.log10(x)


def _logn_dispatcher(n, x):
    return (n, x,)


@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
    """
    Take log base n of x.

    If `x` contains negative inputs, the answer is computed and returned in the
    complex domain.

    Parameters
    ----------
    n : array_like
       The integer base(s) in which the log is taken.
    x : array_like
       The value(s) whose log base `n` is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The log base `n` of the `x` value(s). If `x` was a scalar, so is
       `out`, otherwise an array is returned.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.logn(2, [4, 8])
    array([2., 3.])
    >>> np.emath.logn(2, [-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    n = _fix_real_lt_zero(n)
    return nx.log(x)/nx.log(n)


@array_function_dispatch(_unary_dispatcher)
def log2(x):
    """
    Compute the logarithm base 2 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
    ``inf``). Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like
       The value(s) whose log base 2 is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
       otherwise an array is returned.

    See Also
    --------
    numpy.log2

    Notes
    -----
    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
    (note, however, that otherwise `numpy.log2` and this `log2` are
    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
    and, notably, the complex principal value if ``x.imag != 0``).

    Examples
    --------
    We set the printing precision so the example can be auto-tested:

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log2(8)
    3.0
    >>> np.emath.log2([-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])

    """
    x = _fix_real_lt_zero(x)
    return nx.log2(x)


def _power_dispatcher(x, p):
    return (x, p)


@array_function_dispatch(_power_dispatcher)
def power(x, p):
    """
    Return x to the power p, (x**p).

    If `x` contains negative values, the output is converted to the
    complex domain.

    Parameters
    ----------
    x : array_like
        The input value(s).
    p : array_like of ints
        The power(s) to which `x` is raised. If `x` contains multiple values,
        `p` has to either be a scalar, or contain the same number of values
        as `x`. In the latter case, the result is
        ``x[0]**p[0], x[1]**p[1], ...``.

    Returns
    -------
    out : ndarray or scalar
        The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
        otherwise an array is returned.

    See Also
    --------
    numpy.power

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.power([2, 4], 2)
    array([ 4, 16])
    >>> np.emath.power([2, 4], -2)
    array([0.25  ,  0.0625])
    >>> np.emath.power([-2, 4], 2)
    array([ 4.-0.j, 16.+0.j])

    """
    x = _fix_real_lt_zero(x)
    p = _fix_int_lt_zero(p)
    return nx.power(x, p)


@array_function_dispatch(_unary_dispatcher)
def arccos(x):
    """
    Compute the inverse cosine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.

    Parameters
    ----------
    x : array_like or scalar
       The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
       is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arccos

    Notes
    -----
    For an arccos() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arccos`.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arccos(1) # a scalar is returned
    0.0

    >>> np.emath.arccos([1,2])
    array([0.-0.j   , 0.-1.317j])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arccos(x)


@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
    """
    Compute the inverse sine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
    returned.

    Parameters
    ----------
    x : array_like or scalar
       The value(s) whose arcsin is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
       is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arcsin

    Notes
    -----
    For an arcsin() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arcsin`.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arcsin(0)
    0.0

    >>> np.emath.arcsin([0,1])
    array([0.    , 1.5708])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arcsin(x)


@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
    """
    Compute the inverse hyperbolic tangent of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
    ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
    complex, the result is complex. Finally, `x = 1` returns ``inf`` and
    ``x = -1`` returns ``-inf``.

    Parameters
    ----------
    x : array_like
       The value(s) whose arctanh is (are) required.

    Returns
    -------
    out : ndarray or scalar
       The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
       a scalar so is `out`, otherwise an array is returned.


    See Also
    --------
    numpy.arctanh

    Notes
    -----
    For an arctanh() that returns ``NAN`` when real `x` is not in the
    interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
    return +/-inf for ``x = +/-1``).

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> from numpy.testing import suppress_warnings
    >>> with suppress_warnings() as sup:
    ...     sup.filter(RuntimeWarning)
    ...     np.emath.arctanh(np.eye(2))
    array([[inf,  0.],
           [ 0., inf]])
    >>> np.emath.arctanh([1j])
    array([0.+0.7854j])

    """
    x = _fix_real_abs_gt_1(x)
    return nx.arctanh(x)
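A runtime sketch of the domain switch implemented above, contrasted with the plain numpy functions:

import numpy as np

print(np.sqrt(-1.0))          # nan (plus a RuntimeWarning): real domain
print(np.emath.sqrt(-1.0))    # 1j -- scimath promotes to the complex domain
print(np.emath.logn(3, 9.0))  # 2.0, computed as log(9)/log(3)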
94
teil20b/lib/python3.11/site-packages/numpy/lib/scimath.pyi
Normal file
@@ -0,0 +1,94 @@
from typing import overload, Any

from numpy import complexfloating

from numpy._typing import (
    NDArray,
    _ArrayLikeFloat_co,
    _ArrayLikeComplex_co,
    _ComplexLike_co,
    _FloatLike_co,
)

__all__: list[str]

@overload
def sqrt(x: _FloatLike_co) -> Any: ...
@overload
def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log(x: _FloatLike_co) -> Any: ...
@overload
def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log10(x: _FloatLike_co) -> Any: ...
@overload
def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log2(x: _FloatLike_co) -> Any: ...
@overload
def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
@overload
def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
@overload
def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arccos(x: _FloatLike_co) -> Any: ...
@overload
def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arcsin(x: _FloatLike_co) -> Any: ...
@overload
def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arctanh(x: _FloatLike_co) -> Any: ...
@overload
def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
12
teil20b/lib/python3.11/site-packages/numpy/lib/setup.py
Normal file
@@ -0,0 +1,12 @@
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration('lib', parent_package, top_path)
    config.add_subpackage('tests')
    config.add_data_dir('tests/data')
    config.add_data_files('*.pyi')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
1274
teil20b/lib/python3.11/site-packages/numpy/lib/shape_base.py
Normal file
File diff suppressed because it is too large
215
teil20b/lib/python3.11/site-packages/numpy/lib/shape_base.pyi
Normal file
@@ -0,0 +1,215 @@
|
||||
from collections.abc import Callable, Sequence
|
||||
from typing import TypeVar, Any, overload, SupportsIndex, Protocol
|
||||
|
||||
from numpy import (
|
||||
generic,
|
||||
integer,
|
||||
ufunc,
|
||||
bool_,
|
||||
unsignedinteger,
|
||||
signedinteger,
|
||||
floating,
|
||||
complexfloating,
|
||||
object_,
|
||||
)
|
||||
|
||||
from numpy._typing import (
|
||||
ArrayLike,
|
||||
NDArray,
|
||||
_ShapeLike,
|
||||
_ArrayLike,
|
||||
_ArrayLikeBool_co,
|
||||
_ArrayLikeUInt_co,
|
||||
_ArrayLikeInt_co,
|
||||
_ArrayLikeFloat_co,
|
||||
_ArrayLikeComplex_co,
|
||||
_ArrayLikeObject_co,
|
||||
)
|
||||
|
||||
from numpy.core.shape_base import vstack
|
||||
|
||||
_SCT = TypeVar("_SCT", bound=generic)
|
||||
|
||||
# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
|
||||
# give them unique names for the sake of clarity
|
||||
class _ArrayWrap(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
array: NDArray[Any],
|
||||
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
|
||||
/,
|
||||
) -> Any: ...
|
||||
|
||||
class _ArrayPrepare(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
array: NDArray[Any],
|
||||
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
|
||||
/,
|
||||
) -> Any: ...
|
||||
|
||||
class _SupportsArrayWrap(Protocol):
|
||||
@property
|
||||
def __array_wrap__(self) -> _ArrayWrap: ...
|
||||
|
||||
class _SupportsArrayPrepare(Protocol):
|
||||
@property
|
||||
def __array_prepare__(self) -> _ArrayPrepare: ...
|
||||
|
||||
__all__: list[str]
|
||||
|
||||
row_stack = vstack
|
||||
|
||||
def take_along_axis(
|
||||
arr: _SCT | NDArray[_SCT],
|
||||
indices: NDArray[integer[Any]],
|
||||
axis: None | int,
|
||||
) -> NDArray[_SCT]: ...
|
||||
|
||||
def put_along_axis(
|
||||
arr: NDArray[_SCT],
|
||||
indices: NDArray[integer[Any]],
|
||||
values: ArrayLike,
|
||||
axis: None | int,
|
||||
) -> None: ...
|
||||
|
||||
# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
|
||||
# xref python/mypy#8645
|
||||
@overload
|
||||
def apply_along_axis(
|
||||
func1d: Callable[..., _ArrayLike[_SCT]],
|
||||
axis: SupportsIndex,
|
||||
arr: ArrayLike,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def apply_along_axis(
|
||||
func1d: Callable[..., ArrayLike],
|
||||
axis: SupportsIndex,
|
||||
arr: ArrayLike,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
def apply_over_axes(
|
||||
func: Callable[[NDArray[Any], int], NDArray[_SCT]],
|
||||
a: ArrayLike,
|
||||
axes: int | Sequence[int],
|
||||
) -> NDArray[_SCT]: ...
|
||||
|
||||
@overload
|
||||
def expand_dims(
|
||||
a: _ArrayLike[_SCT],
|
||||
axis: _ShapeLike,
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def expand_dims(
|
||||
a: ArrayLike,
|
||||
axis: _ShapeLike,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def array_split(
|
||||
ary: _ArrayLike[_SCT],
|
||||
indices_or_sections: _ShapeLike,
|
||||
axis: SupportsIndex = ...,
|
||||
) -> list[NDArray[_SCT]]: ...
|
||||
@overload
|
||||
def array_split(
|
||||
ary: ArrayLike,
|
||||
indices_or_sections: _ShapeLike,
|
||||
axis: SupportsIndex = ...,
|
||||
) -> list[NDArray[Any]]: ...
|
||||
|
||||
@overload
|
||||
def split(
|
||||
ary: _ArrayLike[_SCT],
|
||||
indices_or_sections: _ShapeLike,
|
||||
axis: SupportsIndex = ...,
|
||||
) -> list[NDArray[_SCT]]: ...
|
||||
@overload
|
||||
def split(
|
||||
ary: ArrayLike,
|
||||
indices_or_sections: _ShapeLike,
|
||||
axis: SupportsIndex = ...,
|
||||
) -> list[NDArray[Any]]: ...
|
||||
|
||||
@overload
|
||||
def hsplit(
|
||||
ary: _ArrayLike[_SCT],
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[_SCT]]: ...
|
||||
@overload
|
||||
def hsplit(
|
||||
ary: ArrayLike,
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[Any]]: ...
|
||||
|
||||
@overload
|
||||
def vsplit(
|
||||
ary: _ArrayLike[_SCT],
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[_SCT]]: ...
|
||||
@overload
|
||||
def vsplit(
|
||||
ary: ArrayLike,
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[Any]]: ...
|
||||
|
||||
@overload
|
||||
def dsplit(
|
||||
ary: _ArrayLike[_SCT],
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[_SCT]]: ...
|
||||
@overload
|
||||
def dsplit(
|
||||
ary: ArrayLike,
|
||||
indices_or_sections: _ShapeLike,
|
||||
) -> list[NDArray[Any]]: ...
|
||||
|
||||
@overload
|
||||
def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
|
||||
@overload
|
||||
def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...
|
||||
|
||||
@overload
|
||||
def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
|
||||
@overload
|
||||
def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
|
||||
|
||||
@overload
|
||||
def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
|
||||
@overload
|
||||
def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
|
||||
@overload
|
||||
def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
|
||||
@overload
|
||||
def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
|
||||
@overload
|
||||
def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
|
||||
@overload
|
||||
def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
|
||||
@overload
|
||||
def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
|
||||
|
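A small runtime check of the promotion ladder these overloads describe (illustrative, not part of the stub file):

import numpy as np

# The result scalar type follows the widest input, matching the
# overload order above (bool < unsigned < signed < float < complex).
print(np.kron([True, False], [1, 2]).dtype)   # platform integer
print(np.kron([1, 2], [0.5, 1.5]).dtype)      # float64
print(np.kron([1 + 0j], [1.0]).dtype)         # complex128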
||||
@overload
|
||||
def tile(
|
||||
A: _ArrayLike[_SCT],
|
||||
reps: int | Sequence[int],
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def tile(
|
||||
A: ArrayLike,
|
||||
reps: int | Sequence[int],
|
||||
) -> NDArray[Any]: ...
|
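The paired overloads above follow a common pattern in these stubs: a typed `_ArrayLike[_SCT]` input keeps its scalar type, while a plain `ArrayLike` falls back to `NDArray[Any]`. A sketch of what a type checker would infer under them (illustrative):

import numpy as np
from numpy.typing import NDArray

x: NDArray[np.float64] = np.ones(3)
y = np.tile(x, 2)        # first overload: inferred as NDArray[float64]
z = np.tile([1, 2], 2)   # untyped ArrayLike: falls back to NDArray[Any]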
547
teil20b/lib/python3.11/site-packages/numpy/lib/stride_tricks.py
Normal file
547
teil20b/lib/python3.11/site-packages/numpy/lib/stride_tricks.py
Normal file
@@ -0,0 +1,547 @@
|
||||
"""
|
||||
Utilities that manipulate strides to achieve desirable effects.
|
||||
|
||||
An explanation of strides can be found in the "ndarray.rst" file in the
|
||||
NumPy reference guide.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy.core.numeric import normalize_axis_tuple
|
||||
from numpy.core.overrides import array_function_dispatch, set_module
|
||||
|
||||
__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
|
||||
|
||||
|
||||
class DummyArray:
|
||||
"""Dummy object that just exists to hang __array_interface__ dictionaries
|
||||
and possibly keep alive a reference to a base array.
|
||||
"""
|
||||
|
||||
def __init__(self, interface, base=None):
|
||||
self.__array_interface__ = interface
|
||||
self.base = base
|
||||
|
||||
|
||||
def _maybe_view_as_subclass(original_array, new_array):
|
||||
if type(original_array) is not type(new_array):
|
||||
# if input was an ndarray subclass and subclasses were OK,
|
||||
# then view the result as that subclass.
|
||||
new_array = new_array.view(type=type(original_array))
|
||||
# Since we have done something akin to a view from original_array, we
|
||||
# should let the subclass finalize (if it has it implemented, i.e., is
|
||||
# not None).
|
||||
if new_array.__array_finalize__:
|
||||
new_array.__array_finalize__(original_array)
|
||||
return new_array
|
||||
|
||||
|
||||
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
|
||||
"""
|
||||
Create a view into the array with the given shape and strides.
|
||||
|
||||
.. warning:: This function has to be used with extreme care, see notes.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray
|
||||
Array to create a new view of.
|
||||
shape : sequence of int, optional
|
||||
The shape of the new array. Defaults to ``x.shape``.
|
||||
strides : sequence of int, optional
|
||||
The strides of the new array. Defaults to ``x.strides``.
|
||||
subok : bool, optional
|
||||
.. versionadded:: 1.10
|
||||
|
||||
If True, subclasses are preserved.
|
||||
writeable : bool, optional
|
||||
.. versionadded:: 1.12
|
||||
|
||||
If set to False, the returned array will always be readonly.
|
||||
Otherwise it will be writable if the original array was. It
|
||||
is advisable to set this to False if possible (see Notes).
|
||||
|
||||
Returns
|
||||
-------
|
||||
view : ndarray
|
||||
|
||||
See also
|
||||
--------
|
||||
broadcast_to : broadcast an array to a given shape.
|
||||
reshape : reshape an array.
|
||||
lib.stride_tricks.sliding_window_view :
|
||||
user-friendly and safe function for the creation of sliding window views.
|
||||
|
||||
Notes
|
||||
-----
|
||||
``as_strided`` creates a view into the array given the exact strides
|
||||
and shape. This means it manipulates the internal data structure of
|
||||
ndarray and, if done incorrectly, the array elements can point to
|
||||
invalid memory and can corrupt results or crash your program.
|
||||
It is advisable to always use the original ``x.strides`` when
|
||||
calculating new strides to avoid reliance on a contiguous memory
|
||||
layout.
|
||||
|
||||
Furthermore, arrays created with this function often contain
|
||||
self-overlapping memory, so that two elements are identical.
|
||||
Vectorized write operations on such arrays will typically be
|
||||
unpredictable. They may even give different results for small, large,
|
||||
or transposed arrays.
|
||||
|
||||
Since writing to these arrays has to be tested and done with great
|
||||
care, you may want to use ``writeable=False`` to avoid accidental write
|
||||
operations.
|
||||
|
||||
For these reasons it is advisable to avoid ``as_strided`` when
|
||||
possible.
|
||||
"""
|
||||
# first convert input to array, possibly keeping subclass
|
||||
x = np.array(x, copy=False, subok=subok)
|
||||
interface = dict(x.__array_interface__)
|
||||
if shape is not None:
|
||||
interface['shape'] = tuple(shape)
|
||||
if strides is not None:
|
||||
interface['strides'] = tuple(strides)
|
||||
|
||||
array = np.asarray(DummyArray(interface, base=x))
|
||||
# The route via `__array_interface__` does not preserve structured
|
||||
# dtypes. Since dtype should remain unchanged, we set it explicitly.
|
||||
array.dtype = x.dtype
|
||||
|
||||
view = _maybe_view_as_subclass(x, array)
|
||||
|
||||
if view.flags.writeable and not writeable:
|
||||
view.flags.writeable = False
|
||||
|
||||
return view
|
||||
|
||||
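Given the warnings above, a cautious pattern is to derive the new strides from ``x.strides`` and pass ``writeable=False``. A minimal sketch of a 1-D rolling window built that way (the helper name ``rolling`` is illustrative, not part of this module):

import numpy as np
from numpy.lib.stride_tricks import as_strided

def rolling(x, width):
    # Windows share memory with x, so keep the view read-only.
    n = x.shape[0] - width + 1
    return as_strided(x, shape=(n, width),
                      strides=(x.strides[0], x.strides[0]),
                      writeable=False)

print(rolling(np.arange(5), 3))
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]]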
|
||||
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
|
||||
subok=None, writeable=None):
|
||||
return (x,)
|
||||
|
||||
|
||||
@array_function_dispatch(_sliding_window_view_dispatcher)
|
||||
def sliding_window_view(x, window_shape, axis=None, *,
|
||||
subok=False, writeable=False):
|
||||
"""
|
||||
Create a sliding window view into the array with the given window shape.
|
||||
|
||||
Also known as rolling or moving window, the window slides across all
|
||||
dimensions of the array and extracts subsets of the array at all window
|
||||
positions.
|
||||
|
||||
.. versionadded:: 1.20.0
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Array to create the sliding window view from.
|
||||
window_shape : int or tuple of int
|
||||
Size of window over each axis that takes part in the sliding window.
|
||||
If `axis` is not present, must have same length as the number of input
|
||||
array dimensions. Single integers `i` are treated as if they were the
|
||||
tuple `(i,)`.
|
||||
axis : int or tuple of int, optional
|
||||
Axis or axes along which the sliding window is applied.
|
||||
By default, the sliding window is applied to all axes and
|
||||
`window_shape[i]` will refer to axis `i` of `x`.
|
||||
If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
|
||||
the axis `axis[i]` of `x`.
|
||||
Single integers `i` are treated as if they were the tuple `(i,)`.
|
||||
subok : bool, optional
|
||||
If True, sub-classes will be passed-through, otherwise the returned
|
||||
array will be forced to be a base-class array (default).
|
||||
writeable : bool, optional
|
||||
When true, allow writing to the returned view. The default is false,
|
||||
as this should be used with caution: the returned view contains the
|
||||
same memory location multiple times, so writing to one location will
|
||||
cause others to change.
|
||||
|
||||
Returns
|
||||
-------
|
||||
view : ndarray
|
||||
Sliding window view of the array. The sliding window dimensions are
|
||||
inserted at the end, and the original dimensions are trimmed as
|
||||
required by the size of the sliding window.
|
||||
That is, ``view.shape = x_shape_trimmed + window_shape``, where
|
||||
``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
|
||||
than the corresponding window size.
|
||||
|
||||
See Also
|
||||
--------
|
||||
lib.stride_tricks.as_strided: A lower-level and less safe routine for
|
||||
creating arbitrary views from custom shape and strides.
|
||||
broadcast_to: broadcast an array to a given shape.
|
||||
|
||||
Notes
|
||||
-----
|
||||
For many applications using a sliding window view can be convenient, but
|
||||
potentially very slow. Often specialized solutions exist, for example:
|
||||
|
||||
- `scipy.signal.fftconvolve`
|
||||
|
||||
- filtering functions in `scipy.ndimage`
|
||||
|
||||
- moving window functions provided by
|
||||
`bottleneck <https://github.com/pydata/bottleneck>`_.
|
||||
|
||||
As a rough estimate, a sliding window approach with an input size of `N`
|
||||
and a window size of `W` will scale as `O(N*W)` where frequently a special
|
||||
algorithm can achieve `O(N)`. That means that the sliding window variant
|
||||
for a window size of 100 can be 100 times slower than a more specialized
|
||||
version.
|
||||
|
||||
Nevertheless, for small window sizes, when no custom algorithm exists, or
|
||||
as a prototyping and developing tool, this function can be a good solution.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> x = np.arange(6)
|
||||
>>> x.shape
|
||||
(6,)
|
||||
>>> v = sliding_window_view(x, 3)
|
||||
>>> v.shape
|
||||
(4, 3)
|
||||
>>> v
|
||||
array([[0, 1, 2],
|
||||
[1, 2, 3],
|
||||
[2, 3, 4],
|
||||
[3, 4, 5]])
|
||||
|
||||
This also works in more dimensions, e.g.
|
||||
|
||||
>>> i, j = np.ogrid[:3, :4]
|
||||
>>> x = 10*i + j
|
||||
>>> x.shape
|
||||
(3, 4)
|
||||
>>> x
|
||||
array([[ 0, 1, 2, 3],
|
||||
[10, 11, 12, 13],
|
||||
[20, 21, 22, 23]])
|
||||
>>> shape = (2,2)
|
||||
>>> v = sliding_window_view(x, shape)
|
||||
>>> v.shape
|
||||
(2, 3, 2, 2)
|
||||
>>> v
|
||||
array([[[[ 0, 1],
|
||||
[10, 11]],
|
||||
[[ 1, 2],
|
||||
[11, 12]],
|
||||
[[ 2, 3],
|
||||
[12, 13]]],
|
||||
[[[10, 11],
|
||||
[20, 21]],
|
||||
[[11, 12],
|
||||
[21, 22]],
|
||||
[[12, 13],
|
||||
[22, 23]]]])
|
||||
|
||||
The axis can be specified explicitly:
|
||||
|
||||
>>> v = sliding_window_view(x, 3, 0)
|
||||
>>> v.shape
|
||||
(1, 4, 3)
|
||||
>>> v
|
||||
array([[[ 0, 10, 20],
|
||||
[ 1, 11, 21],
|
||||
[ 2, 12, 22],
|
||||
[ 3, 13, 23]]])
|
||||
|
||||
The same axis can be used several times. In that case, every use reduces
|
||||
the corresponding original dimension:
|
||||
|
||||
>>> v = sliding_window_view(x, (2, 3), (1, 1))
|
||||
>>> v.shape
|
||||
(3, 1, 2, 3)
|
||||
>>> v
|
||||
array([[[[ 0, 1, 2],
|
||||
[ 1, 2, 3]]],
|
||||
[[[10, 11, 12],
|
||||
[11, 12, 13]]],
|
||||
[[[20, 21, 22],
|
||||
[21, 22, 23]]]])
|
||||
|
||||
Combining with stepped slicing (`::step`), this can be used to take sliding
|
||||
views which skip elements:
|
||||
|
||||
>>> x = np.arange(7)
|
||||
>>> sliding_window_view(x, 5)[:, ::2]
|
||||
array([[0, 2, 4],
|
||||
[1, 3, 5],
|
||||
[2, 4, 6]])
|
||||
|
||||
or views which move by multiple elements
|
||||
|
||||
>>> x = np.arange(7)
|
||||
>>> sliding_window_view(x, 3)[::2, :]
|
||||
array([[0, 1, 2],
|
||||
[2, 3, 4],
|
||||
[4, 5, 6]])
|
||||
|
||||
A common application of `sliding_window_view` is the calculation of running
|
||||
statistics. The simplest example is the
|
||||
`moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
|
||||
|
||||
>>> x = np.arange(6)
|
||||
>>> x.shape
|
||||
(6,)
|
||||
>>> v = sliding_window_view(x, 3)
|
||||
>>> v.shape
|
||||
(4, 3)
|
||||
>>> v
|
||||
array([[0, 1, 2],
|
||||
[1, 2, 3],
|
||||
[2, 3, 4],
|
||||
[3, 4, 5]])
|
||||
>>> moving_average = v.mean(axis=-1)
|
||||
>>> moving_average
|
||||
array([1., 2., 3., 4.])
|
||||
|
||||
Note that a sliding window approach is often **not** optimal (see Notes).
|
||||
"""
|
||||
window_shape = (tuple(window_shape)
|
||||
if np.iterable(window_shape)
|
||||
else (window_shape,))
|
||||
# first convert input to array, possibly keeping subclass
|
||||
x = np.array(x, copy=False, subok=subok)
|
||||
|
||||
window_shape_array = np.array(window_shape)
|
||||
if np.any(window_shape_array < 0):
|
||||
raise ValueError('`window_shape` cannot contain negative values')
|
||||
|
||||
if axis is None:
|
||||
axis = tuple(range(x.ndim))
|
||||
if len(window_shape) != len(axis):
|
||||
raise ValueError(f'Since axis is `None`, must provide '
|
||||
f'window_shape for all dimensions of `x`; '
|
||||
f'got {len(window_shape)} window_shape elements '
|
||||
f'and `x.ndim` is {x.ndim}.')
|
||||
else:
|
||||
axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
|
||||
if len(window_shape) != len(axis):
|
||||
raise ValueError(f'Must provide matching length window_shape and '
|
||||
f'axis; got {len(window_shape)} window_shape '
|
||||
f'elements and {len(axis)} axes elements.')
|
||||
|
||||
out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
|
||||
|
||||
# note: same axis can be windowed repeatedly
|
||||
x_shape_trimmed = list(x.shape)
|
||||
for ax, dim in zip(axis, window_shape):
|
||||
if x_shape_trimmed[ax] < dim:
|
||||
raise ValueError(
|
||||
'window shape cannot be larger than input array shape')
|
||||
x_shape_trimmed[ax] -= dim - 1
|
||||
out_shape = tuple(x_shape_trimmed) + window_shape
|
||||
return as_strided(x, strides=out_strides, shape=out_shape,
|
||||
subok=subok, writeable=writeable)
|
||||
|
||||
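To make the ``O(N*W)`` note in the docstring concrete, a small illustrative comparison of the same moving average computed through the window view and through ``np.convolve``:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.random.default_rng(0).random(100_000)
w = 100
avg_view = sliding_window_view(x, w).mean(axis=-1)       # O(N*W) reduction
avg_conv = np.convolve(x, np.ones(w) / w, mode='valid')  # usually far cheaper
assert np.allclose(avg_view, avg_conv)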
|
||||
def _broadcast_to(array, shape, subok, readonly):
|
||||
shape = tuple(shape) if np.iterable(shape) else (shape,)
|
||||
array = np.array(array, copy=False, subok=subok)
|
||||
if not shape and array.shape:
|
||||
raise ValueError('cannot broadcast a non-scalar to a scalar array')
|
||||
if any(size < 0 for size in shape):
|
||||
raise ValueError('all elements of broadcast shape must be non-'
|
||||
'negative')
|
||||
extras = []
|
||||
it = np.nditer(
|
||||
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
|
||||
op_flags=['readonly'], itershape=shape, order='C')
|
||||
with it:
|
||||
# never really has writebackifcopy semantics
|
||||
broadcast = it.itviews[0]
|
||||
result = _maybe_view_as_subclass(array, broadcast)
|
||||
# In a future version this will go away
|
||||
if not readonly and array.flags._writeable_no_warn:
|
||||
result.flags.writeable = True
|
||||
result.flags._warn_on_write = True
|
||||
return result
|
||||
|
||||
|
||||
def _broadcast_to_dispatcher(array, shape, subok=None):
|
||||
return (array,)
|
||||
|
||||
|
||||
@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
|
||||
def broadcast_to(array, shape, subok=False):
|
||||
"""Broadcast an array to a new shape.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
array : array_like
|
||||
The array to broadcast.
|
||||
shape : tuple or int
|
||||
The shape of the desired array. A single integer ``i`` is interpreted
|
||||
as ``(i,)``.
|
||||
subok : bool, optional
|
||||
If True, then sub-classes will be passed-through, otherwise
|
||||
the returned array will be forced to be a base-class array (default).
|
||||
|
||||
Returns
|
||||
-------
|
||||
broadcast : array
|
||||
A readonly view on the original array with the given shape. It is
|
||||
typically not contiguous. Furthermore, more than one element of a
|
||||
broadcasted array may refer to a single memory location.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If the array is not compatible with the new shape according to NumPy's
|
||||
broadcasting rules.
|
||||
|
||||
See Also
|
||||
--------
|
||||
broadcast
|
||||
broadcast_arrays
|
||||
broadcast_shapes
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. versionadded:: 1.10.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> x = np.array([1, 2, 3])
|
||||
>>> np.broadcast_to(x, (3, 3))
|
||||
array([[1, 2, 3],
|
||||
[1, 2, 3],
|
||||
[1, 2, 3]])
|
||||
"""
|
||||
return _broadcast_to(array, shape, subok=subok, readonly=True)
|
||||
|
||||
|
||||
def _broadcast_shape(*args):
|
||||
"""Returns the shape of the arrays that would result from broadcasting the
|
||||
supplied arrays against each other.
|
||||
"""
|
||||
# use the old-iterator because np.nditer does not handle size 0 arrays
|
||||
# consistently
|
||||
b = np.broadcast(*args[:32])
|
||||
# unfortunately, it cannot handle 32 or more arguments directly
|
||||
for pos in range(32, len(args), 31):
|
||||
# ironically, np.broadcast does not properly handle np.broadcast
|
||||
# objects (it treats them as scalars)
|
||||
# use broadcasting to avoid allocating the full array
|
||||
b = broadcast_to(0, b.shape)
|
||||
b = np.broadcast(b, *args[pos:(pos + 31)])
|
||||
return b.shape
|
||||
|
||||
|
||||
@set_module('numpy')
|
||||
def broadcast_shapes(*args):
|
||||
"""
|
||||
Broadcast the input shapes into a single shape.
|
||||
|
||||
:ref:`Learn more about broadcasting here <basics.broadcasting>`.
|
||||
|
||||
.. versionadded:: 1.20.0
|
||||
|
||||
Parameters
|
||||
----------
|
||||
`*args` : tuples of ints, or ints
|
||||
The shapes to be broadcast against each other.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tuple
|
||||
Broadcasted shape.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If the shapes are not compatible and cannot be broadcast according
|
||||
to NumPy's broadcasting rules.
|
||||
|
||||
See Also
|
||||
--------
|
||||
broadcast
|
||||
broadcast_arrays
|
||||
broadcast_to
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
|
||||
(3, 2)
|
||||
|
||||
>>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
|
||||
(5, 6, 7)
|
||||
"""
|
||||
arrays = [np.empty(x, dtype=[]) for x in args]
|
||||
return _broadcast_shape(*arrays)
|
||||
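Because ``_broadcast_shape`` consumes its arguments in chunks, the 32-operand limit of ``np.broadcast`` does not leak into the public function; a quick illustrative check:

import numpy as np

# Well over the 32 operands np.broadcast accepts in a single call:
shapes = [(1,)] * 100 + [(5, 1), (1, 7)]
assert np.broadcast_shapes(*shapes) == (5, 7)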
|
||||
|
||||
def _broadcast_arrays_dispatcher(*args, subok=None):
|
||||
return args
|
||||
|
||||
|
||||
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
|
||||
def broadcast_arrays(*args, subok=False):
|
||||
"""
|
||||
Broadcast any number of arrays against each other.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
`*args` : array_likes
|
||||
The arrays to broadcast.
|
||||
|
||||
subok : bool, optional
|
||||
If True, then sub-classes will be passed-through, otherwise
|
||||
the returned arrays will be forced to be a base-class array (default).
|
||||
|
||||
Returns
|
||||
-------
|
||||
broadcasted : list of arrays
|
||||
These arrays are views on the original arrays. They are typically
|
||||
not contiguous. Furthermore, more than one element of a
|
||||
broadcasted array may refer to a single memory location. If you need
|
||||
to write to the arrays, make copies first. While you can set the
|
||||
``writable`` flag True, writing to a single output value may end up
|
||||
changing more than one location in the output array.
|
||||
|
||||
.. deprecated:: 1.17
|
||||
The output is currently marked so that if written to, a deprecation
|
||||
warning will be emitted. A future version will set the
|
||||
``writable`` flag False so writing to it will raise an error.
|
||||
|
||||
See Also
|
||||
--------
|
||||
broadcast
|
||||
broadcast_to
|
||||
broadcast_shapes
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> x = np.array([[1,2,3]])
|
||||
>>> y = np.array([[4],[5]])
|
||||
>>> np.broadcast_arrays(x, y)
|
||||
[array([[1, 2, 3],
|
||||
[1, 2, 3]]), array([[4, 4, 4],
|
||||
[5, 5, 5]])]
|
||||
|
||||
Here is a useful idiom for getting contiguous copies instead of
|
||||
non-contiguous views.
|
||||
|
||||
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
|
||||
[array([[1, 2, 3],
|
||||
[1, 2, 3]]), array([[4, 4, 4],
|
||||
[5, 5, 5]])]
|
||||
|
||||
"""
|
||||
# nditer is not used here to avoid the limit of 32 arrays.
|
||||
# Otherwise, something like the following one-liner would suffice:
|
||||
# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
|
||||
# order='C').itviews
|
||||
|
||||
args = [np.array(_m, copy=False, subok=subok) for _m in args]
|
||||
|
||||
shape = _broadcast_shape(*args)
|
||||
|
||||
if all(array.shape == shape for array in args):
|
||||
# Common case where nothing needs to be broadcasted.
|
||||
return args
|
||||
|
||||
return [_broadcast_to(array, shape, subok=subok, readonly=False)
|
||||
for array in args]
|
80
teil20b/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
Normal file
80
teil20b/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
Normal file
@@ -0,0 +1,80 @@
|
||||
from collections.abc import Iterable
|
||||
from typing import Any, TypeVar, overload, SupportsIndex
|
||||
|
||||
from numpy import generic
|
||||
from numpy._typing import (
|
||||
NDArray,
|
||||
ArrayLike,
|
||||
_ShapeLike,
|
||||
_Shape,
|
||||
_ArrayLike
|
||||
)
|
||||
|
||||
_SCT = TypeVar("_SCT", bound=generic)
|
||||
|
||||
__all__: list[str]
|
||||
|
||||
class DummyArray:
|
||||
__array_interface__: dict[str, Any]
|
||||
base: None | NDArray[Any]
|
||||
def __init__(
|
||||
self,
|
||||
interface: dict[str, Any],
|
||||
base: None | NDArray[Any] = ...,
|
||||
) -> None: ...
|
||||
|
||||
@overload
|
||||
def as_strided(
|
||||
x: _ArrayLike[_SCT],
|
||||
shape: None | Iterable[int] = ...,
|
||||
strides: None | Iterable[int] = ...,
|
||||
subok: bool = ...,
|
||||
writeable: bool = ...,
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def as_strided(
|
||||
x: ArrayLike,
|
||||
shape: None | Iterable[int] = ...,
|
||||
strides: None | Iterable[int] = ...,
|
||||
subok: bool = ...,
|
||||
writeable: bool = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def sliding_window_view(
|
||||
x: _ArrayLike[_SCT],
|
||||
window_shape: int | Iterable[int],
|
||||
axis: None | SupportsIndex = ...,
|
||||
*,
|
||||
subok: bool = ...,
|
||||
writeable: bool = ...,
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def sliding_window_view(
|
||||
x: ArrayLike,
|
||||
window_shape: int | Iterable[int],
|
||||
axis: None | SupportsIndex = ...,
|
||||
*,
|
||||
subok: bool = ...,
|
||||
writeable: bool = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
@overload
|
||||
def broadcast_to(
|
||||
array: _ArrayLike[_SCT],
|
||||
shape: int | Iterable[int],
|
||||
subok: bool = ...,
|
||||
) -> NDArray[_SCT]: ...
|
||||
@overload
|
||||
def broadcast_to(
|
||||
array: ArrayLike,
|
||||
shape: int | Iterable[int],
|
||||
subok: bool = ...,
|
||||
) -> NDArray[Any]: ...
|
||||
|
||||
def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
|
||||
|
||||
def broadcast_arrays(
|
||||
*args: ArrayLike,
|
||||
subok: bool = ...,
|
||||
) -> list[NDArray[Any]]: ...
|
Binary file not shown.
350
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__datasource.py
Normal file
350
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__datasource.py
Normal file
@@ -0,0 +1,350 @@
|
||||
import os
|
||||
import pytest
|
||||
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
|
||||
from shutil import rmtree
|
||||
|
||||
import numpy.lib._datasource as datasource
|
||||
from numpy.testing import assert_, assert_equal, assert_raises
|
||||
|
||||
import urllib.request as urllib_request
|
||||
from urllib.parse import urlparse
|
||||
from urllib.error import URLError
|
||||
|
||||
|
||||
def urlopen_stub(url, data=None):
|
||||
'''Stub to replace urlopen for testing.'''
|
||||
if url == valid_httpurl():
|
||||
tmpfile = NamedTemporaryFile(prefix='urltmp_')
|
||||
return tmpfile
|
||||
else:
|
||||
raise URLError('Name or service not known')
|
||||
|
||||
# setup and teardown
|
||||
old_urlopen = None
|
||||
|
||||
|
||||
def setup_module():
|
||||
global old_urlopen
|
||||
|
||||
old_urlopen = urllib_request.urlopen
|
||||
urllib_request.urlopen = urlopen_stub
|
||||
|
||||
|
||||
def teardown_module():
|
||||
urllib_request.urlopen = old_urlopen
|
||||
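The module-level setup/teardown above swaps ``urlopen`` globally. With pytest the same stubbing could also be written as an autouse fixture (an illustrative alternative, not what this file does):

import pytest

@pytest.fixture(autouse=True)
def stub_urlopen(monkeypatch):
    # Route urlopen through the stub for each test; monkeypatch restores
    # the real implementation automatically on teardown.
    monkeypatch.setattr("urllib.request.urlopen", urlopen_stub)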
|
||||
# A valid website for more robust testing
|
||||
http_path = 'http://www.google.com/'
|
||||
http_file = 'index.html'
|
||||
|
||||
http_fakepath = 'http://fake.abc.web/site/'
|
||||
http_fakefile = 'fake.txt'
|
||||
|
||||
malicious_files = ['/etc/shadow', '../../shadow',
|
||||
'..\\system.dat', 'c:\\windows\\system.dat']
|
||||
|
||||
magic_line = b'three is the magic number'
|
||||
|
||||
|
||||
# Utility functions used by many tests
|
||||
def valid_textfile(filedir):
|
||||
# Generate and return a valid temporary file.
|
||||
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
|
||||
os.close(fd)
|
||||
return path
|
||||
|
||||
|
||||
def invalid_textfile(filedir):
|
||||
# Generate and return an invalid filename.
|
||||
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
|
||||
os.close(fd)
|
||||
os.remove(path)
|
||||
return path
|
||||
|
||||
|
||||
def valid_httpurl():
|
||||
return http_path+http_file
|
||||
|
||||
|
||||
def invalid_httpurl():
|
||||
return http_fakepath+http_fakefile
|
||||
|
||||
|
||||
def valid_baseurl():
|
||||
return http_path
|
||||
|
||||
|
||||
def invalid_baseurl():
|
||||
return http_fakepath
|
||||
|
||||
|
||||
def valid_httpfile():
|
||||
return http_file
|
||||
|
||||
|
||||
def invalid_httpfile():
|
||||
return http_fakefile
|
||||
|
||||
|
||||
class TestDataSourceOpen:
|
||||
def setup_method(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
fh = self.ds.open(valid_httpurl())
|
||||
assert_(fh)
|
||||
fh.close()
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
url = invalid_httpurl()
|
||||
assert_raises(OSError, self.ds.open, url)
|
||||
try:
|
||||
self.ds.open(url)
|
||||
except OSError as e:
|
||||
# Regression test for bug fixed in r4342.
|
||||
assert_(e.errno is None)
|
||||
|
||||
def test_InvalidHTTPCacheURLError(self):
|
||||
assert_raises(URLError, self.ds._cache, invalid_httpurl())
|
||||
|
||||
def test_ValidFile(self):
|
||||
local_file = valid_textfile(self.tmpdir)
|
||||
fh = self.ds.open(local_file)
|
||||
assert_(fh)
|
||||
fh.close()
|
||||
|
||||
def test_InvalidFile(self):
|
||||
invalid_file = invalid_textfile(self.tmpdir)
|
||||
assert_raises(OSError, self.ds.open, invalid_file)
|
||||
|
||||
def test_ValidGzipFile(self):
|
||||
try:
|
||||
import gzip
|
||||
except ImportError:
|
||||
# We don't have the gzip capabilities to test.
|
||||
pytest.skip()
|
||||
# Test datasource's internal file_opener for Gzip files.
|
||||
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
|
||||
fp = gzip.open(filepath, 'w')
|
||||
fp.write(magic_line)
|
||||
fp.close()
|
||||
fp = self.ds.open(filepath)
|
||||
result = fp.readline()
|
||||
fp.close()
|
||||
assert_equal(magic_line, result)
|
||||
|
||||
def test_ValidBz2File(self):
|
||||
try:
|
||||
import bz2
|
||||
except ImportError:
|
||||
# We don't have the bz2 capabilities to test.
|
||||
pytest.skip()
|
||||
# Test datasource's internal file_opener for BZip2 files.
|
||||
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
|
||||
fp = bz2.BZ2File(filepath, 'w')
|
||||
fp.write(magic_line)
|
||||
fp.close()
|
||||
fp = self.ds.open(filepath)
|
||||
result = fp.readline()
|
||||
fp.close()
|
||||
assert_equal(magic_line, result)
|
||||
|
||||
|
||||
class TestDataSourceExists:
|
||||
def setup_method(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
assert_(self.ds.exists(valid_httpurl()))
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
assert_equal(self.ds.exists(invalid_httpurl()), False)
|
||||
|
||||
def test_ValidFile(self):
|
||||
# Test valid file in destpath
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
assert_(self.ds.exists(tmpfile))
|
||||
# Test valid local file not in destpath
|
||||
localdir = mkdtemp()
|
||||
tmpfile = valid_textfile(localdir)
|
||||
assert_(self.ds.exists(tmpfile))
|
||||
rmtree(localdir)
|
||||
|
||||
def test_InvalidFile(self):
|
||||
tmpfile = invalid_textfile(self.tmpdir)
|
||||
assert_equal(self.ds.exists(tmpfile), False)
|
||||
|
||||
|
||||
class TestDataSourceAbspath:
|
||||
def setup_method(self):
|
||||
self.tmpdir = os.path.abspath(mkdtemp())
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
|
||||
local_path = os.path.join(self.tmpdir, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
assert_equal(local_path, self.ds.abspath(valid_httpurl()))
|
||||
|
||||
def test_ValidFile(self):
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
# Test with filename only
|
||||
assert_equal(tmpfile, self.ds.abspath(tmpfilename))
|
||||
# Test filename with complete path
|
||||
assert_equal(tmpfile, self.ds.abspath(tmpfile))
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
|
||||
invalidhttp = os.path.join(self.tmpdir, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
|
||||
|
||||
def test_InvalidFile(self):
|
||||
invalidfile = valid_textfile(self.tmpdir)
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
# Test with filename only
|
||||
assert_(invalidfile != self.ds.abspath(tmpfilename))
|
||||
# Test filename with complete path
|
||||
assert_(invalidfile != self.ds.abspath(tmpfile))
|
||||
|
||||
def test_sandboxing(self):
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
|
||||
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
|
||||
|
||||
assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
|
||||
assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
|
||||
assert_(tmp_path(tmpfile).startswith(self.tmpdir))
|
||||
assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
|
||||
for fn in malicious_files:
|
||||
assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
|
||||
assert_(tmp_path(fn).startswith(self.tmpdir))
|
||||
|
||||
def test_windows_os_sep(self):
|
||||
orig_os_sep = os.sep
|
||||
try:
|
||||
os.sep = '\\'
|
||||
self.test_ValidHTTP()
|
||||
self.test_ValidFile()
|
||||
self.test_InvalidHTTP()
|
||||
self.test_InvalidFile()
|
||||
self.test_sandboxing()
|
||||
finally:
|
||||
os.sep = orig_os_sep
|
||||
|
||||
|
||||
class TestRepositoryAbspath:
|
||||
def setup_method(self):
|
||||
self.tmpdir = os.path.abspath(mkdtemp())
|
||||
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.repos
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
|
||||
local_path = os.path.join(self.repos._destpath, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
filepath = self.repos.abspath(valid_httpfile())
|
||||
assert_equal(local_path, filepath)
|
||||
|
||||
def test_sandboxing(self):
|
||||
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
|
||||
assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
|
||||
for fn in malicious_files:
|
||||
assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
|
||||
assert_(tmp_path(fn).startswith(self.tmpdir))
|
||||
|
||||
def test_windows_os_sep(self):
|
||||
orig_os_sep = os.sep
|
||||
try:
|
||||
os.sep = '\\'
|
||||
self.test_ValidHTTP()
|
||||
self.test_sandboxing()
|
||||
finally:
|
||||
os.sep = orig_os_sep
|
||||
|
||||
|
||||
class TestRepositoryExists:
|
||||
def setup_method(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.repos
|
||||
|
||||
def test_ValidFile(self):
|
||||
# Create local temp file
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
assert_(self.repos.exists(tmpfile))
|
||||
|
||||
def test_InvalidFile(self):
|
||||
tmpfile = invalid_textfile(self.tmpdir)
|
||||
assert_equal(self.repos.exists(tmpfile), False)
|
||||
|
||||
def test_RemoveHTTPFile(self):
|
||||
assert_(self.repos.exists(valid_httpurl()))
|
||||
|
||||
def test_CachedHTTPFile(self):
|
||||
localfile = valid_httpurl()
|
||||
# Create a locally cached temp file with a URL-based
|
||||
# directory structure. This is similar to what Repository.open
|
||||
# would do.
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
|
||||
local_path = os.path.join(self.repos._destpath, netloc)
|
||||
os.mkdir(local_path, 0o0700)
|
||||
tmpfile = valid_textfile(local_path)
|
||||
assert_(self.repos.exists(tmpfile))
|
||||
|
||||
|
||||
class TestOpenFunc:
|
||||
def setup_method(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
|
||||
def teardown_method(self):
|
||||
rmtree(self.tmpdir)
|
||||
|
||||
def test_DataSourceOpen(self):
|
||||
local_file = valid_textfile(self.tmpdir)
|
||||
# Test case where destpath is passed in
|
||||
fp = datasource.open(local_file, destpath=self.tmpdir)
|
||||
assert_(fp)
|
||||
fp.close()
|
||||
# Test case where default destpath is used
|
||||
fp = datasource.open(local_file)
|
||||
assert_(fp)
|
||||
fp.close()
|
||||
|
||||
def test_del_attr_handling():
|
||||
# DataSource __del__ can be called
|
||||
# even if __init__ fails when the
|
||||
# Exception object is caught by the
|
||||
# caller as happens in refguide_check
|
||||
# is_deprecated() function
|
||||
|
||||
ds = datasource.DataSource()
|
||||
# simulate failed __init__ by removing key attribute
|
||||
# produced within __init__ and expected by __del__
|
||||
del ds._istmpdest
|
||||
# should not raise an AttributeError if __del__
|
||||
# gracefully handles failed __init__:
|
||||
ds.__del__()
|
353
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__iotools.py
Normal file
353
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__iotools.py
Normal file
@@ -0,0 +1,353 @@
|
||||
import time
|
||||
from datetime import date
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_allclose, assert_raises,
|
||||
)
|
||||
from numpy.lib._iotools import (
|
||||
LineSplitter, NameValidator, StringConverter,
|
||||
has_nested_fields, easy_dtype, flatten_dtype
|
||||
)
|
||||
|
||||
|
||||
class TestLineSplitter:
|
||||
"Tests the LineSplitter class."
|
||||
|
||||
def test_no_delimiter(self):
|
||||
"Test LineSplitter w/o delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter()(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
test = LineSplitter('')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
|
||||
def test_space_delimiter(self):
|
||||
"Test space delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(' ')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
test = LineSplitter('  ')(strg)
|
||||
assert_equal(test, ['1 2 3 4', '5'])
|
||||
|
||||
def test_tab_delimiter(self):
|
||||
"Test tab delimiter"
|
||||
strg = " 1\t 2\t 3\t 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5 6'])
|
||||
strg = " 1 2\t 3 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1 2', '3 4', '5 6'])
|
||||
|
||||
def test_other_delimiter(self):
|
||||
"Test LineSplitter on delimiter"
|
||||
strg = "1,2,3,4,,5"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
#
|
||||
strg = " 1,2,3,4,,5 # test"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
# gh-11028 bytes comment/delimiters should get encoded
|
||||
strg = b" 1,2,3,4,,5 % test"
|
||||
test = LineSplitter(delimiter=b',', comments=b'%')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
def test_constant_fixed_width(self):
|
||||
"Test LineSplitter w/ fixed-width fields"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(3)(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(20)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(30)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
|
||||
def test_variable_fixed_width(self):
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((3, 6, 6, 3))(strg)
|
||||
assert_equal(test, ['1', '3', '4 5', '6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((6, 6, 9))(strg)
|
||||
assert_equal(test, ['1', '3 4', '5 6'])
|
||||
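The fixed-width cases slice the line at cumulative offsets and strip each field; roughly the following sketch, which omits LineSplitter's comment handling (the data here is made up for illustration):

line = " 1  34  5"
widths = (3, 3, 3)
fields, start = [], 0
for w in widths:
    fields.append(line[start:start + w].strip())
    start += w
print(fields)  # ['1', '34', '5']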
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNameValidator:
|
||||
|
||||
def test_case_sensitivity(self):
|
||||
"Test case sensitivity"
|
||||
names = ['A', 'a', 'b', 'c']
|
||||
test = NameValidator().validate(names)
|
||||
assert_equal(test, ['A', 'a', 'b', 'c'])
|
||||
test = NameValidator(case_sensitive=False).validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='upper').validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='lower').validate(names)
|
||||
assert_equal(test, ['a', 'a_1', 'b', 'c'])
|
||||
|
||||
# check exceptions
|
||||
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
|
||||
|
||||
def test_excludelist(self):
|
||||
"Test excludelist"
|
||||
names = ['dates', 'data', 'Other Data', 'mask']
|
||||
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
|
||||
test = validator.validate(names)
|
||||
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
|
||||
|
||||
def test_missing_names(self):
|
||||
"Test validate missing names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist), ['a', 'b', 'c'])
|
||||
namelist = ('', 'b', 'c')
|
||||
assert_equal(validator(namelist), ['f0', 'b', 'c'])
|
||||
namelist = ('a', 'b', '')
|
||||
assert_equal(validator(namelist), ['a', 'b', 'f0'])
|
||||
namelist = ('', 'f0', '')
|
||||
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
|
||||
|
||||
def test_validate_nb_names(self):
|
||||
"Test validate nb names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist, nbfields=1), ('a',))
|
||||
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
|
||||
['a', 'b', 'c', 'g0', 'g1'])
|
||||
|
||||
def test_validate_wo_names(self):
|
||||
"Test validate no names"
|
||||
namelist = None
|
||||
validator = NameValidator()
|
||||
assert_(validator(namelist) is None)
|
||||
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _bytes_to_date(s):
|
||||
return date(*time.strptime(s, "%Y-%m-%d")[:3])
|
||||
|
||||
|
||||
class TestStringConverter:
|
||||
"Test StringConverter"
|
||||
|
||||
def test_creation(self):
|
||||
"Test creation of a StringConverter"
|
||||
converter = StringConverter(int, -99999)
|
||||
assert_equal(converter._status, 1)
|
||||
assert_equal(converter.default, -99999)
|
||||
|
||||
def test_upgrade(self):
|
||||
"Tests the upgrade method."
|
||||
|
||||
converter = StringConverter()
|
||||
assert_equal(converter._status, 0)
|
||||
|
||||
# test int
|
||||
assert_equal(converter.upgrade('0'), 0)
|
||||
assert_equal(converter._status, 1)
|
||||
|
||||
# On systems where long defaults to 32-bit, the statuses will be
|
||||
# offset by one, so we check for this here.
|
||||
import numpy.core.numeric as nx
|
||||
status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
|
||||
|
||||
# test int > 2**32
|
||||
assert_equal(converter.upgrade('17179869184'), 17179869184)
|
||||
assert_equal(converter._status, 1 + status_offset)
|
||||
|
||||
# test float
|
||||
assert_allclose(converter.upgrade('0.'), 0.0)
|
||||
assert_equal(converter._status, 2 + status_offset)
|
||||
|
||||
# test complex
|
||||
assert_equal(converter.upgrade('0j'), complex('0j'))
|
||||
assert_equal(converter._status, 3 + status_offset)
|
||||
|
||||
# test str
|
||||
# note that the longdouble type has been skipped, so the
|
||||
# _status increases by 2. Everything should succeed with
|
||||
# unicode conversion (8).
|
||||
for s in ['a', b'a']:
|
||||
res = converter.upgrade(s)
|
||||
assert_(type(res) is str)
|
||||
assert_equal(res, 'a')
|
||||
assert_equal(converter._status, 8 + status_offset)
|
||||
|
||||
def test_missing(self):
|
||||
"Tests the use of missing values."
|
||||
converter = StringConverter(missing_values=('missing',
|
||||
'missed'))
|
||||
converter.upgrade('0')
|
||||
assert_equal(converter('0'), 0)
|
||||
assert_equal(converter(''), converter.default)
|
||||
assert_equal(converter('missing'), converter.default)
|
||||
assert_equal(converter('missed'), converter.default)
|
||||
try:
|
||||
converter('miss')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_upgrademapper(self):
|
||||
"Tests updatemapper"
|
||||
dateparser = _bytes_to_date
|
||||
_original_mapper = StringConverter._mapper[:]
|
||||
try:
|
||||
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
|
||||
convert = StringConverter(dateparser, date(2000, 1, 1))
|
||||
test = convert('2001-01-01')
|
||||
assert_equal(test, date(2001, 1, 1))
|
||||
test = convert('2009-01-01')
|
||||
assert_equal(test, date(2009, 1, 1))
|
||||
test = convert('')
|
||||
assert_equal(test, date(2000, 1, 1))
|
||||
finally:
|
||||
StringConverter._mapper = _original_mapper
|
||||
|
||||
def test_string_to_object(self):
|
||||
"Make sure that string-to-object functions are properly recognized"
|
||||
old_mapper = StringConverter._mapper[:] # copy of list
|
||||
conv = StringConverter(_bytes_to_date)
|
||||
assert_equal(conv._mapper, old_mapper)
|
||||
assert_(hasattr(conv, 'default'))
|
||||
|
||||
def test_keep_default(self):
|
||||
"Make sure we don't lose an explicit default"
|
||||
converter = StringConverter(None, missing_values='',
|
||||
default=-999)
|
||||
converter.upgrade('3.14159265')
|
||||
assert_equal(converter.default, -999)
|
||||
assert_equal(converter.type, np.dtype(float))
|
||||
#
|
||||
converter = StringConverter(
|
||||
None, missing_values='', default=0)
|
||||
converter.upgrade('3.14159265')
|
||||
assert_equal(converter.default, 0)
|
||||
assert_equal(converter.type, np.dtype(float))
|
||||
|
||||
def test_keep_default_zero(self):
|
||||
"Check that we don't lose a default of 0"
|
||||
converter = StringConverter(int, default=0,
|
||||
missing_values="N/A")
|
||||
assert_equal(converter.default, 0)
|
||||
|
||||
def test_keep_missing_values(self):
|
||||
"Check that we're not losing missing values"
|
||||
converter = StringConverter(int, default=0,
|
||||
missing_values="N/A")
|
||||
assert_equal(
|
||||
converter.missing_values, {'', 'N/A'})
|
||||
|
||||
def test_int64_dtype(self):
|
||||
"Check that int64 integer types can be specified"
|
||||
converter = StringConverter(np.int64, default=0)
|
||||
val = "-9223372036854775807"
|
||||
assert_(converter(val) == -9223372036854775807)
|
||||
val = "9223372036854775807"
|
||||
assert_(converter(val) == 9223372036854775807)
|
||||
|
||||
def test_uint64_dtype(self):
|
||||
"Check that uint64 integer types can be specified"
|
||||
converter = StringConverter(np.uint64, default=0)
|
||||
val = "9223372043271415339"
|
||||
assert_(converter(val) == 9223372043271415339)
|
||||
|
||||
|
||||
class TestMiscFunctions:
|
||||
|
||||
def test_has_nested_dtype(self):
|
||||
"Test has_nested_dtype"
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(has_nested_fields(ndtype), False)
|
||||
ndtype = np.dtype([('A', '|S3'), ('B', float)])
|
||||
assert_equal(has_nested_fields(ndtype), False)
|
||||
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
|
||||
assert_equal(has_nested_fields(ndtype), True)
|
||||
|
||||
def test_easy_dtype(self):
|
||||
"Test ndtype on dtypes"
|
||||
# Simple case
|
||||
ndtype = float
|
||||
assert_equal(easy_dtype(ndtype), np.dtype(float))
|
||||
# As string w/o names
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype),
|
||||
np.dtype([('f0', "i4"), ('f1', "f8")]))
|
||||
# As string w/o names but different default format
|
||||
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
|
||||
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
|
||||
# As string w/ names
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names="a, b"),
|
||||
np.dtype([('a', "i4"), ('b', "f8")]))
|
||||
# As string w/ names (too many)
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([('a', "i4"), ('b', "f8")]))
|
||||
# As string w/ names (not enough)
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names=", b"),
|
||||
np.dtype([('f0', "i4"), ('b', "f8")]))
|
||||
# ... (with different default format)
|
||||
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
|
||||
np.dtype([('a', "i4"), ('f00', "f8")]))
|
||||
# As list of tuples w/o names
|
||||
ndtype = [('A', int), ('B', float)]
|
||||
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
|
||||
# As list of tuples w/ names
|
||||
assert_equal(easy_dtype(ndtype, names="a,b"),
|
||||
np.dtype([('a', int), ('b', float)]))
|
||||
# As list of tuples w/ not enough names
|
||||
assert_equal(easy_dtype(ndtype, names="a"),
|
||||
np.dtype([('a', int), ('f0', float)]))
|
||||
# As list of tuples w/ too many names
|
||||
assert_equal(easy_dtype(ndtype, names="a,b,c"),
|
||||
np.dtype([('a', int), ('b', float)]))
|
||||
# As list of types w/o names
|
||||
ndtype = (int, float, float)
|
||||
assert_equal(easy_dtype(ndtype),
|
||||
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
|
||||
# As list of types w names
|
||||
ndtype = (int, float, float)
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([('a', int), ('b', float), ('c', float)]))
|
||||
# As simple dtype w/ names
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
|
||||
# As simple dtype w/o names (but multiple fields)
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(
|
||||
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
|
||||
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
|
||||
|
||||
def test_flatten_dtype(self):
|
||||
"Testing flatten_dtype"
|
||||
# Standard dtype
|
||||
dt = np.dtype([("a", "f8"), ("b", "f8")])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, float])
|
||||
# Recursive dtype
|
||||
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
|
||||
# dtype with shaped fields
|
||||
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, int])
|
||||
dt_flat = flatten_dtype(dt, True)
|
||||
assert_equal(dt_flat, [float] * 2 + [int] * 3)
|
||||
# dtype w/ titles
|
||||
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, float])
|
64
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__version.py
Normal file
64
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test__version.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""Tests for the NumpyVersion class.
|
||||
|
||||
"""
|
||||
from numpy.testing import assert_, assert_raises
|
||||
from numpy.lib import NumpyVersion
|
||||
|
||||
|
||||
def test_main_versions():
|
||||
assert_(NumpyVersion('1.8.0') == '1.8.0')
|
||||
for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
|
||||
assert_(NumpyVersion('1.8.0') < ver)
|
||||
|
||||
for ver in ['1.7.0', '1.7.1', '0.9.9']:
|
||||
assert_(NumpyVersion('1.8.0') > ver)
|
||||
|
||||
|
||||
def test_version_1_point_10():
|
||||
# regression test for gh-2998.
|
||||
assert_(NumpyVersion('1.9.0') < '1.10.0')
|
||||
assert_(NumpyVersion('1.11.0') < '1.11.1')
|
||||
assert_(NumpyVersion('1.11.0') == '1.11.0')
|
||||
assert_(NumpyVersion('1.99.11') < '1.99.12')
|
||||
|
||||
|
||||
def test_alpha_beta_rc():
|
||||
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
|
||||
for ver in ['1.8.0', '1.8.0rc2']:
|
||||
assert_(NumpyVersion('1.8.0rc1') < ver)
|
||||
|
||||
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
|
||||
assert_(NumpyVersion('1.8.0rc1') > ver)
|
||||
|
||||
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
|
||||
|
||||
|
||||
def test_dev_version():
|
||||
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
|
||||
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
|
||||
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
|
||||
|
||||
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
|
||||
|
||||
|
||||
def test_dev_a_b_rc_mixed():
|
||||
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
|
||||
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
|
||||
|
||||
|
||||
def test_dev0_version():
|
||||
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
|
||||
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
|
||||
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
|
||||
|
||||
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
|
||||
|
||||
|
||||
def test_dev0_a_b_rc_mixed():
|
||||
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
|
||||
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
|
||||
|
||||
|
||||
def test_raises():
|
||||
for ver in ['1.9', '1,9.0', '1.7.x']:
|
||||
assert_raises(ValueError, NumpyVersion, ver)
|
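Beyond these tests, the typical application of the class is feature gating on the installed NumPy (illustrative usage, not part of the test file):

import numpy as np
from numpy.lib import NumpyVersion

# Comparisons accept plain version strings, as exercised above:
if NumpyVersion(np.__version__) >= '1.20.0':
    from numpy.lib.stride_tricks import sliding_window_view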
File diff suppressed because it is too large
944
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test_arraysetops.py
Normal file
944
teil20b/lib/python3.11/site-packages/numpy/lib/tests/test_arraysetops.py
Normal file
@@ -0,0 +1,944 @@
|
||||
"""Test functions for 1D array set operations.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
from numpy.testing import (assert_array_equal, assert_equal,
|
||||
assert_raises, assert_raises_regex)
|
||||
from numpy.lib.arraysetops import (
|
||||
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
|
||||
)
|
||||
import pytest
|
||||
|
||||
|
||||
class TestSetOps:
|
||||
|
||||
def test_intersect1d(self):
|
||||
# unique inputs
|
||||
a = np.array([5, 7, 1, 2])
|
||||
b = np.array([2, 4, 3, 1, 5])
|
||||
|
||||
ec = np.array([1, 2, 5])
|
||||
c = intersect1d(a, b, assume_unique=True)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
# non-unique inputs
|
||||
a = np.array([5, 5, 7, 1, 2])
|
||||
b = np.array([2, 1, 4, 3, 3, 1, 5])
|
||||
|
||||
ed = np.array([1, 2, 5])
|
||||
c = intersect1d(a, b)
|
||||
assert_array_equal(c, ed)
|
||||
assert_array_equal([], intersect1d([], []))
|
||||
|
||||
def test_intersect1d_array_like(self):
|
||||
# See gh-11772
|
||||
class Test:
|
||||
def __array__(self):
|
||||
return np.arange(3)
|
||||
|
||||
a = Test()
|
||||
res = intersect1d(a, a)
|
||||
assert_array_equal(res, a)
|
||||
res = intersect1d([1, 2, 3], [1, 2, 3])
|
||||
assert_array_equal(res, [1, 2, 3])
|
||||
|
||||
def test_intersect1d_indices(self):
|
||||
# unique inputs
|
||||
a = np.array([1, 2, 3, 4])
|
||||
b = np.array([2, 1, 4, 6])
|
||||
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
||||
ee = np.array([1, 2, 4])
|
||||
assert_array_equal(c, ee)
|
||||
assert_array_equal(a[i1], ee)
|
||||
assert_array_equal(b[i2], ee)
|
||||
|
||||
# non-unique inputs
|
||||
a = np.array([1, 2, 2, 3, 4, 3, 2])
|
||||
b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
|
||||
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
||||
ef = np.array([1, 2, 3, 4])
|
||||
assert_array_equal(c, ef)
|
||||
assert_array_equal(a[i1], ef)
|
||||
assert_array_equal(b[i2], ef)
|
||||
|
||||
# non1d, unique inputs
|
||||
a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
|
||||
b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
|
||||
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
||||
ui1 = np.unravel_index(i1, a.shape)
|
||||
ui2 = np.unravel_index(i2, b.shape)
|
||||
ea = np.array([2, 6, 7, 8])
|
||||
assert_array_equal(ea, a[ui1])
|
||||
assert_array_equal(ea, b[ui2])
|
||||
|
||||
# non-1d, inputs not assumed to be unique
|
||||
a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
|
||||
b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
|
||||
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
||||
ui1 = np.unravel_index(i1, a.shape)
|
||||
ui2 = np.unravel_index(i2, b.shape)
|
||||
ea = np.array([2, 7, 8])
|
||||
assert_array_equal(ea, a[ui1])
|
||||
assert_array_equal(ea, b[ui2])
|
||||
|
||||
def test_setxor1d(self):
|
||||
a = np.array([5, 7, 1, 2])
|
||||
b = np.array([2, 4, 3, 1, 5])
|
||||
|
||||
ec = np.array([3, 4, 7])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
a = np.array([1, 2, 3])
|
||||
b = np.array([6, 5, 4])
|
||||
|
||||
ec = np.array([1, 2, 3, 4, 5, 6])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
a = np.array([1, 8, 2, 3])
|
||||
b = np.array([6, 5, 4, 8])
|
||||
|
||||
ec = np.array([1, 2, 3, 4, 5, 6])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
assert_array_equal([], setxor1d([], []))
|
||||
|
||||
def test_ediff1d(self):
|
||||
zero_elem = np.array([])
|
||||
one_elem = np.array([1])
|
||||
two_elem = np.array([1, 2])
|
||||
|
||||
assert_array_equal([], ediff1d(zero_elem))
|
||||
assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
|
||||
assert_array_equal([0], ediff1d(zero_elem, to_end=0))
|
||||
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
|
||||
assert_array_equal([], ediff1d(one_elem))
|
||||
assert_array_equal([1], ediff1d(two_elem))
|
||||
assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
|
||||
assert_array_equal([5, 6, 1, 7, 8],
|
||||
ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
|
||||
assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
|
||||
assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
|
||||
assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
|
||||
assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
|
||||
|
||||
@pytest.mark.parametrize("ary, prepend, append, expected", [
|
||||
# should fail because trying to cast
|
||||
# np.nan, a standard floating point value,
|
||||
# into an integer array:
|
||||
(np.array([1, 2, 3], dtype=np.int64),
|
||||
None,
|
||||
np.nan,
|
||||
'to_end'),
|
||||
# should fail because attempting
|
||||
# to downcast to int type:
|
||||
(np.array([1, 2, 3], dtype=np.int64),
|
||||
np.array([5, 7, 2], dtype=np.float32),
|
||||
None,
|
||||
'to_begin'),
|
||||
# should fail because attempting to cast
|
||||
# two special floating point values
|
||||
# to integers (on both sides of ary),
|
||||
# `to_begin` is in the error message as the impl checks this first:
|
||||
(np.array([1., 3., 9.], dtype=np.int8),
|
||||
np.nan,
|
||||
np.nan,
|
||||
'to_begin'),
|
||||
])
|
||||
def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
|
||||
# verify resolution of gh-11490
|
||||
|
||||
# specifically, raise an appropriate
|
||||
# Exception when attempting to append or
|
||||
# prepend with an incompatible type
|
||||
msg = 'dtype of `{}` must be compatible'.format(expected)
|
||||
with assert_raises_regex(TypeError, msg):
|
||||
ediff1d(ary=ary,
|
||||
to_end=append,
|
||||
to_begin=prepend)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"ary,prepend,append,expected",
|
||||
[
|
||||
(np.array([1, 2, 3], dtype=np.int16),
|
||||
2**16, # will be cast to int16 under same kind rule.
|
||||
2**16 + 4,
|
||||
np.array([0, 1, 1, 4], dtype=np.int16)),
|
||||
(np.array([1, 2, 3], dtype=np.float32),
|
||||
np.array([5], dtype=np.float64),
|
||||
None,
|
||||
np.array([5, 1, 1], dtype=np.float32)),
|
||||
(np.array([1, 2, 3], dtype=np.int32),
|
||||
0,
|
||||
0,
|
||||
np.array([0, 1, 1, 0], dtype=np.int32)),
|
||||
(np.array([1, 2, 3], dtype=np.int64),
|
||||
3,
|
||||
-9,
|
||||
np.array([3, 1, 1, -9], dtype=np.int64)),
|
||||
]
|
||||
)
|
||||
def test_ediff1d_scalar_handling(self,
|
||||
ary,
|
||||
prepend,
|
||||
append,
|
||||
expected):
|
||||
# maintain backwards-compatibility
|
||||
# of scalar prepend / append behavior
|
||||
# in ediff1d following fix for gh-11490
|
||||
actual = np.ediff1d(ary=ary,
|
||||
to_end=append,
|
||||
to_begin=prepend)
|
||||
assert_equal(actual, expected)
|
||||
assert actual.dtype == expected.dtype
|
||||
|
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_isin(self, kind):
        # The tests for in1d cover most of isin's behavior; if in1d is
        # removed, those tests would need to target isin instead.
        def _isin_slow(a, b):
            b = np.asarray(b).flatten().tolist()
            return a in b
        isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})

        def assert_isin_equal(a, b):
            x = isin(a, b, kind=kind)
            y = isin_slow(a, b)
            assert_array_equal(x, y)

        # multidimensional arrays in both arguments
        a = np.arange(24).reshape([2, 3, 4])
        b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
        assert_isin_equal(a, b)

        # array-likes as both arguments
        c = [(9, 8), (7, 6)]
        d = (9, 7)
        assert_isin_equal(c, d)

        # zero-d array:
        f = np.array(3)
        assert_isin_equal(f, b)
        assert_isin_equal(a, f)
        assert_isin_equal(f, f)

        # scalar:
        assert_isin_equal(5, b)
        assert_isin_equal(a, 6)
        assert_isin_equal(5, 6)

        # empty array-like:
        if kind != "table":
            # An empty list will become float64,
            # which is invalid for kind="table":
            x = []
            assert_isin_equal(x, b)
            assert_isin_equal(a, x)
            assert_isin_equal(x, x)

        # empty array with various types:
        for dtype in [bool, np.int64, np.float64]:
            if kind == "table" and dtype == np.float64:
                continue

            if dtype in {np.int64, np.float64}:
                ar = np.array([10, 20, 30], dtype=dtype)
            elif dtype in {bool}:
                ar = np.array([True, False, False])

            empty_array = np.array([], dtype=dtype)

            assert_isin_equal(empty_array, ar)
            assert_isin_equal(ar, empty_array)
            assert_isin_equal(empty_array, empty_array)

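    def test_isin_example(self):
        # Illustrative sketch (added here, not upstream): isin preserves
        # the shape of its first argument, whereas in1d always returns a
        # flat mask.
        element = np.array([[0, 2], [4, 6]])
        assert_array_equal(np.isin(element, [2, 4, 6]),
                           [[False, True], [True, True]])
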
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d(self, kind):
        # we use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            # One check without np.array to make sure lists are handled
            # correctly:
            a = [5, 7, 1, 2]
            b = [2, 4, 3, 1, 5] * mult
            ec = np.array([True, False, True, True])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            a[0] = 8
            ec = np.array([False, False, True, True])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            a[0], a[3] = 4, 8
            ec = np.array([True, False, True, False])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            ec = [False, True, False, True, True, True, True, True, True,
                  False, True, False, False, False]
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            b = b + [5, 5, 4] * mult
            ec = [True, True, True, True, True, True, True, True, True, True,
                  True, False, True, True]
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            a = np.array([5, 7, 1, 2])
            b = np.array([2, 4, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            a = np.array([5, 7, 1, 1, 2])
            b = np.array([2, 4, 3, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True, True])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            a = np.array([5, 5])
            b = np.array([2, 2] * mult)
            ec = np.array([False, False])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

        a = np.array([5])
        b = np.array([2])
        ec = np.array([False])
        c = in1d(a, b, kind=kind)
        assert_array_equal(c, ec)

        if kind in {None, "sort"}:
            assert_array_equal(in1d([], [], kind=kind), [])

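    def test_in1d_example(self):
        # Illustrative sketch (added here, not upstream): in1d answers,
        # for each element of the first array, whether it also occurs in
        # the second, as a flat boolean mask.
        mask = np.in1d([0, 1, 2, 5, 0], [0, 2])
        assert_array_equal(mask, [True, False, True, False, True])
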
    def test_in1d_char_array(self):
        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
        b = np.array(['a', 'c'])

        ec = np.array([True, False, True, False, False, True, False, False])
        c = in1d(a, b)

        assert_array_equal(c, ec)

    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_invert(self, kind):
        "Test in1d's invert parameter"
        # We use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            assert_array_equal(np.invert(in1d(a, b, kind=kind)),
                               in1d(a, b, invert=True, kind=kind))

        # float:
        if kind in {None, "sort"}:
            for mult in (1, 10):
                a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],
                             dtype=np.float32)
                b = [2, 3, 4] * mult
                b = np.array(b, dtype=np.float32)
                assert_array_equal(np.invert(in1d(a, b, kind=kind)),
                                   in1d(a, b, invert=True, kind=kind))

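    def test_in1d_invert_example(self):
        # Illustrative sketch (added here, not upstream): invert=True is
        # equivalent to negating the mask afterwards, but it is computed
        # in a single pass.
        a = [0, 1, 2, 5, 0]
        assert_array_equal(np.in1d(a, [0, 2], invert=True),
                           np.invert(np.in1d(a, [0, 2])))
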
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_ravel(self, kind):
        # Test that in1d ravels its input arrays. This is not documented
        # behavior, however; the test just ensures consistency.
        a = np.arange(6).reshape(2, 3)
        b = np.arange(3, 9).reshape(3, 2)
        long_b = np.arange(3, 63).reshape(30, 2)
        ec = np.array([False, False, False, True, True, True])

        assert_array_equal(in1d(a, b, assume_unique=True, kind=kind),
                           ec)
        assert_array_equal(in1d(a, b, assume_unique=False,
                                kind=kind),
                           ec)
        assert_array_equal(in1d(a, long_b, assume_unique=True,
                                kind=kind),
                           ec)
        assert_array_equal(in1d(a, long_b, assume_unique=False,
                                kind=kind),
                           ec)

    def test_in1d_hit_alternate_algorithm(self):
        """Hit the standard isin code with integers"""
        # Need an extreme range to hit the standard code;
        # this hits it without the use of kind='table'.
        a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)
        b = np.array([2, 3, 4, 1e9], dtype=np.int64)
        expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)
        assert_array_equal(expected, in1d(a, b))
        assert_array_equal(np.invert(expected), in1d(a, b, invert=True))

        a = np.array([5, 7, 1, 2], dtype=np.int64)
        b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)
        ec = np.array([True, False, True, True])
        c = in1d(a, b, assume_unique=True)
        assert_array_equal(c, ec)

    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_boolean(self, kind):
        """Test that in1d works for boolean input"""
        a = np.array([True, False])
        b = np.array([False, False, False])
        expected = np.array([False, True])
        assert_array_equal(expected,
                           in1d(a, b, kind=kind))
        assert_array_equal(np.invert(expected),
                           in1d(a, b, invert=True, kind=kind))

    @pytest.mark.parametrize("kind", [None, "sort"])
    def test_in1d_timedelta(self, kind):
        """Test that in1d works for timedelta input"""
        rstate = np.random.RandomState(0)
        a = rstate.randint(0, 100, size=10)
        b = rstate.randint(0, 100, size=10)
        truth = in1d(a, b)
        a_timedelta = a.astype("timedelta64[s]")
        b_timedelta = b.astype("timedelta64[s]")
        assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind))

    def test_in1d_table_timedelta_fails(self):
        a = np.array([0, 1, 2], dtype="timedelta64[s]")
        b = a
        # Make sure it raises a ValueError:
        with pytest.raises(ValueError):
            in1d(a, b, kind="table")

    @pytest.mark.parametrize(
        "dtype1,dtype2",
        [
            (np.int8, np.int16),
            (np.int16, np.int8),
            (np.uint8, np.uint16),
            (np.uint16, np.uint8),
            (np.uint8, np.int16),
            (np.int16, np.uint8),
        ]
    )
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
        """Test that in1d works as expected for mixed dtype input."""
        is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
        ar1 = np.array([0, 0, 1, 1], dtype=dtype1)

        if is_dtype2_signed:
            ar2 = np.array([-128, 0, 127], dtype=dtype2)
        else:
            ar2 = np.array([127, 0, 255], dtype=dtype2)

        expected = np.array([True, True, False, False])

        expect_failure = kind == "table" and any((
            dtype1 == np.int8 and dtype2 == np.int16,
            dtype1 == np.int16 and dtype2 == np.int8
        ))

        if expect_failure:
            with pytest.raises(RuntimeError, match="exceed the maximum"):
                in1d(ar1, ar2, kind=kind)
        else:
            assert_array_equal(in1d(ar1, ar2, kind=kind), expected)

    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_mixed_boolean(self, kind):
        """Test that in1d works as expected for bool/int input."""
        for dtype in np.typecodes["AllInteger"]:
            a = np.array([True, False, False], dtype=bool)
            b = np.array([0, 0, 0, 0], dtype=dtype)
            expected = np.array([False, True, True], dtype=bool)
            assert_array_equal(in1d(a, b, kind=kind), expected)

            a, b = b, a
            expected = np.array([True, True, True, True], dtype=bool)
            assert_array_equal(in1d(a, b, kind=kind), expected)

    def test_in1d_first_array_is_object(self):
        ar1 = [None]
        ar2 = np.array([1]*10)
        expected = np.array([False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_second_array_is_object(self):
        ar1 = 1
        ar2 = np.array([None]*10)
        expected = np.array([False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_both_arrays_are_object(self):
        ar1 = [None]
        ar2 = np.array([None]*10)
        expected = np.array([True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_both_arrays_have_structured_dtype(self):
        # Test arrays of a structured data type containing an integer field
        # and a field of dtype `object` allowing for arbitrary Python objects
        dt = np.dtype([('field1', int), ('field2', object)])
        ar1 = np.array([(1, None)], dtype=dt)
        ar2 = np.array([(1, None)]*10, dtype=dt)
        expected = np.array([True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_with_arrays_containing_tuples(self):
        ar1 = np.array([(1,), 2], dtype=object)
        ar2 = np.array([(1,), 2], dtype=object)
        expected = np.array([True, True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))

        # An integer is appended at the end of the array to make sure
        # that the array builder creates the array with tuples; once the
        # array is created, the integer is sliced off again. There's a
        # bug in the array constructor that doesn't handle tuples
        # properly, and appending the integer works around it.
        ar1 = np.array([(1,), (2, 1), 1], dtype=object)
        ar1 = ar1[:-1]
        ar2 = np.array([(1,), (2, 1), 1], dtype=object)
        ar2 = ar2[:-1]
        expected = np.array([True, True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))

        ar1 = np.array([(1,), (2, 3), 1], dtype=object)
        ar1 = ar1[:-1]
        ar2 = np.array([(1,), 2], dtype=object)
        expected = np.array([True, False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))

    def test_in1d_errors(self):
        """Test that in1d raises expected errors."""

        # Error 1: `kind` is not one of 'sort', 'table', or None.
        ar1 = np.array([1, 2, 3, 4, 5])
        ar2 = np.array([2, 4, 6, 8, 10])
        assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')

        # Error 2: `kind="table"` does not work for non-integral arrays.
        obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)
        obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)
        assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')

        for dtype in [np.int32, np.int64]:
            ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
            # The range of this array will overflow:
            overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)

            # Error 3: `kind="table"` will trigger a runtime error
            # if an integer overflow is expected when computing the
            # range of ar2.
            assert_raises(
                RuntimeError,
                in1d, ar1, overflow_ar2, kind='table'
            )

            # Non-error: `kind=None` will *not* trigger a runtime error
            # on integer overflow; it switches to the `sort` algorithm
            # instead.
            result = np.in1d(ar1, overflow_ar2, kind=None)
            assert_array_equal(result, [True] + [False] * 4)
            result = np.in1d(ar1, overflow_ar2, kind='sort')
            assert_array_equal(result, [True] + [False] * 4)

    def test_union1d(self):
        a = np.array([5, 4, 7, 1, 2])
        b = np.array([2, 4, 3, 3, 2, 1, 5])

        ec = np.array([1, 2, 3, 4, 5, 7])
        c = union1d(a, b)
        assert_array_equal(c, ec)

        # Tests gh-10340, arguments to union1d should be
        # flattened if they are not already 1D
        x = np.array([[0, 1, 2], [3, 4, 5]])
        y = np.array([0, 1, 2, 3, 4])
        ez = np.array([0, 1, 2, 3, 4, 5])
        z = union1d(x, y)
        assert_array_equal(z, ez)

        assert_array_equal([], union1d([], []))

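    def test_union1d_example(self):
        # Illustrative sketch (added here, not upstream): union1d behaves
        # like unique() applied to the concatenation of its inputs, so
        # the result is sorted and deduplicated.
        assert_array_equal(union1d([5, 4, 7], [1, 5]),
                           np.unique(np.concatenate(([5, 4, 7], [1, 5]))))
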
    def test_setdiff1d(self):
        a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
        b = np.array([2, 4, 3, 3, 2, 1, 5])

        ec = np.array([6, 7])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)

        a = np.arange(21)
        b = np.arange(19)
        ec = np.array([19, 20])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)

        assert_array_equal([], setdiff1d([], []))
        a = np.array((), np.uint32)
        assert_equal(setdiff1d(a, []).dtype, np.uint32)

    def test_setdiff1d_unique(self):
        a = np.array([3, 2, 1])
        b = np.array([7, 5, 2])
        expected = np.array([3, 1])
        actual = setdiff1d(a, b, assume_unique=True)
        assert_equal(actual, expected)

    def test_setdiff1d_char_array(self):
        a = np.array(['a', 'b', 'c'])
        b = np.array(['a', 'b', 's'])
        assert_array_equal(setdiff1d(a, b), np.array(['c']))

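    def test_setdiff1d_example(self):
        # Illustrative sketch (added here, not upstream): setdiff1d keeps
        # the sorted unique values of the first array that are absent
        # from the second.
        assert_array_equal(setdiff1d([3, 1, 7, 1], [2, 7]),
                           np.array([1, 3]))
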
    def test_manyways(self):
        a = np.array([5, 7, 1, 2, 8])
        b = np.array([9, 8, 2, 4, 3, 1, 5])

        c1 = setxor1d(a, b)
        aux1 = intersect1d(a, b)
        aux2 = union1d(a, b)
        c2 = setdiff1d(aux2, aux1)
        assert_array_equal(c1, c2)


class TestUnique:

    def test_unique_1d(self):

        def check_all(a, b, i1, i2, c, dt):
            base_msg = 'check {0} failed for type {1}'

            msg = base_msg.format('values', dt)
            v = unique(a)
            assert_array_equal(v, b, msg)

            msg = base_msg.format('return_index', dt)
            v, j = unique(a, True, False, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i1, msg)

            msg = base_msg.format('return_inverse', dt)
            v, j = unique(a, False, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i2, msg)

            msg = base_msg.format('return_counts', dt)
            v, j = unique(a, False, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, c, msg)

            msg = base_msg.format('return_index and return_inverse', dt)
            v, j1, j2 = unique(a, True, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)

            msg = base_msg.format('return_index and return_counts', dt)
            v, j1, j2 = unique(a, True, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format('return_inverse and return_counts', dt)
            v, j1, j2 = unique(a, False, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i2, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format(('return_index, return_inverse '
                                   'and return_counts'), dt)
            v, j1, j2, j3 = unique(a, True, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)
            assert_array_equal(j3, c, msg)

        a = [5, 7, 1, 2, 1, 5, 7]*10
        b = [1, 2, 5, 7]
        i1 = [2, 3, 0, 1]
        i2 = [2, 3, 0, 1, 0, 2, 3]*10
        c = np.multiply([2, 1, 2, 2], 10)

        # test for numeric arrays
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        for dt in types:
            aa = np.array(a, dt)
            bb = np.array(b, dt)
            check_all(aa, bb, i1, i2, c, dt)

        # test for object arrays
        dt = 'O'
        aa = np.empty(len(a), dt)
        aa[:] = a
        bb = np.empty(len(b), dt)
        bb[:] = b
        check_all(aa, bb, i1, i2, c, dt)

        # test for structured arrays
        dt = [('', 'i'), ('', 'i')]
        aa = np.array(list(zip(a, a)), dt)
        bb = np.array(list(zip(b, b)), dt)
        check_all(aa, bb, i1, i2, c, dt)

        # test for ticket #2799
        aa = [1. + 0.j, 1 - 1.j, 1]
        assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])

        # test for ticket #4785
        a = [(1, 2), (1, 2), (2, 3)]
        unq = [1, 2, 3]
        inv = [0, 1, 0, 1, 1, 2]
        a1 = unique(a)
        assert_array_equal(a1, unq)
        a2, a2_inv = unique(a, return_inverse=True)
        assert_array_equal(a2, unq)
        assert_array_equal(a2_inv, inv)

        # test for chararrays with return_inverse (gh-5099)
        a = np.chararray(5)
        a[...] = ''
        a2, a2_inv = np.unique(a, return_inverse=True)
        assert_array_equal(a2_inv, np.zeros(5))

        # test for ticket #9137
        a = []
        a1_idx = np.unique(a, return_index=True)[1]
        a2_inv = np.unique(a, return_inverse=True)[1]
        a3_idx, a3_inv = np.unique(a, return_index=True,
                                   return_inverse=True)[1:]
        assert_equal(a1_idx.dtype, np.intp)
        assert_equal(a2_inv.dtype, np.intp)
        assert_equal(a3_idx.dtype, np.intp)
        assert_equal(a3_inv.dtype, np.intp)

        # test for ticket #2111 - float
        a = [2.0, np.nan, 1.0, np.nan]
        ua = [1.0, 2.0, np.nan]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket #2111 - complex
        a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan),
             complex(1.0, np.nan)]
        ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)]
        ua_idx = [2, 0, 3]
        ua_inv = [1, 2, 0, 2, 2]
        ua_cnt = [1, 1, 3]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket #2111 - datetime64
        nat = np.datetime64('nat')
        a = [np.datetime64('2020-12-26'), nat,
             np.datetime64('2020-12-24'), nat]
        ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket #2111 - timedelta
        nat = np.timedelta64('nat')
        a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]
        ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for gh-19300
        all_nans = [np.nan] * 4
        ua = [np.nan]
        ua_idx = [0]
        ua_inv = [0, 0, 0, 0]
        ua_cnt = [4]
        assert_equal(np.unique(all_nans), ua)
        assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))

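    def test_unique_returns_example(self):
        # Illustrative sketch (added here, not upstream): the optional
        # return values of unique() reconstruct the input and count
        # occurrences.
        a = np.array([2, 1, 2, 3])
        values, index, inverse, counts = np.unique(
            a, return_index=True, return_inverse=True, return_counts=True)
        assert_array_equal(a[index], values)
        assert_array_equal(values[inverse], a)
        assert_array_equal(counts, [1, 2, 1])
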
    def test_unique_axis_errors(self):
        assert_raises(TypeError, self._run_axis_tests, object)
        assert_raises(TypeError, self._run_axis_tests,
                      [('a', int), ('b', object)])

        assert_raises(np.AxisError, unique, np.arange(10), axis=2)
        assert_raises(np.AxisError, unique, np.arange(10), axis=-2)

    def test_unique_axis_list(self):
        msg = "Unique failed on list of lists"
        inp = [[0, 1, 0], [0, 1, 0]]
        inp_arr = np.asarray(inp)
        assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
        assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)

    def test_unique_axis(self):
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        types.append([('a', int), ('b', int)])
        types.append([('a', int), ('b', float)])

        for dtype in types:
            self._run_axis_tests(dtype)

        msg = 'Non-bitwise-equal booleans test failed'
        data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
        result = np.array([[False, True], [True, True]], dtype=bool)
        assert_array_equal(unique(data, axis=0), result, msg)

        msg = 'Negative zero equality test failed'
        data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
        result = np.array([[-0.0, 0.0]])
        assert_array_equal(unique(data, axis=0), result, msg)

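    def test_unique_axis_example(self):
        # Illustrative sketch (added here, not upstream): with axis=0,
        # whole rows are compared, so duplicate rows collapse while
        # columns are left intact.
        data = np.array([[1, 0], [1, 0], [0, 1]])
        assert_array_equal(np.unique(data, axis=0),
                           np.array([[0, 1], [1, 0]]))
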
    @pytest.mark.parametrize("axis", [0, -1])
    def test_unique_1d_with_axis(self, axis):
        x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
        uniq = unique(x, axis=axis)
        assert_array_equal(uniq, [1, 2, 3, 4])

    def test_unique_axis_zeros(self):
        # issue 15559
        single_zero = np.empty(shape=(2, 0), dtype=np.int8)
        uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)

        # there's 1 element of shape (0,) along axis 0
        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(1, 0)))
        assert_array_equal(idx, np.array([0]))
        assert_array_equal(inv, np.array([0, 0]))
        assert_array_equal(cnt, np.array([2]))

        # there are 0 elements of shape (2,) along axis 1
        uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)

        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(2, 0)))
        assert_array_equal(idx, np.array([]))
        assert_array_equal(inv, np.array([]))
        assert_array_equal(cnt, np.array([]))

        # test a "complicated" shape
        shape = (0, 2, 0, 3, 0, 4, 0)
        multiple_zeros = np.empty(shape=shape)
        for axis in range(len(shape)):
            expected_shape = list(shape)
            if shape[axis] == 0:
                expected_shape[axis] = 0
            else:
                expected_shape[axis] = 1

            assert_array_equal(unique(multiple_zeros, axis=axis),
                               np.empty(shape=expected_shape))

    def test_unique_masked(self):
        # issue 8664
        x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
                     dtype='uint8')
        y = np.ma.masked_equal(x, 0)

        v = np.unique(y)
        v2, i, c = np.unique(y, return_index=True, return_counts=True)

        msg = 'Unique returned different results when asked for index'
        assert_array_equal(v.data, v2.data, msg)
        assert_array_equal(v.mask, v2.mask, msg)

    def test_unique_sort_order_with_axis(self):
        # These tests fail if sorting along axis is done by treating subarrays
        # as unsigned byte strings. See gh-10495.
        fmt = "sort order incorrect for integer type '%s'"
        for dt in 'bhilq':
            a = np.array([[-1], [0]], dt)
            b = np.unique(a, axis=0)
            assert_array_equal(a, b, fmt % dt)

    def _run_axis_tests(self, dtype):
        data = np.array([[0, 1, 0, 0],
                         [1, 0, 0, 0],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]]).astype(dtype)

        msg = 'Unique with 1d array and axis=0 failed'
        result = np.array([0, 1])
        assert_array_equal(unique(data), result.astype(dtype), msg)

        msg = 'Unique with 2d array and axis=0 failed'
        result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
        assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)

        msg = 'Unique with 2d array and axis=1 failed'
        result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
        assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)

        msg = 'Unique with 3d array and axis=2 failed'
        data3d = np.array([[[1, 1],
                            [1, 0]],
                           [[0, 1],
                            [0, 0]]]).astype(dtype)
        result = np.take(data3d, [1, 0], axis=2)
        assert_array_equal(unique(data3d, axis=2), result, msg)

        uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=0"
        assert_array_equal(data[idx], uniq, msg)
        msg = "Unique's return_inverse=True failed with axis=0"
        assert_array_equal(uniq[inv], data, msg)
        msg = "Unique's return_counts=True failed with axis=0"
        assert_array_equal(cnt, np.array([2, 2]), msg)

        uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=1"
        assert_array_equal(data[:, idx], uniq, msg)
        msg = "Unique's return_inverse=True failed with axis=1"
        assert_array_equal(uniq[:, inv], data, msg)
        msg = "Unique's return_counts=True failed with axis=1"
        assert_array_equal(cnt, np.array([2, 1, 1]), msg)

    def test_unique_nanequals(self):
        # issue 20326
        a = np.array([1, 1, np.nan, np.nan, np.nan])
        unq = np.unique(a)
        not_unq = np.unique(a, equal_nan=False)
        assert_array_equal(unq, np.array([1, np.nan]))
        assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan]))
@@ -0,0 +1,46 @@
from operator import mul
from functools import reduce

import numpy as np
from numpy.random import randint
from numpy.lib import Arrayterator
from numpy.testing import assert_


def test():
    np.random.seed(np.arange(10))

    # Create a random array
    ndims = randint(5)+1
    shape = tuple(randint(10)+1 for dim in range(ndims))
    els = reduce(mul, shape)
    a = np.arange(els)
    a.shape = shape

    buf_size = randint(2*els)
    b = Arrayterator(a, buf_size)

    # Check that each block has at most ``buf_size`` elements
    for block in b:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that all elements are iterated correctly
    assert_(list(b.flat) == list(a.flat))

    # Slice arrayterator
    start = [randint(dim) for dim in shape]
    stop = [randint(dim)+1 for dim in shape]
    step = [randint(dim)+1 for dim in shape]
    slice_ = tuple(slice(*t) for t in zip(start, stop, step))
    c = b[slice_]
    d = a[slice_]

    # Check that each block has at most ``buf_size`` elements
    for block in c:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that the arrayterator is sliced correctly
    assert_(np.all(c.__array__() == d))

    # Check that all elements are iterated correctly
    assert_(list(c.flat) == list(d.flat))
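

def test_buffered_iteration_example():
    # Illustrative sketch (added here, not part of the upstream file):
    # Arrayterator walks a large array in buffers of at most ``buf_size``
    # elements while preserving element order.
    a = np.arange(12).reshape(3, 4)
    b = Arrayterator(a, 4)
    for block in b:
        assert_(len(block.flat) <= 4)
    assert_(list(b.flat) == list(a.flat))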