virtual environment teil20b

Olli Graf
2023-09-01 16:01:37 +02:00
parent c9aee44812
commit c04c88e2af
7025 changed files with 1325157 additions and 0 deletions

File diff suppressed because it is too large.


@@ -0,0 +1,89 @@
"""
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from pandas.compat import is_platform_windows
import pandas as pd
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
max_size=10,
min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
max_size=10,
min_size=3,
)
OPTIONAL_ONE_OF_ALL = st.one_of(
OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT
)
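# Hedged usage sketch (added; not part of the original file): any of the
# OPTIONAL_* strategies above can drive a property-based test. The test name
# and body here are assumptions, purely for illustration:
#
#     from hypothesis import given
#
#     @given(values=OPTIONAL_ONE_OF_ALL)
#     def test_series_accepts_optional_values(values):
#         # Constructing a Series from a list containing None should not raise.
#         pd.Series(values)
#
# The platform check below narrows the strategy's lower bound, presumably
# because pre-1900 datetimes are unreliable on Windows.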
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
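# Note (added comment): the strategy below pins min_value and max_value to the
# same instant, so it always yields 1900-01-01 and only varies the optional tz.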
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
min_value=pd.Timestamp.min.to_pydatetime(warn=False),
max_value=pd.Timestamp.max.to_pydatetime(warn=False),
)
INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
YQM_OFFSET = st.one_of(
*map(
st.from_type,
[
MonthBegin,
MonthEnd,
BMonthBegin,
BMonthEnd,
QuarterBegin,
QuarterEnd,
BQuarterBegin,
BQuarterEnd,
YearBegin,
YearEnd,
BYearBegin,
BYearEnd,
],
)
)
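# Hedged usage sketch (added; not part of the original file): YQM_OFFSET draws
# an arbitrary month/quarter/year offset instance. A hypothetical test:
#
#     from hypothesis import given
#
#     @given(offset=YQM_OFFSET)
#     def test_timestamp_plus_offset(offset):
#         # Shifting a Timestamp by any of these offsets yields a Timestamp.
#         assert isinstance(pd.Timestamp(2000, 1, 1) + offset, pd.Timestamp)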


@@ -0,0 +1,170 @@
from __future__ import annotations
import gzip
import io
import pathlib
import tarfile
from typing import (
TYPE_CHECKING,
Any,
Callable,
)
import uuid
import zipfile
from pandas.compat import (
get_bz2_file,
get_lzma_file,
)
from pandas.compat._optional import import_optional_dependency
import pandas as pd
from pandas._testing.contexts import ensure_clean
if TYPE_CHECKING:
from pandas._typing import (
FilePath,
ReadPickleBuffer,
)
from pandas import (
DataFrame,
Series,
)
# ------------------------------------------------------------------
# File-IO
def round_trip_pickle(
obj: Any, path: FilePath | ReadPickleBuffer | None = None
) -> DataFrame | Series:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{uuid.uuid4()}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
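# Illustrative usage (added sketch, not in the original file): round-trip a
# small DataFrame and verify the result with pandas' testing helpers.
#
#     import pandas._testing as tm
#
#     df = pd.DataFrame({"a": [1, 2, 3]})
#     tm.assert_frame_equal(round_trip_pickle(df), df)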
def round_trip_pathlib(writer, reader, path: str | None = None):
"""
    Write an object to a file specified by a pathlib.Path and read it back.
Parameters
----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv)
    reader : callable
        IO reading function (e.g. pd.read_csv)
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
Path = pathlib.Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path)) # type: ignore[arg-type]
obj = reader(Path(path)) # type: ignore[arg-type]
return obj
def round_trip_localpath(writer, reader, path: str | None = None):
"""
    Write an object to a file specified by a py.path LocalPath and read it back.
Parameters
----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv)
    reader : callable
        IO reading function (e.g. pd.read_csv)
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
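# Illustrative usage for the two round-trip helpers above (added sketch): pass
# a bound writer and a matching reader, e.g. pickle-based IO.
#
#     df = pd.DataFrame({"a": [1, 2]})
#     result = round_trip_pathlib(df.to_pickle, pd.read_pickle)
#     # round_trip_localpath works the same way but requires the py package.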
def write_to_compressed(compression, path, data, dest: str = "test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
    dest : str, default "test"
        The destination file name within the archive (used for ZIP and tar).
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "tar":
compress_method = tarfile.TarFile
mode = "w"
file = tarfile.TarInfo(name=dest)
        buf = io.BytesIO(data)  # renamed from ``bytes`` to avoid shadowing the builtin
        file.size = len(data)
        args = (file, buf)
method = "addfile"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = get_bz2_file()
elif compression == "zstd":
compress_method = import_optional_dependency("zstandard").open
elif compression == "xz":
compress_method = get_lzma_file()
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
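# Illustrative usage (added sketch; the target path is an assumption): write
# bytes to a gzip file and read them back with the standard library.
#
#     import os
#     import tempfile
#
#     target = os.path.join(tempfile.gettempdir(), "sample.gz")
#     write_to_compressed("gzip", target, b"hello world")
#     with gzip.open(target, "rb") as f:
#         assert f.read() == b"hello world"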


@@ -0,0 +1,220 @@
from __future__ import annotations
from contextlib import (
contextmanager,
nullcontext,
)
import re
import sys
from typing import (
TYPE_CHECKING,
Literal,
cast,
)
import warnings
if TYPE_CHECKING:
from collections.abc import (
Generator,
Sequence,
)
@contextmanager
def assert_produces_warning(
expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning,
filter_level: Literal[
"error", "ignore", "always", "default", "module", "once"
] = "always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: str | None = None,
) -> Generator[list[warnings.WarningMessage], None, None]:
"""
Context manager for running code expected to either raise a specific warning,
multiple specific warnings, or not raise any warnings. Verifies that the code
raises the expected warning(s), and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
    expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning
        The type of warning expected to be emitted. ``Warning`` is the base
        class for all warnings. To check for multiple warning types, pass
        them as a tuple. To check that no warning is emitted, specify
        ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
    check_stacklevel : bool, default True
        If True, displays the line that called the function containing the
        warning, to show where the function was called. Otherwise, the line
        that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
    match : str, optional
        Match the warning message against this regular expression.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter(filter_level)
try:
yield w
finally:
if expected_warning:
expected_warning = cast(type[Warning], expected_warning)
_assert_caught_expected_warning(
caught_warnings=w,
expected_warning=expected_warning,
match=match,
check_stacklevel=check_stacklevel,
)
if raise_on_extra_warnings:
_assert_caught_no_extra_warnings(
caught_warnings=w,
expected_warning=expected_warning,
)
def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs):
"""
    Return a context manager that checks for the given warning if ``condition``
    is True, and does nothing otherwise.
"""
if condition:
return assert_produces_warning(warning, **kwargs)
else:
return nullcontext()
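# Illustrative usage (added sketch; ``flag`` and ``under_test`` are
# hypothetical): assert the warning only when the condition holds, otherwise
# run the block without any checking.
#
#     with maybe_produces_warning(FutureWarning, condition=flag):
#         under_test()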
def _assert_caught_expected_warning(
*,
caught_warnings: Sequence[warnings.WarningMessage],
expected_warning: type[Warning],
match: str | None,
check_stacklevel: bool,
) -> None:
"""Assert that there was the expected warning among the caught warnings."""
saw_warning = False
matched_message = False
unmatched_messages = []
for actual_warning in caught_warnings:
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel:
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None:
if re.search(match, str(actual_warning.message)):
matched_message = True
else:
unmatched_messages.append(actual_warning.message)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching '{match}'. The emitted warning messages are "
f"{unmatched_messages}"
)
def _assert_caught_no_extra_warnings(
*,
caught_warnings: Sequence[warnings.WarningMessage],
expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
) -> None:
"""Assert that no extra warnings apart from the expected ones are caught."""
extra_warnings = []
for actual_warning in caught_warnings:
if _is_unexpected_warning(actual_warning, expected_warning):
# GH#38630 pytest.filterwarnings does not suppress these.
if actual_warning.category == ResourceWarning:
# GH 44732: Don't make the CI flaky by filtering SSL-related
# ResourceWarning from dependencies
if "unclosed <ssl.SSLSocket" in str(actual_warning.message):
continue
# GH 44844: Matplotlib leaves font files open during the entire process
# upon import. Don't make CI flaky if ResourceWarning raised
# due to these open files.
if any("matplotlib" in mod for mod in sys.modules):
continue
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if extra_warnings:
raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
def _is_unexpected_warning(
actual_warning: warnings.WarningMessage,
expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
) -> bool:
"""Check if the actual warning issued is unexpected."""
if actual_warning and not expected_warning:
return True
expected_warning = cast(type[Warning], expected_warning)
return bool(not issubclass(actual_warning.category, expected_warning))
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import (
getframeinfo,
stack,
)
caller = getframeinfo(stack()[4][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg

File diff suppressed because it is too large.


@@ -0,0 +1,29 @@
"""
Helpers for sharing tests between DataFrame/Series
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from pandas import DataFrame
if TYPE_CHECKING:
from pandas._typing import DtypeObj
def get_dtype(obj) -> DtypeObj:
if isinstance(obj, DataFrame):
# Note: we are assuming only one column
return obj.dtypes.iat[0]
else:
return obj.dtype
def get_obj(df: DataFrame, klass):
"""
For sharing tests using frame_or_series, either return the DataFrame
    unchanged or return its first column as a Series.
"""
if klass is DataFrame:
return df
return df._ixs(0, axis=1)
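# Illustrative usage (added sketch; in practice ``klass`` comes from the
# frame_or_series pytest fixture):
#
#     from pandas import Series
#
#     df = DataFrame({"a": [1.0, 2.0]})
#     assert get_dtype(get_obj(df, Series)) == "float64"
#     assert get_dtype(get_obj(df, DataFrame)) == "float64"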


@@ -0,0 +1,216 @@
from __future__ import annotations
from contextlib import contextmanager
import os
from pathlib import Path
import tempfile
from typing import (
IO,
TYPE_CHECKING,
Any,
)
import uuid
from pandas.compat import PYPY
from pandas.errors import ChainedAssignmentError
from pandas import set_option
from pandas.io.common import get_handle
if TYPE_CHECKING:
from collections.abc import Generator
from pandas._typing import (
BaseBuffer,
CompressionOptions,
FilePath,
)
@contextmanager
def decompress_file(
path: FilePath | BaseBuffer, compression: CompressionOptions
) -> Generator[IO[bytes], None, None]:
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
        Name of the compression to use when decompressing.
Returns
-------
file object
"""
with get_handle(path, "rb", compression=compression, is_text=False) as handle:
yield handle.handle
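# Illustrative usage (added sketch; the file name is an assumption):
#
#     with decompress_file("data.csv.gz", compression="gzip") as fh:
#         raw = fh.read()  # decompressed bytes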
@contextmanager
def set_timezone(tz: str) -> Generator[None, None, None]:
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime(2021, 1, 1))
...
'EST'
"""
import time
def setTZ(tz) -> None:
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
@contextmanager
def ensure_clean(
filename=None, return_filelike: bool = False, **kwargs: Any
) -> Generator[Any, None, None]:
"""
    Get a temporary path and remove the file when leaving the context.

    This implementation does not use tempfile.mkstemp to avoid having a file
    handle. If the code using the returned path wants to delete the file itself,
    Windows requires that no program has a file handle to it.
Parameters
----------
    filename : str (optional)
        Suffix of the created file.
    return_filelike : bool (default False)
        If True, returns a file-like object which is *always* cleaned up. This is
        necessary for savefig and other functions which want to append extensions.
**kwargs
Additional keywords are passed to open().
"""
folder = Path(tempfile.gettempdir())
if filename is None:
filename = ""
filename = str(uuid.uuid4()) + filename
path = folder / filename
path.touch()
handle_or_str: str | IO = str(path)
encoding = kwargs.pop("encoding", None)
if return_filelike:
kwargs.setdefault("mode", "w+b")
if encoding is None and "b" not in kwargs["mode"]:
encoding = "utf-8"
handle_or_str = open(path, encoding=encoding, **kwargs)
try:
yield handle_or_str
finally:
if not isinstance(handle_or_str, str):
handle_or_str.close()
if path.is_file():
path.unlink()
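# Illustrative usage (added sketch): the argument is used as a suffix, so
# passing an extension yields a throwaway path that is removed afterwards.
#
#     import pandas as pd
#
#     with ensure_clean(".csv") as path:
#         pd.DataFrame({"a": [1]}).to_csv(path, index=False)
#         result = pd.read_csv(path)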
@contextmanager
def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
try:
yield
finally:
csv.unregister_dialect(name)
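# Illustrative usage (added sketch; the dialect name "pipes" is an assumption):
#
#     import io
#     import pandas as pd
#
#     with with_csv_dialect("pipes", delimiter="|"):
#         df = pd.read_csv(io.StringIO("a|b\n1|2"), dialect="pipes")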
@contextmanager
def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
set_option("compute.use_numexpr", use)
expr._MIN_ELEMENTS = min_elements
try:
yield
finally:
expr._MIN_ELEMENTS = oldmin
set_option("compute.use_numexpr", olduse)
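# Illustrative usage (added sketch; ``df`` is a hypothetical DataFrame):
# temporarily force evaluation without numexpr, e.g. to compare both paths.
#
#     with use_numexpr(False):
#         result = df + df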
def raises_chained_assignment_error(extra_warnings=(), extra_match=()):
from pandas._testing import assert_produces_warning
if PYPY and not extra_warnings:
from contextlib import nullcontext
return nullcontext()
elif PYPY and extra_warnings:
return assert_produces_warning(
extra_warnings,
match="|".join(extra_match),
)
else:
match = (
"A value is trying to be set on a copy of a DataFrame or Series "
"through chained assignment"
)
return assert_produces_warning(
(ChainedAssignmentError, *extra_warnings),
match="|".join((match, *extra_match)),
)
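# Illustrative usage (added sketch): under Copy-on-Write, chained assignment
# emits ChainedAssignmentError, which this context manager asserts.
#
#     import pandas as pd
#
#     df = pd.DataFrame({"a": [1, 2]})
#     with raises_chained_assignment_error():
#         df["a"][0] = 10  # chained assignment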