virtuelle Umgebungen teil20 und teil20a
This commit is contained in:
@@ -0,0 +1,7 @@
|
||||
"""
|
||||
Test files dedicated to individual (stand-alone) Series methods
|
||||
|
||||
Ideally these files/tests should correspond 1-to-1 with tests.frame.methods
|
||||
|
||||
These may also present opportunities for sharing/de-duplicating test code.
|
||||
"""
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,41 @@
|
||||
import pytest
|
||||
|
||||
from pandas import Index
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
def test_add_prefix_suffix(string_series):
|
||||
with_prefix = string_series.add_prefix("foo#")
|
||||
expected = Index([f"foo#{c}" for c in string_series.index])
|
||||
tm.assert_index_equal(with_prefix.index, expected)
|
||||
|
||||
with_suffix = string_series.add_suffix("#foo")
|
||||
expected = Index([f"{c}#foo" for c in string_series.index])
|
||||
tm.assert_index_equal(with_suffix.index, expected)
|
||||
|
||||
with_pct_prefix = string_series.add_prefix("%")
|
||||
expected = Index([f"%{c}" for c in string_series.index])
|
||||
tm.assert_index_equal(with_pct_prefix.index, expected)
|
||||
|
||||
with_pct_suffix = string_series.add_suffix("%")
|
||||
expected = Index([f"{c}%" for c in string_series.index])
|
||||
tm.assert_index_equal(with_pct_suffix.index, expected)
|
||||
|
||||
|
||||
def test_add_prefix_suffix_axis(string_series):
|
||||
# GH 47819
|
||||
with_prefix = string_series.add_prefix("foo#", axis=0)
|
||||
expected = Index([f"foo#{c}" for c in string_series.index])
|
||||
tm.assert_index_equal(with_prefix.index, expected)
|
||||
|
||||
with_pct_suffix = string_series.add_suffix("#foo", axis=0)
|
||||
expected = Index([f"{c}#foo" for c in string_series.index])
|
||||
tm.assert_index_equal(with_pct_suffix.index, expected)
|
||||
|
||||
|
||||
def test_add_prefix_suffix_invalid_axis(string_series):
    """A Series has only axis 0; axis=1 must raise ValueError for both methods."""
    for add_affix in (string_series.add_prefix, string_series.add_suffix):
        with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
            add_affix("foo#", axis=1)
|
@@ -0,0 +1,249 @@
|
||||
from datetime import timezone
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Series,
|
||||
date_range,
|
||||
period_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "first_slice,second_slice",
    [
        [[2, None], [None, -5]],
        [[None, 0], [None, -5]],
        [[None, -5], [None, 0]],
        [[None, 0], [None, 0]],
    ],
)
@pytest.mark.parametrize("fill", [None, -1])
def test_align(datetime_series, first_slice, second_slice, join_type, fill):
    """Series.align must match a manual index-join + reindex (+ fillna)."""
    left = datetime_series[slice(*first_slice)]
    right = datetime_series[slice(*second_slice)]

    aligned_left, aligned_right = left.align(right, join=join_type, fill_value=fill)

    joined = left.index.join(right.index, how=join_type)
    if fill is not None:
        # any labels outside the joined index must carry the fill value
        extra_left = aligned_left.index.difference(joined)
        extra_right = aligned_right.index.difference(joined)
        if len(extra_left) > 0:
            assert (aligned_left.reindex(extra_left) == fill).all()
        if len(extra_right) > 0:
            assert (aligned_right.reindex(extra_right) == fill).all()

    # expected results built by hand
    expected_left = left.reindex(joined)
    expected_right = right.reindex(joined)

    if fill is not None:
        expected_left = expected_left.fillna(fill)
        expected_right = expected_right.fillna(fill)

    tm.assert_series_equal(aligned_left, expected_left)
    tm.assert_series_equal(aligned_right, expected_right)
    # alignment must preserve the fixture's series name on every output
    assert aligned_left.name == "ts"
    assert expected_left.name == "ts"
    assert aligned_right.name == "ts"
    assert expected_right.name == "ts"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "first_slice,second_slice",
    [
        [[2, None], [None, -5]],
        [[None, 0], [None, -5]],
        [[None, -5], [None, 0]],
        [[None, 0], [None, 0]],
    ],
)
@pytest.mark.parametrize("method", ["pad", "bfill"])
@pytest.mark.parametrize("limit", [None, 1])
def test_align_fill_method(
    datetime_series, first_slice, second_slice, join_type, method, limit
):
    """align(..., method=..., limit=...) must equal a manual reindex + fillna;
    both deprecated 'method' keywords must emit FutureWarning."""
    a = datetime_series[slice(*first_slice)]
    b = datetime_series[slice(*second_slice)]

    # the method/limit/fill_axis keywords of Series.align are deprecated
    msg = (
        "The 'method', 'limit', and 'fill_axis' keywords in Series.align "
        "are deprecated"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        aa, ab = a.align(b, join=join_type, method=method, limit=limit)

    # build the expected result by hand: join the indexes, then reindex
    join_index = a.index.join(b.index, how=join_type)
    ea = a.reindex(join_index)
    eb = b.reindex(join_index)

    # fillna(method=...) is itself deprecated; both fills run under one warning
    msg2 = "Series.fillna with 'method' is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg2):
        ea = ea.fillna(method=method, limit=limit)
        eb = eb.fillna(method=method, limit=limit)

    tm.assert_series_equal(aa, ea)
    tm.assert_series_equal(ab, eb)
|
||||
|
||||
|
||||
def test_align_nocopy(datetime_series, using_copy_on_write):
    """copy=False shares data with the source (mutating the result is visible
    in the original), except under Copy-on-Write where mutation never
    propagates back; the default (copy) never shares."""
    b = datetime_series[:5].copy()

    # do copy
    a = datetime_series.copy()
    ra, _ = a.align(b, join="left")
    ra[:5] = 5
    assert not (a[:5] == 5).any()

    # do not copy
    a = datetime_series.copy()
    ra, _ = a.align(b, join="left", copy=False)
    ra[:5] = 5
    if using_copy_on_write:
        # CoW mode: the write to the result must not reach the source
        assert not (a[:5] == 5).any()
    else:
        # legacy mode: copy=False shares data, so the mutation is visible in `a`
        assert (a[:5] == 5).all()

    # do copy
    a = datetime_series.copy()
    b = datetime_series[:5].copy()
    _, rb = a.align(b, join="right")
    rb[:3] = 5
    assert not (b[:3] == 5).any()

    # do not copy
    a = datetime_series.copy()
    b = datetime_series[:5].copy()
    _, rb = a.align(b, join="right", copy=False)
    rb[:2] = 5
    if using_copy_on_write:
        assert not (b[:2] == 5).any()
    else:
        assert (b[:2] == 5).all()
|
||||
|
||||
|
||||
def test_align_same_index(datetime_series, using_copy_on_write):
    """Aligning a series with itself: with copy=False the legacy mode returns
    the identical index object, while CoW only guarantees Index.is_
    equivalence; copy=True always returns a distinct (but .is_-equivalent)
    index object."""
    a, b = datetime_series.align(datetime_series, copy=False)
    if not using_copy_on_write:
        # legacy: no copy means the very same index object comes back
        assert a.index is datetime_series.index
        assert b.index is datetime_series.index
    else:
        # CoW: object identity is not guaranteed, only Index.is_
        assert a.index.is_(datetime_series.index)
        assert b.index.is_(datetime_series.index)

    a, b = datetime_series.align(datetime_series, copy=True)
    # fresh objects, but still regarded as the same index per Index.is_
    assert a.index is not datetime_series.index
    assert b.index is not datetime_series.index
    assert a.index.is_(datetime_series.index)
    assert b.index.is_(datetime_series.index)
|
||||
|
||||
|
||||
def test_align_multiindex():
    """GH 10665: aligning a MultiIndexed Series against a single-level index
    must give mirror-image results when the operands (and join side) swap."""
    midx = pd.MultiIndex.from_product(
        [range(2), range(3), range(2)], names=("a", "b", "c")
    )
    flat_idx = pd.Index(range(2), name="b")
    multi_ser = Series(np.arange(12, dtype="int64"), index=midx)
    flat_ser = Series(np.arange(2, dtype="int64"), index=flat_idx)

    # left-join from the multi side mirrors right-join from the flat side
    res1l, res1r = multi_ser.align(flat_ser, join="left")
    res2l, res2r = flat_ser.align(multi_ser, join="right")

    tm.assert_series_equal(multi_ser, res1l)
    tm.assert_series_equal(multi_ser, res2r)
    expected_flat = Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
    tm.assert_series_equal(expected_flat, res1r)
    tm.assert_series_equal(expected_flat, res2l)

    # right-join from the multi side mirrors left-join from the flat side
    res1l, res1r = multi_ser.align(flat_ser, join="right")
    res2l, res2r = flat_ser.align(multi_ser, join="left")

    exp_idx = pd.MultiIndex.from_product(
        [range(2), range(2), range(2)], names=("a", "b", "c")
    )
    expected_multi = Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
    tm.assert_series_equal(expected_multi, res1l)
    tm.assert_series_equal(expected_multi, res2r)
    expected_flat = Series([0, 0, 1, 1] * 2, index=exp_idx)
    tm.assert_series_equal(expected_flat, res1r)
    tm.assert_series_equal(expected_flat, res2l)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
def test_align_with_dataframe_method(method):
    """GH31788: aligning an already-aligned Series/DataFrame pair with any fill
    method is a no-op, but passing the deprecated keyword must warn."""
    ser = Series(range(3), index=range(3))
    df = pd.DataFrame(0.0, index=range(3), columns=range(3))

    # passing 'method' (even method=None) triggers the deprecation warning
    msg = (
        "The 'method', 'limit', and 'fill_axis' keywords in Series.align "
        "are deprecated"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result_ser, result_df = ser.align(df, method=method)
    # indexes already match, so both sides come back unchanged
    tm.assert_series_equal(result_ser, ser)
    tm.assert_frame_equal(result_df, df)
|
||||
|
||||
|
||||
def test_align_dt64tzindex_mismatched_tzs():
    """Aligning tz-aware series with different zones converts both indexes to UTC."""
    idx_eastern = date_range("2001", periods=5, freq="H", tz="US/Eastern")
    ser_eastern = Series(
        np.random.default_rng(2).standard_normal(len(idx_eastern)), index=idx_eastern
    )
    ser_central = ser_eastern.tz_convert("US/Central")

    # different timezones convert to UTC
    aligned_east, aligned_cent = ser_eastern.align(ser_central)
    assert aligned_east.index.tz is timezone.utc
    assert aligned_cent.index.tz is timezone.utc
|
||||
|
||||
|
||||
def test_align_periodindex(join_type):
|
||||
rng = period_range("1/1/2000", "1/1/2010", freq="A")
|
||||
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
|
||||
|
||||
# TODO: assert something?
|
||||
ts.align(ts[::2], join=join_type)
|
||||
|
||||
|
||||
def test_align_left_fewer_levels():
    """GH#45224: when the left side has a subset of the right's MultiIndex
    levels, the aligned index is the shared levels followed by the extras."""
    left = Series([2], index=pd.MultiIndex.from_tuples([(1, 3)], names=["a", "c"]))
    right = Series(
        [1], index=pd.MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"])
    )

    result_left, result_right = left.align(right)

    # both outputs share one combined index: shared a/c first, then right-only b
    combined = pd.MultiIndex.from_tuples([(1, 3, 2)], names=["a", "c", "b"])
    tm.assert_series_equal(result_left, Series([2], index=combined))
    tm.assert_series_equal(result_right, Series([1], index=combined))
|
||||
|
||||
|
||||
def test_align_left_different_named_levels():
    """GH#45224: each side owns a level the other lacks; the aligned index
    carries the shared levels first, then left-only 'd', then right-only 'b'."""
    left = Series(
        [2], index=pd.MultiIndex.from_tuples([(1, 4, 3)], names=["a", "d", "c"])
    )
    right = Series(
        [1], index=pd.MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"])
    )

    result_left, result_right = left.align(right)

    # one combined index for both outputs: a, c shared, then d, then b
    combined = pd.MultiIndex.from_tuples(
        [(1, 3, 4, 2)], names=["a", "c", "d", "b"]
    )
    tm.assert_series_equal(result_left, Series([2], index=combined))
    tm.assert_series_equal(result_right, Series([1], index=combined))
|
@@ -0,0 +1,81 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
Timestamp,
|
||||
isna,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesArgsort:
    """Tests for Series.argsort, including the deprecated NA handling
    (missing values currently map to -1 under a FutureWarning)."""

    def test_argsort_axis(self):
        # GH#54257 — Series only has axis 0; any other axis must raise
        ser = Series(range(3))

        msg = "No axis named 2 for object type Series"
        with pytest.raises(ValueError, match=msg):
            ser.argsort(axis=2)

    def test_argsort_numpy(self, datetime_series):
        """np.argsort on a Series must agree with np.argsort on its values."""
        ser = datetime_series

        res = np.argsort(ser).values
        expected = np.argsort(np.array(ser))
        tm.assert_numpy_array_equal(res, expected)

        # with missing values
        ts = ser.copy()
        ts[::2] = np.nan

        # NA handling in argsort is deprecated; warning originates inside numpy's
        # dispatch, hence check_stacklevel=False
        msg = "The behavior of Series.argsort in the presence of NA values"
        with tm.assert_produces_warning(
            FutureWarning, match=msg, check_stacklevel=False
        ):
            result = np.argsort(ts)[1::2]
        expected = np.argsort(np.array(ts.dropna()))

        tm.assert_numpy_array_equal(result.values, expected)

    def test_argsort(self, datetime_series):
        """argsort returns an integer Series; NaT positions yield -1 (deprecated)."""
        argsorted = datetime_series.argsort()
        assert issubclass(argsorted.dtype.type, np.integer)

        # GH#2967 (introduced bug in 0.11-dev I think)
        s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
        assert s.dtype == "datetime64[ns]"
        shifted = s.shift(-1)
        assert shifted.dtype == "datetime64[ns]"
        assert isna(shifted[4])

        result = s.argsort()
        expected = Series(range(5), dtype=np.intp)
        tm.assert_series_equal(result, expected)

        msg = "The behavior of Series.argsort in the presence of NA values"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = shifted.argsort()
        # the trailing NaT sorts to position -1 under the deprecated behavior
        expected = Series(list(range(4)) + [-1], dtype=np.intp)
        tm.assert_series_equal(result, expected)

    def test_argsort_stable(self):
        """kind= is forwarded: mergesort/quicksort match numpy, and the two
        indexers differ on a large array with many duplicate keys."""
        s = Series(np.random.default_rng(2).integers(0, 100, size=10000))
        mindexer = s.argsort(kind="mergesort")
        qindexer = s.argsort()

        mexpected = np.argsort(s.values, kind="mergesort")
        qexpected = np.argsort(s.values, kind="quicksort")

        tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
        tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
        # passing Series to assert_numpy_array_equal must fail the type check
        msg = (
            r"ndarray Expected type <class 'numpy\.ndarray'>, "
            r"found <class 'pandas\.core\.series\.Series'> instead"
        )
        with pytest.raises(AssertionError, match=msg):
            tm.assert_numpy_array_equal(qindexer, mindexer)

    def test_argsort_preserve_name(self, datetime_series):
        # the result Series must keep the input's name
        result = datetime_series.argsort()
        assert result.name == datetime_series.name
|
@@ -0,0 +1,205 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas._libs.tslibs import IncompatibleFrequency
|
||||
|
||||
from pandas import (
|
||||
DatetimeIndex,
|
||||
PeriodIndex,
|
||||
Series,
|
||||
Timestamp,
|
||||
date_range,
|
||||
isna,
|
||||
notna,
|
||||
offsets,
|
||||
period_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesAsof:
    """Tests for Series.asof: the last non-NaN value at or before a label."""

    def test_asof_nanosecond_index_access(self):
        """asof and label lookup must work at nanosecond index resolution."""
        ts = Timestamp("20130101").as_unit("ns")._value
        dti = DatetimeIndex([ts + 50 + i for i in range(100)])
        ser = Series(np.random.default_rng(2).standard_normal(100), index=dti)

        first_value = ser.asof(ser.index[0])

        # GH#46903 previously incorrectly was "day"
        assert dti.resolution == "nanosecond"

        # this used to not work bc parsing was done by dateutil that didn't
        # handle nanoseconds
        assert first_value == ser["2013-01-01 00:00:00.000000050"]

        expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
        assert first_value == ser[Timestamp(expected_ts)]

    def test_basic(self):
        """asof over a denser date grid fills through a NaN window with the
        last valid value and never returns NaN."""
        # array or list or dates
        N = 50
        rng = date_range("1/1/1990", periods=N, freq="53s")
        ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)
        ts.iloc[15:30] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        result = ts.asof(dates)
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]

        # a plain list of dates must behave the same as a DatetimeIndex
        result = ts.asof(list(dates))
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]

        # inside the NaN window [lb, ub) every asof equals the last valid value
        mask = (result.index >= lb) & (result.index < ub)
        rs = result[mask]
        assert (rs == ts[lb]).all()

        # the first label at/after ub picks up the value at ub again
        val = result[result.index[result.index >= ub][0]]
        assert ts[ub] == val

    def test_scalar(self):
        """Scalar lookups: inside a NaN run asof returns the value just before
        the run; before the first label it returns NaN."""
        N = 30
        rng = date_range("1/1/1990", periods=N, freq="53s")
        # Explicit cast to float avoid implicit cast when setting nan
        ts = Series(np.arange(N), index=rng, dtype="float")
        ts.iloc[5:10] = np.nan
        ts.iloc[15:20] = np.nan

        val1 = ts.asof(ts.index[7])
        val2 = ts.asof(ts.index[19])

        # positions 7 and 19 sit inside NaN runs; the last valid values are
        # at positions 4 and 14 respectively
        assert val1 == ts.iloc[4]
        assert val2 == ts.iloc[14]

        # accepts strings
        val1 = ts.asof(str(ts.index[7]))
        assert val1 == ts.iloc[4]

        # in there
        result = ts.asof(ts.index[3])
        assert result == ts.iloc[3]

        # no as of value
        d = ts.index[0] - offsets.BDay()
        assert np.isnan(ts.asof(d))

    def test_with_nan(self):
        """asof on a resampled series: NaN stretches (including a trailing one)
        are forward-filled from the last valid observation."""
        # basic asof test
        rng = date_range("1/1/2000", "1/2/2000", freq="4h")
        s = Series(np.arange(len(rng)), index=rng)
        r = s.resample("2h").mean()

        result = r.asof(r.index)
        expected = Series(
            [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0],
            index=date_range("1/1/2000", "1/2/2000", freq="2h"),
        )
        tm.assert_series_equal(result, expected)

        # interior NaN run is filled from the value before it
        r.iloc[3:5] = np.nan
        result = r.asof(r.index)
        expected = Series(
            [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0],
            index=date_range("1/1/2000", "1/2/2000", freq="2h"),
        )
        tm.assert_series_equal(result, expected)

        # trailing NaN run likewise carries the last valid value forward
        r.iloc[-3:] = np.nan
        result = r.asof(r.index)
        expected = Series(
            [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0],
            index=date_range("1/1/2000", "1/2/2000", freq="2h"),
        )
        tm.assert_series_equal(result, expected)

    def test_periodindex(self):
        """asof on a PeriodIndex mirrors the DatetimeIndex behavior, and a
        mismatched frequency must raise IncompatibleFrequency."""
        # array or list or dates
        N = 50
        rng = period_range("1/1/1990", periods=N, freq="H")
        ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)
        ts.iloc[15:30] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="37min")

        result = ts.asof(dates)
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]

        result = ts.asof(list(dates))
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]

        # inside the NaN window everything equals the last valid value
        pix = PeriodIndex(result.index.values, freq="H")
        mask = (pix >= lb) & (pix < ub)
        rs = result[mask]
        assert (rs == ts[lb]).all()

        ts.iloc[5:10] = np.nan
        ts.iloc[15:20] = np.nan

        val1 = ts.asof(ts.index[7])
        val2 = ts.asof(ts.index[19])

        assert val1 == ts.iloc[4]
        assert val2 == ts.iloc[14]

        # accepts strings
        val1 = ts.asof(str(ts.index[7]))
        assert val1 == ts.iloc[4]

        # in there
        assert ts.asof(ts.index[3]) == ts.iloc[3]

        # no as of value
        d = ts.index[0].to_timestamp() - offsets.BDay()
        assert isna(ts.asof(d))

        # Mismatched freq
        msg = "Input has different freq"
        with pytest.raises(IncompatibleFrequency, match=msg):
            ts.asof(rng.asfreq("D"))

    def test_errors(self):
        """asof requires a sorted index and rejects the subset= keyword for Series."""
        s = Series(
            [1, 2, 3],
            index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")],
        )

        # non-monotonic
        assert not s.index.is_monotonic_increasing
        with pytest.raises(ValueError, match="requires a sorted index"):
            s.asof(s.index[0])

        # subset with Series
        N = 10
        rng = date_range("1/1/1990", periods=N, freq="53s")
        s = Series(np.random.default_rng(2).standard_normal(N), index=rng)
        with pytest.raises(ValueError, match="not valid for Series"):
            s.asof(s.index[0], subset="foo")

    def test_all_nans(self):
        # GH 15713
        # series is all nans

        # testing non-default indexes
        N = 50
        rng = date_range("1/1/1990", periods=N, freq="53s")

        # an all-NaN series yields all-NaN results for array input ...
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")
        result = Series(np.nan, index=rng).asof(dates)
        expected = Series(np.nan, index=dates)
        tm.assert_series_equal(result, expected)

        # testing scalar input
        date = date_range("1/1/1990", periods=N * 3, freq="25s")[0]
        result = Series(np.nan, index=rng).asof(date)
        assert isna(result)

        # test name is propagated
        result = Series(np.nan, index=[1, 2, 3, 4], name="test").asof([4, 5])
        expected = Series(np.nan, index=[4, 5], name="test")
        tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,642 @@
|
||||
from datetime import (
|
||||
datetime,
|
||||
timedelta,
|
||||
)
|
||||
from importlib import reload
|
||||
import string
|
||||
import sys
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas._libs.tslibs import iNaT
|
||||
import pandas.util._test_decorators as td
|
||||
|
||||
from pandas import (
|
||||
NA,
|
||||
Categorical,
|
||||
CategoricalDtype,
|
||||
DatetimeTZDtype,
|
||||
Index,
|
||||
Interval,
|
||||
NaT,
|
||||
Series,
|
||||
Timedelta,
|
||||
Timestamp,
|
||||
cut,
|
||||
date_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
def rand_str(nchars: int) -> str:
    """
    Generate one random alphanumeric string of length ``nchars``.

    The original docstring said "byte string", but the function returns a
    ``str`` (it joins single-character ``np.str_`` elements).

    The generator is re-seeded on every call, so the output is deterministic
    for a given ``nchars`` — presumably intentional so parametrized test IDs
    are reproducible across runs (TODO confirm).
    """
    # one single-character element per letter/digit
    RANDS_CHARS = np.array(
        list(string.ascii_letters + string.digits), dtype=(np.str_, 1)
    )
    return "".join(np.random.default_rng(2).choice(RANDS_CHARS, nchars))
|
||||
|
||||
|
||||
class TestAstypeAPI:
    """Tests of the astype call surface: argument validation and
    dict-like dtype mappings on a Series."""

    def test_astype_unitless_dt64_raises(self):
        # GH#47844 — unit-less datetime64 targets must be rejected outright
        ser = Series(["1970-01-01", "1970-01-01", "1970-01-01"], dtype="datetime64[ns]")
        df = ser.to_frame()

        msg = "Casting to unit-less dtype 'datetime64' is not supported"
        with pytest.raises(TypeError, match=msg):
            ser.astype(np.datetime64)
        with pytest.raises(TypeError, match=msg):
            df.astype(np.datetime64)
        # the string spelling must be rejected the same way
        with pytest.raises(TypeError, match=msg):
            ser.astype("datetime64")
        with pytest.raises(TypeError, match=msg):
            df.astype("datetime64")

    def test_arg_for_errors_in_astype(self):
        # see GH#14878 — errors= only accepts 'raise' or 'ignore'
        ser = Series([1, 2, 3])

        msg = (
            r"Expected value of kwarg 'errors' to be one of \['raise', "
            r"'ignore'\]\. Supplied value is 'False'"
        )
        with pytest.raises(ValueError, match=msg):
            ser.astype(np.float64, errors=False)

        # the valid spelling must not raise
        ser.astype(np.int8, errors="raise")

    @pytest.mark.parametrize("dtype_class", [dict, Series])
    def test_astype_dict_like(self, dtype_class):
        """Dict-like dtype mappings may only be keyed by the Series name."""
        # see GH#7271
        ser = Series(range(0, 10, 2), name="abc")

        dt1 = dtype_class({"abc": str})
        result = ser.astype(dt1)
        expected = Series(["0", "2", "4", "6", "8"], name="abc")
        tm.assert_series_equal(result, expected)

        dt2 = dtype_class({"abc": "float64"})
        result = ser.astype(dt2)
        expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
        tm.assert_series_equal(result, expected)

        # any key other than the Series name must raise KeyError
        dt3 = dtype_class({"abc": str, "def": str})
        msg = (
            "Only the Series name can be used for the key in Series dtype "
            r"mappings\."
        )
        with pytest.raises(KeyError, match=msg):
            ser.astype(dt3)

        dt4 = dtype_class({0: str})
        with pytest.raises(KeyError, match=msg):
            ser.astype(dt4)

        # GH#16717
        # if dtypes provided is empty, it should error
        if dtype_class is Series:
            dt5 = dtype_class({}, dtype=object)
        else:
            dt5 = dtype_class({})

        with pytest.raises(KeyError, match=msg):
            ser.astype(dt5)
|
||||
|
||||
|
||||
class TestAstype:
|
||||
def test_astype_mixed_object_to_dt64tz(self):
|
||||
# pre-2.0 this raised ValueError bc of tz mismatch
|
||||
# xref GH#32581
|
||||
ts = Timestamp("2016-01-04 05:06:07", tz="US/Pacific")
|
||||
ts2 = ts.tz_convert("Asia/Tokyo")
|
||||
|
||||
ser = Series([ts, ts2], dtype=object)
|
||||
res = ser.astype("datetime64[ns, Europe/Brussels]")
|
||||
expected = Series(
|
||||
[ts.tz_convert("Europe/Brussels"), ts2.tz_convert("Europe/Brussels")],
|
||||
dtype="datetime64[ns, Europe/Brussels]",
|
||||
)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
@pytest.mark.parametrize("dtype", np.typecodes["All"])
|
||||
def test_astype_empty_constructor_equality(self, dtype):
|
||||
# see GH#15524
|
||||
|
||||
if dtype not in (
|
||||
"S",
|
||||
"V", # poor support (if any) currently
|
||||
"M",
|
||||
"m", # Generic timestamps raise a ValueError. Already tested.
|
||||
):
|
||||
init_empty = Series([], dtype=dtype)
|
||||
as_type_empty = Series([]).astype(dtype)
|
||||
tm.assert_series_equal(init_empty, as_type_empty)
|
||||
|
||||
@pytest.mark.parametrize("dtype", [str, np.str_])
|
||||
@pytest.mark.parametrize(
|
||||
"series",
|
||||
[
|
||||
Series([string.digits * 10, rand_str(63), rand_str(64), rand_str(1000)]),
|
||||
Series([string.digits * 10, rand_str(63), rand_str(64), np.nan, 1.0]),
|
||||
],
|
||||
)
|
||||
def test_astype_str_map(self, dtype, series):
|
||||
# see GH#4405
|
||||
result = series.astype(dtype)
|
||||
expected = series.map(str)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_float_to_period(self):
|
||||
result = Series([np.nan]).astype("period[D]")
|
||||
expected = Series([NaT], dtype="period[D]")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_no_pandas_dtype(self):
|
||||
# https://github.com/pandas-dev/pandas/pull/24866
|
||||
ser = Series([1, 2], dtype="int64")
|
||||
# Don't have NumpyEADtype in the public API, so we use `.array.dtype`,
|
||||
# which is a NumpyEADtype.
|
||||
result = ser.astype(ser.array.dtype)
|
||||
tm.assert_series_equal(result, ser)
|
||||
|
||||
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
|
||||
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
|
||||
# see GH#15524, GH#15987
|
||||
data = [1]
|
||||
ser = Series(data)
|
||||
|
||||
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
|
||||
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
|
||||
request.node.add_marker(mark)
|
||||
|
||||
msg = (
|
||||
rf"The '{dtype.__name__}' dtype has no unit\. "
|
||||
rf"Please pass in '{dtype.__name__}\[ns\]' instead."
|
||||
)
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
ser.astype(dtype)
|
||||
|
||||
def test_astype_dt64_to_str(self):
|
||||
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
|
||||
dti = date_range("2012-01-01", periods=3)
|
||||
result = Series(dti).astype(str)
|
||||
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_dt64tz_to_str(self):
|
||||
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
|
||||
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
|
||||
result = Series(dti_tz).astype(str)
|
||||
expected = Series(
|
||||
[
|
||||
"2012-01-01 00:00:00-05:00",
|
||||
"2012-01-02 00:00:00-05:00",
|
||||
"2012-01-03 00:00:00-05:00",
|
||||
],
|
||||
dtype=object,
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_datetime(self):
|
||||
ser = Series(iNaT, dtype="M8[ns]", index=range(5))
|
||||
|
||||
ser = ser.astype("O")
|
||||
assert ser.dtype == np.object_
|
||||
|
||||
ser = Series([datetime(2001, 1, 2, 0, 0)])
|
||||
|
||||
ser = ser.astype("O")
|
||||
assert ser.dtype == np.object_
|
||||
|
||||
ser = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
|
||||
|
||||
ser[1] = np.nan
|
||||
assert ser.dtype == "M8[ns]"
|
||||
|
||||
ser = ser.astype("O")
|
||||
assert ser.dtype == np.object_
|
||||
|
||||
def test_astype_datetime64tz(self):
|
||||
ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))
|
||||
|
||||
# astype
|
||||
result = ser.astype(object)
|
||||
expected = Series(ser.astype(object), dtype=object)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = Series(ser.values).dt.tz_localize("UTC").dt.tz_convert(ser.dt.tz)
|
||||
tm.assert_series_equal(result, ser)
|
||||
|
||||
# astype - object, preserves on construction
|
||||
result = Series(ser.astype(object))
|
||||
expected = ser.astype(object)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# astype - datetime64[ns, tz]
|
||||
msg = "Cannot use .astype to convert from timezone-naive"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
# dt64->dt64tz astype deprecated
|
||||
Series(ser.values).astype("datetime64[ns, US/Eastern]")
|
||||
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
# dt64->dt64tz astype deprecated
|
||||
Series(ser.values).astype(ser.dtype)
|
||||
|
||||
result = ser.astype("datetime64[ns, CET]")
|
||||
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_str_cast_dt64(self):
|
||||
# see GH#9757
|
||||
ts = Series([Timestamp("2010-01-04 00:00:00")])
|
||||
res = ts.astype(str)
|
||||
|
||||
expected = Series(["2010-01-04"])
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
|
||||
res = ts.astype(str)
|
||||
|
||||
expected = Series(["2010-01-04 00:00:00-05:00"])
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
def test_astype_str_cast_td64(self):
|
||||
# see GH#9757
|
||||
|
||||
td = Series([Timedelta(1, unit="d")])
|
||||
ser = td.astype(str)
|
||||
|
||||
expected = Series(["1 days"])
|
||||
tm.assert_series_equal(ser, expected)
|
||||
|
||||
def test_dt64_series_astype_object(self):
|
||||
dt64ser = Series(date_range("20130101", periods=3))
|
||||
result = dt64ser.astype(object)
|
||||
assert isinstance(result.iloc[0], datetime)
|
||||
assert result.dtype == np.object_
|
||||
|
||||
def test_td64_series_astype_object(self):
|
||||
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
|
||||
result = tdser.astype(object)
|
||||
assert isinstance(result.iloc[0], timedelta)
|
||||
assert result.dtype == np.object_
|
||||
|
||||
    @pytest.mark.parametrize(
        "data, dtype",
        [
            (["x", "y", "z"], "string[python]"),
            pytest.param(
                ["x", "y", "z"],
                "string[pyarrow]",
                marks=td.skip_if_no("pyarrow"),
            ),
            (["x", "y", "z"], "category"),
            (3 * [Timestamp("2020-01-01", tz="UTC")], None),
            (3 * [Interval(0, 1)], None),
        ],
    )
    @pytest.mark.parametrize("errors", ["raise", "ignore"])
    def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
        # errors="ignore" must return the input unchanged for extension
        # dtypes that cannot be cast to float; errors="raise" surfaces the
        # underlying ValueError/TypeError.
        # https://github.com/pandas-dev/pandas/issues/35471
        ser = Series(data, dtype=dtype)
        if errors == "ignore":
            expected = ser
            result = ser.astype(float, errors="ignore")
            tm.assert_series_equal(result, expected)
        else:
            # Either message may be raised depending on the source dtype.
            msg = "(Cannot cast)|(could not convert)"
            with pytest.raises((ValueError, TypeError), match=msg):
                ser.astype(float, errors=errors)
|
||||
|
||||
    @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
    def test_astype_from_float_to_str(self, dtype):
        # float -> str should render the short Python repr of each value,
        # not the numpy scalar repr.
        # https://github.com/pandas-dev/pandas/issues/36451
        ser = Series([0.1], dtype=dtype)
        result = ser.astype(str)
        expected = Series(["0.1"])
        tm.assert_series_equal(result, expected)
|
||||
|
||||
    @pytest.mark.parametrize(
        "value, string_value",
        [
            (None, "None"),
            (np.nan, "nan"),
            (NA, "<NA>"),
        ],
    )
    def test_astype_to_str_preserves_na(self, value, string_value):
        # object -> str must keep each NA-like value's own string form
        # instead of collapsing them all to "nan".
        # https://github.com/pandas-dev/pandas/issues/36904
        ser = Series(["a", "b", value], dtype=object)
        result = ser.astype(str)
        expected = Series(["a", "b", string_value], dtype=object)
        tm.assert_series_equal(result, expected)
|
||||
|
||||
    @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
    def test_astype(self, dtype):
        # Basic contract: astype changes the dtype and preserves the name.
        ser = Series(np.random.default_rng(2).standard_normal(5), name="foo")
        as_typed = ser.astype(dtype)

        assert as_typed.dtype == dtype
        assert as_typed.name == ser.name
|
||||
|
||||
    @pytest.mark.parametrize("value", [np.nan, np.inf])
    @pytest.mark.parametrize("dtype", [np.int32, np.int64])
    def test_astype_cast_nan_inf_int(self, dtype, value):
        # gh-14265: check NaN and inf raise error when converting to int
        msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
        ser = Series([value])

        with pytest.raises(ValueError, match=msg):
            ser.astype(dtype)
|
||||
|
||||
    @pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
    def test_astype_cast_object_int_fail(self, dtype):
        # Non-numeric object strings cannot be cast to any int dtype and
        # surface int()'s own ValueError message.
        arr = Series(["car", "house", "tree", "1"])
        msg = r"invalid literal for int\(\) with base 10: 'car'"
        with pytest.raises(ValueError, match=msg):
            arr.astype(dtype)
|
||||
|
||||
    def test_astype_float_to_uint_negatives_raise(
        self, float_numpy_dtype, any_unsigned_int_numpy_dtype
    ):
        # GH#45151 We don't cast negative numbers to nonsense values
        # TODO: same for EA float/uint dtypes, signed integers?
        # The same lossless-cast check must hold for Series, DataFrame,
        # Index and the underlying array.
        arr = np.arange(5).astype(float_numpy_dtype) - 3  # includes negatives
        ser = Series(arr)

        msg = "Cannot losslessly cast from .* to .*"
        with pytest.raises(ValueError, match=msg):
            ser.astype(any_unsigned_int_numpy_dtype)

        with pytest.raises(ValueError, match=msg):
            ser.to_frame().astype(any_unsigned_int_numpy_dtype)

        with pytest.raises(ValueError, match=msg):
            # We currently catch and re-raise in Index.astype
            Index(ser).astype(any_unsigned_int_numpy_dtype)

        with pytest.raises(ValueError, match=msg):
            ser.array.astype(any_unsigned_int_numpy_dtype)
|
||||
|
||||
def test_astype_cast_object_int(self):
|
||||
arr = Series(["1", "2", "3", "4"], dtype=object)
|
||||
result = arr.astype(int)
|
||||
|
||||
tm.assert_series_equal(result, Series(np.arange(1, 5)))
|
||||
|
||||
    def test_astype_unicode(self):
        # see GH#7758: A bit of magic is required to set
        # default encoding to utf-8
        # NOTE(review): rand_str is a helper defined elsewhere in this
        # test module — presumably returns a random str of the given
        # length; verify at the definition site.
        digits = string.digits
        test_series = [
            Series([digits * 10, rand_str(63), rand_str(64), rand_str(1000)]),
            Series(["データーサイエンス、お前はもう死んでいる"]),
        ]

        # NOTE(review): former_encoding is never reassigned, so the
        # "restore" branch below is dead Py2-era code
        # (sys.setdefaultencoding does not exist on Py3) — consider removing.
        former_encoding = None

        if sys.getdefaultencoding() == "utf-8":
            # GH#45326 as of 2.0 Series.astype matches Index.astype by handling
            # bytes with obj.decode() instead of str(obj)
            item = "野菜食べないとやばい"
            ser = Series([item.encode()])
            result = ser.astype("unicode")
            expected = Series([item])
            tm.assert_series_equal(result, expected)

        # "unicode" must behave exactly like mapping str over the values.
        for ser in test_series:
            res = ser.astype("unicode")
            expec = ser.map(str)
            tm.assert_series_equal(res, expec)

        # Restore the former encoding
        if former_encoding is not None and former_encoding != "utf-8":
            reload(sys)
            sys.setdefaultencoding(former_encoding)
|
||||
|
||||
def test_astype_bytes(self):
|
||||
# GH#39474
|
||||
result = Series(["foo", "bar", "baz"]).astype(bytes)
|
||||
assert result.dtypes == np.dtype("S3")
|
||||
|
||||
def test_astype_nan_to_bool(self):
|
||||
# GH#43018
|
||||
ser = Series(np.nan, dtype="object")
|
||||
result = ser.astype("bool")
|
||||
expected = Series(True, dtype="bool")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
    @pytest.mark.parametrize(
        "dtype",
        tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES,
    )
    def test_astype_ea_to_datetimetzdtype(self, dtype):
        # GH37553
        # Numeric EA values are interpreted as nanoseconds since the epoch
        # and then localized to the target timezone.
        ser = Series([4, 0, 9], dtype=dtype)
        result = ser.astype(DatetimeTZDtype(tz="US/Pacific"))

        expected = Series(
            {
                0: Timestamp("1969-12-31 16:00:00.000000004-08:00", tz="US/Pacific"),
                1: Timestamp("1969-12-31 16:00:00.000000000-08:00", tz="US/Pacific"),
                2: Timestamp("1969-12-31 16:00:00.000000009-08:00", tz="US/Pacific"),
            }
        )

        tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_astype_retain_attrs(self, any_numpy_dtype):
|
||||
# GH#44414
|
||||
ser = Series([0, 1, 2, 3])
|
||||
ser.attrs["Location"] = "Michigan"
|
||||
|
||||
result = ser.astype(any_numpy_dtype).attrs
|
||||
expected = ser.attrs
|
||||
|
||||
tm.assert_dict_equal(expected, result)
|
||||
|
||||
|
||||
class TestAstypeString:
    """Round-trip tests: extension dtype -> nullable string dtype -> back."""

    @pytest.mark.parametrize(
        "data, dtype",
        [
            ([True, NA], "boolean"),
            (["A", NA], "category"),
            (["2020-10-10", "2020-10-10"], "datetime64[ns]"),
            (["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),
            (
                ["2012-01-01 00:00:00-05:00", NaT],
                "datetime64[ns, US/Eastern]",
            ),
            ([1, None], "UInt16"),
            (["1/1/2021", "2/1/2021"], "period[M]"),
            (["1/1/2021", "2/1/2021", NaT], "period[M]"),
            (["1 Day", "59 Days", NaT], "timedelta64[ns]"),
            # currently no way to parse IntervalArray from a list of strings
        ],
    )
    def test_astype_string_to_extension_dtype_roundtrip(
        self, data, dtype, request, nullable_string_dtype
    ):
        # Casting to a nullable string dtype and back must reproduce the
        # original Series exactly.
        if dtype == "boolean":
            mark = pytest.mark.xfail(
                reason="TODO StringArray.astype() with missing values #GH40566"
            )
            # request.applymarker is the supported FixtureRequest API;
            # request.node.add_marker is deprecated (pytest 7.4+).
            request.applymarker(mark)
        # GH-40351
        ser = Series(data, dtype=dtype)

        # Note: just passing .astype(dtype) fails for dtype="category"
        # with bc ser.dtype.categories will be object dtype whereas
        # result.dtype.categories will have string dtype
        result = ser.astype(nullable_string_dtype).astype(ser.dtype)
        tm.assert_series_equal(result, ser)
|
||||
|
||||
|
||||
class TestAstypeCategorical:
    """Casts between categorical and other dtypes (both directions)."""

    def test_astype_categorical_to_other(self):
        # Bucketed integer data: category round-trips, float64 raises,
        # str/int casts follow the underlying values.
        cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
        ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
        ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)

        expected = ser
        tm.assert_series_equal(ser.astype("category"), expected)
        tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
        msg = r"Cannot cast object dtype to float64"
        with pytest.raises(ValueError, match=msg):
            ser.astype("float64")

        cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
        exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
        tm.assert_series_equal(cat.astype("str"), exp)
        s2 = Series(Categorical(["1", "2", "3", "4"]))
        exp2 = Series([1, 2, 3, 4]).astype("int")
        tm.assert_series_equal(s2.astype("int"), exp2)

        # object don't sort correctly, so just compare that we have the same
        # values
        def cmp(a, b):
            tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))

        expected = Series(np.array(ser.values), name="value_group")
        cmp(ser.astype("object"), expected)
        cmp(ser.astype(np.object_), expected)

        # array conversion
        tm.assert_almost_equal(np.array(ser), np.array(ser.values))

        tm.assert_series_equal(ser.astype("category"), ser)
        tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)

        # object -> category loses the original category order, so the
        # round-trip expectation sorts and prunes unused categories.
        roundtrip_expected = ser.cat.set_categories(
            ser.cat.categories.sort_values()
        ).cat.remove_unused_categories()
        result = ser.astype("object").astype("category")
        tm.assert_series_equal(result, roundtrip_expected)
        result = ser.astype("object").astype(CategoricalDtype())
        tm.assert_series_equal(result, roundtrip_expected)

    def test_astype_categorical_invalid_conversions(self):
        # invalid conversion (these are NOT a dtype)
        cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
        ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
        ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)

        msg = (
            "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
            "not understood"
        )
        with pytest.raises(TypeError, match=msg):
            ser.astype(Categorical)
        with pytest.raises(TypeError, match=msg):
            ser.astype("object").astype(Categorical)

    def test_astype_categoricaldtype(self):
        # Explicit CategoricalDtype controls categories and orderedness.
        ser = Series(["a", "b", "a"])
        result = ser.astype(CategoricalDtype(["a", "b"], ordered=True))
        expected = Series(Categorical(["a", "b", "a"], ordered=True))
        tm.assert_series_equal(result, expected)

        result = ser.astype(CategoricalDtype(["a", "b"], ordered=False))
        expected = Series(Categorical(["a", "b", "a"], ordered=False))
        tm.assert_series_equal(result, expected)

        # Extra (unused) categories are kept.
        result = ser.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
        expected = Series(
            Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
        )
        tm.assert_series_equal(result, expected)
        tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))

    @pytest.mark.parametrize("name", [None, "foo"])
    @pytest.mark.parametrize("dtype_ordered", [True, False])
    @pytest.mark.parametrize("series_ordered", [True, False])
    def test_astype_categorical_to_categorical(
        self, name, dtype_ordered, series_ordered
    ):
        # GH#10696, GH#18593
        s_data = list("abcaacbab")
        s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
        ser = Series(s_data, dtype=s_dtype, name=name)

        # unspecified categories
        dtype = CategoricalDtype(ordered=dtype_ordered)
        result = ser.astype(dtype)
        exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
        expected = Series(s_data, name=name, dtype=exp_dtype)
        tm.assert_series_equal(result, expected)

        # different categories
        dtype = CategoricalDtype(list("adc"), dtype_ordered)
        result = ser.astype(dtype)
        expected = Series(s_data, name=name, dtype=dtype)
        tm.assert_series_equal(result, expected)

        if dtype_ordered is False:
            # not specifying ordered, so only test once
            expected = ser
            result = ser.astype("category")
            tm.assert_series_equal(result, expected)

    def test_astype_bool_missing_to_categorical(self):
        # GH-19182
        ser = Series([True, False, np.nan])
        assert ser.dtypes == np.object_

        result = ser.astype(CategoricalDtype(categories=[True, False]))
        expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
        tm.assert_series_equal(result, expected)

    def test_astype_categories_raises(self):
        # deprecated GH#17636, removed in GH#27141
        ser = Series(["a", "b", "a"])
        with pytest.raises(TypeError, match="got an unexpected"):
            ser.astype("category", categories=["a", "b"], ordered=True)

    @pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])
    def test_astype_from_categorical(self, items):
        # Plain astype("category") matches the Categorical constructor.
        ser = Series(items)
        exp = Series(Categorical(items))
        res = ser.astype("category")
        tm.assert_series_equal(res, exp)

    def test_astype_from_categorical_with_keywords(self):
        # with keywords
        lst = ["a", "b", "c", "a"]
        ser = Series(lst)
        exp = Series(Categorical(lst, ordered=True))
        res = ser.astype(CategoricalDtype(None, ordered=True))
        tm.assert_series_equal(res, exp)

        exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
        res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True))
        tm.assert_series_equal(res, exp)

    def test_astype_timedelta64_with_np_nan(self):
        # GH45798
        # np.nan in a timedelta64 constructor becomes NaT.
        result = Series([Timedelta(1), np.nan], dtype="timedelta64[ns]")
        expected = Series([Timedelta(1), NaT], dtype="timedelta64[ns]")
        tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,30 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TestAutoCorr:
|
||||
def test_autocorr(self, datetime_series):
|
||||
# Just run the function
|
||||
corr1 = datetime_series.autocorr()
|
||||
|
||||
# Now run it with the lag parameter
|
||||
corr2 = datetime_series.autocorr(lag=1)
|
||||
|
||||
# corr() with lag needs Series of at least length 2
|
||||
if len(datetime_series) <= 2:
|
||||
assert np.isnan(corr1)
|
||||
assert np.isnan(corr2)
|
||||
else:
|
||||
assert corr1 == corr2
|
||||
|
||||
# Choose a random lag between 1 and length of Series - 2
|
||||
# and compare the result with the Series corr() function
|
||||
n = 1 + np.random.default_rng(2).integers(max(1, len(datetime_series) - 2))
|
||||
corr1 = datetime_series.corr(datetime_series.shift(n))
|
||||
corr2 = datetime_series.autocorr(lag=n)
|
||||
|
||||
# corr() with lag needs Series of at least length 2
|
||||
if len(datetime_series) <= 2:
|
||||
assert np.isnan(corr1)
|
||||
assert np.isnan(corr2)
|
||||
else:
|
||||
assert corr1 == corr2
|
@@ -0,0 +1,75 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
bdate_range,
|
||||
date_range,
|
||||
period_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestBetween:
    """Series.between: boundary semantics for the `inclusive` argument."""

    def test_between(self):
        # Default is inclusive on both ends.
        series = Series(date_range("1/1/2000", periods=10))
        left, right = series[[2, 7]]

        result = series.between(left, right)
        expected = (series >= left) & (series <= right)
        tm.assert_series_equal(result, expected)

    def test_between_datetime_object_dtype(self):
        # Works on object-dtype datetimes with NaN holes.
        ser = Series(bdate_range("1/1/2000", periods=20).astype(object))
        ser[::2] = np.nan

        result = ser[ser.between(ser[3], ser[17])]
        expected = ser[3:18].dropna()
        tm.assert_series_equal(result, expected)

        result = ser[ser.between(ser[3], ser[17], inclusive="neither")]
        expected = ser[5:16].dropna()
        tm.assert_series_equal(result, expected)

    def test_between_period_values(self):
        # Period values support between as well.
        ser = Series(period_range("2000-01-01", periods=10, freq="D"))
        left, right = ser[[2, 7]]
        result = ser.between(left, right)
        expected = (ser >= left) & (ser <= right)
        tm.assert_series_equal(result, expected)

    def test_between_inclusive_string(self):
        # GH 40628
        # Each string option maps to the matching comparison pair.
        series = Series(date_range("1/1/2000", periods=10))
        left, right = series[[2, 7]]

        result = series.between(left, right, inclusive="both")
        expected = (series >= left) & (series <= right)
        tm.assert_series_equal(result, expected)

        result = series.between(left, right, inclusive="left")
        expected = (series >= left) & (series < right)
        tm.assert_series_equal(result, expected)

        result = series.between(left, right, inclusive="right")
        expected = (series > left) & (series <= right)
        tm.assert_series_equal(result, expected)

        result = series.between(left, right, inclusive="neither")
        expected = (series > left) & (series < right)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("inclusive", ["yes", True, False])
    def test_between_error_args(self, inclusive):
        # GH 40628
        # Anything other than the four accepted strings must raise.
        series = Series(date_range("1/1/2000", periods=10))
        left, right = series[[2, 7]]

        value_error_msg = (
            "Inclusive has to be either string of 'both',"
            "'left', 'right', or 'neither'."
        )

        with pytest.raises(ValueError, match=value_error_msg):
            series = Series(date_range("1/1/2000", periods=10))
            series.between(left, right, inclusive=inclusive)
|
@@ -0,0 +1,138 @@
|
||||
from datetime import datetime
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Series,
|
||||
Timestamp,
|
||||
isna,
|
||||
notna,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesClip:
    """Series.clip with scalars, array-likes, NA thresholds and datetimes."""

    def test_clip(self, datetime_series):
        # Clipping at the median pins min/max; matches np.clip.
        val = datetime_series.median()

        assert datetime_series.clip(lower=val).min() == val
        assert datetime_series.clip(upper=val).max() == val

        result = datetime_series.clip(-0.5, 0.5)
        expected = np.clip(datetime_series, -0.5, 0.5)
        tm.assert_series_equal(result, expected)
        assert isinstance(expected, Series)

    def test_clip_types_and_nulls(self):
        # clip must preserve NA positions for float, object and datetime.
        sers = [
            Series([np.nan, 1.0, 2.0, 3.0]),
            Series([None, "a", "b", "c"]),
            Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
        ]

        for s in sers:
            thresh = s[2]
            lower = s.clip(lower=thresh)
            upper = s.clip(upper=thresh)
            assert lower[notna(lower)].min() == thresh
            assert upper[notna(upper)].max() == thresh
            assert list(isna(s)) == list(isna(lower))
            assert list(isna(s)) == list(isna(upper))

    def test_series_clipping_with_na_values(self, any_numeric_ea_dtype, nulls_fixture):
        # Ensure that clipping method can handle NA values with out failing
        # GH#40581

        if nulls_fixture is pd.NaT:
            # constructor will raise, see
            # test_constructor_mismatched_null_nullable_dtype
            pytest.skip("See test_constructor_mismatched_null_nullable_dtype")

        ser = Series([nulls_fixture, 1.0, 3.0], dtype=any_numeric_ea_dtype)
        s_clipped_upper = ser.clip(upper=2.0)
        s_clipped_lower = ser.clip(lower=2.0)

        expected_upper = Series([nulls_fixture, 1.0, 2.0], dtype=any_numeric_ea_dtype)
        expected_lower = Series([nulls_fixture, 2.0, 3.0], dtype=any_numeric_ea_dtype)

        tm.assert_series_equal(s_clipped_upper, expected_upper)
        tm.assert_series_equal(s_clipped_lower, expected_lower)

    def test_clip_with_na_args(self):
        """Should process np.nan argument as None"""
        # GH#17276
        s = Series([1, 2, 3])

        tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
        tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))

        # GH#19992
        # Elementwise NaN thresholds leave the matching element unclipped.
        tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, 3]))
        tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, 2, 1]))

        # GH#40420
        s = Series([1, 2, 3])
        result = s.clip(0, [np.nan, np.nan, np.nan])
        tm.assert_series_equal(s, result)

    def test_clip_against_series(self):
        # GH#6966
        # Series-valued bounds clip elementwise.

        s = Series([1.0, 1.0, 4.0])

        lower = Series([1.0, 2.0, 3.0])
        upper = Series([1.5, 2.5, 3.5])

        tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
        tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))

    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
    def test_clip_against_list_like(self, inplace, upper):
        # GH#15390
        original = Series([5, 6, 7])
        result = original.clip(upper=upper, inplace=inplace)
        expected = Series([1, 2, 3])

        if inplace:
            # inplace=True returns None; the mutation lands on `original`.
            result = original
        tm.assert_series_equal(result, expected, check_exact=True)

    def test_clip_with_datetimes(self):
        # GH#11838
        # naive and tz-aware datetimes

        t = Timestamp("2015-12-01 09:30:30")
        s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
        result = s.clip(upper=t)
        expected = Series(
            [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
        )
        tm.assert_series_equal(result, expected)

        t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
        s = Series(
            [
                Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
                Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
            ]
        )
        result = s.clip(upper=t)
        expected = Series(
            [
                Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
                Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
            ]
        )
        tm.assert_series_equal(result, expected)

    def test_clip_with_timestamps_and_oob_datetimes(self):
        # GH-42794
        # Values outside Timestamp bounds stay object dtype after clipping.
        ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)])

        result = ser.clip(lower=Timestamp.min, upper=Timestamp.max)
        expected = Series([Timestamp.min, Timestamp.max], dtype="object")

        tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,17 @@
|
||||
from pandas import Series
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestCombine:
    def test_combine_scalar(self):
        """Series.combine with a scalar applies func to (value, scalar)
        pairs elementwise.

        combine() with another Series is exercised by the operator tests.
        """
        # GH#21248
        base = Series([i * 10 for i in range(5)])

        summed = base.combine(3, lambda x, y: x + y)
        tm.assert_series_equal(summed, Series([i * 10 + 3 for i in range(5)]))

        capped = base.combine(22, lambda x, y: min(x, y))
        tm.assert_series_equal(capped, Series([min(i * 10, 22) for i in range(5)]))
|
@@ -0,0 +1,149 @@
|
||||
from datetime import datetime
|
||||
|
||||
import numpy as np
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Period,
|
||||
Series,
|
||||
date_range,
|
||||
period_range,
|
||||
to_datetime,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestCombineFirst:
    """Series.combine_first: fill NA from another Series, union the index."""

    def test_combine_first_period_datetime(self):
        # GH#3367
        didx = date_range(start="1950-01-31", end="1950-07-31", freq="M")
        pidx = period_range(start=Period("1950-1"), end=Period("1950-7"), freq="M")
        # check to be consistent with DatetimeIndex
        for idx in [didx, pidx]:
            a = Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
            b = Series([9, 9, 9, 9, 9, 9, 9], index=idx)

            result = a.combine_first(b)
            expected = Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
            tm.assert_series_equal(result, expected)

    def test_combine_first_name(self, datetime_series):
        # The caller's name wins.
        result = datetime_series.combine_first(datetime_series[:5])
        assert result.name == datetime_series.name

    def test_combine_first(self):
        # NOTE(review): tm.makeIntIndex / tm.makeStringIndex are
        # deprecated/removed in newer pandas — verify against the pinned
        # pandas version.
        values = tm.makeIntIndex(20).values.astype(float)
        series = Series(values, index=tm.makeIntIndex(20))

        series_copy = series * 2
        series_copy[::2] = np.nan

        # nothing used from the input
        combined = series.combine_first(series_copy)

        tm.assert_series_equal(combined, series)

        # Holes filled from input
        combined = series_copy.combine_first(series)
        assert np.isfinite(combined).all()

        tm.assert_series_equal(combined[::2], series[::2])
        tm.assert_series_equal(combined[1::2], series_copy[1::2])

        # mixed types
        index = tm.makeStringIndex(20)
        floats = Series(np.random.default_rng(2).standard_normal(20), index=index)
        strings = Series(tm.makeStringIndex(10), index=index[::2])

        combined = strings.combine_first(floats)

        tm.assert_series_equal(strings, combined.loc[index[::2]])
        tm.assert_series_equal(floats[1::2].astype(object), combined.loc[index[1::2]])

        # corner case
        ser = Series([1.0, 2, 3], index=[0, 1, 2])
        empty = Series([], index=[], dtype=object)
        msg = "The behavior of array concatenation with empty entries is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.combine_first(empty)
        ser.index = ser.index.astype("O")
        tm.assert_series_equal(ser, result)

    def test_combine_first_dt64(self):
        # datetime64 stays datetime64 when both inputs parse.
        s0 = to_datetime(Series(["2010", np.nan]))
        s1 = to_datetime(Series([np.nan, "2011"]))
        rs = s0.combine_first(s1)
        xp = to_datetime(Series(["2010", "2011"]))
        tm.assert_series_equal(rs, xp)

        s0 = to_datetime(Series(["2010", np.nan]))
        s1 = Series([np.nan, "2011"])
        rs = s0.combine_first(s1)

        xp = Series([datetime(2010, 1, 1), "2011"], dtype="datetime64[ns]")

        tm.assert_series_equal(rs, xp)

    def test_combine_first_dt_tz_values(self, tz_naive_fixture):
        # tz-aware values from both operands keep the shared timezone.
        ser1 = Series(
            pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
            name="ser1",
        )
        ser2 = Series(
            pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture),
            index=[2, 3, 4],
            name="ser2",
        )
        result = ser1.combine_first(ser2)
        exp_vals = pd.DatetimeIndex(
            ["20150101", "20150102", "20150103", "20160515", "20160516"],
            tz=tz_naive_fixture,
        )
        exp = Series(exp_vals, name="ser1")
        tm.assert_series_equal(exp, result)

    def test_combine_first_timezone_series_with_empty_series(self):
        # GH 41800
        time_index = date_range(
            datetime(2021, 1, 1, 1),
            datetime(2021, 1, 1, 10),
            freq="H",
            tz="Europe/Rome",
        )
        s1 = Series(range(10), index=time_index)
        s2 = Series(index=time_index)
        msg = "The behavior of array concatenation with empty entries is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = s1.combine_first(s2)
        tm.assert_series_equal(result, s1)

    def test_combine_first_preserves_dtype(self):
        # GH51764
        # int64 values near 2**63 must not round-trip through float.
        s1 = Series([1666880195890293744, 1666880195890293837])
        s2 = Series([1, 2, 3])
        result = s1.combine_first(s2)
        expected = Series([1666880195890293744, 1666880195890293837, 3])
        tm.assert_series_equal(result, expected)

    def test_combine_mixed_timezone(self):
        # GH 26283
        # Equal instants in different timezones collapse to one label.
        uniform_tz = Series({pd.Timestamp("2019-05-01", tz="UTC"): 1.0})
        multi_tz = Series(
            {
                pd.Timestamp("2019-05-01 01:00:00+0100", tz="Europe/London"): 2.0,
                pd.Timestamp("2019-05-02", tz="UTC"): 3.0,
            }
        )

        result = uniform_tz.combine_first(multi_tz)
        expected = Series(
            [1.0, 3.0],
            index=pd.Index(
                [
                    pd.Timestamp("2019-05-01 00:00:00+00:00", tz="UTC"),
                    pd.Timestamp("2019-05-02 00:00:00+00:00", tz="UTC"),
                ],
                dtype="object",
            ),
        )
        tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,141 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
    # GH#30429
    # align_axis=1/"columns" yields a DataFrame with self/other columns;
    # align_axis=0/"index" yields a Series with a self/other index level.
    s1 = pd.Series(["a", "b", "c"])
    s2 = pd.Series(["x", "b", "z"])

    result = s1.compare(s2, align_axis=align_axis)

    if align_axis in (1, "columns"):
        indices = pd.Index([0, 2])
        columns = pd.Index(["self", "other"])
        expected = pd.DataFrame(
            [["a", "x"], ["c", "z"]], index=indices, columns=columns
        )
        tm.assert_frame_equal(result, expected)
    else:
        indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
        expected = pd.Series(["a", "x", "c", "z"], index=indices)
        tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "keep_shape, keep_equal",
    [
        (True, False),
        (False, True),
        (True, True),
        # False, False case is already covered in test_compare_axis
    ],
)
def test_compare_various_formats(keep_shape, keep_equal):
    # keep_shape retains all rows; keep_equal retains equal values
    # (otherwise equal cells become NaN).
    s1 = pd.Series(["a", "b", "c"])
    s2 = pd.Series(["x", "b", "z"])

    result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)

    if keep_shape:
        indices = pd.Index([0, 1, 2])
        columns = pd.Index(["self", "other"])
        if keep_equal:
            expected = pd.DataFrame(
                [["a", "x"], ["b", "b"], ["c", "z"]], index=indices, columns=columns
            )
        else:
            expected = pd.DataFrame(
                [["a", "x"], [np.nan, np.nan], ["c", "z"]],
                index=indices,
                columns=columns,
            )
    else:
        indices = pd.Index([0, 2])
        columns = pd.Index(["self", "other"])
        expected = pd.DataFrame(
            [["a", "x"], ["c", "z"]], index=indices, columns=columns
        )
    tm.assert_frame_equal(result, expected)
|
||||
|
||||
|
||||
def test_compare_with_equal_nulls():
    """Two NaNs in the same position count as equal and are dropped."""
    left = pd.Series(["a", "b", np.nan])
    right = pd.Series(["x", "b", np.nan])

    diff = left.compare(right)

    tm.assert_frame_equal(
        diff, pd.DataFrame([["a", "x"]], columns=["self", "other"])
    )
|
||||
|
||||
|
||||
def test_compare_with_non_equal_nulls():
    """A NaN paired with a real value is a difference and must be kept."""
    left = pd.Series(["a", "b", "c"])
    right = pd.Series(["x", "b", np.nan])

    diff = left.compare(right, align_axis=0)

    expected_index = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
    tm.assert_series_equal(
        diff, pd.Series(["a", "x", "c", np.nan], index=expected_index)
    )
|
||||
|
||||
|
||||
def test_compare_multi_index():
    """With align_axis=0, self/other is appended as a new index level."""
    idx = pd.MultiIndex.from_arrays([[0, 0, 1], [0, 1, 2]])
    left = pd.Series(["a", "b", "c"], index=idx)
    right = pd.Series(["x", "b", "z"], index=idx)

    diff = left.compare(right, align_axis=0)

    expected_index = pd.MultiIndex.from_arrays(
        [[0, 0, 1, 1], [0, 0, 2, 2], ["self", "other", "self", "other"]]
    )
    tm.assert_series_equal(
        diff, pd.Series(["a", "x", "c", "z"], index=expected_index)
    )
|
||||
|
||||
|
||||
def test_compare_unaligned_objects():
|
||||
# test Series with different indices
|
||||
msg = "Can only compare identically-labeled Series objects"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
|
||||
ser2 = pd.Series([1, 2, 3], index=["a", "b", "d"])
|
||||
ser1.compare(ser2)
|
||||
|
||||
# test Series with different lengths
|
||||
msg = "Can only compare identically-labeled Series objects"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
ser1 = pd.Series([1, 2, 3])
|
||||
ser2 = pd.Series([1, 2, 3, 4])
|
||||
ser1.compare(ser2)
|
||||
|
||||
|
||||
def test_compare_datetime64_and_string():
|
||||
# Issue https://github.com/pandas-dev/pandas/issues/45506
|
||||
# Catch OverflowError when comparing datetime64 and string
|
||||
data = [
|
||||
{"a": "2015-07-01", "b": "08335394550"},
|
||||
{"a": "2015-07-02", "b": "+49 (0) 0345 300033"},
|
||||
{"a": "2015-07-03", "b": "+49(0)2598 04457"},
|
||||
{"a": "2015-07-04", "b": "0741470003"},
|
||||
{"a": "2015-07-05", "b": "04181 83668"},
|
||||
]
|
||||
dtypes = {"a": "datetime64[ns]", "b": "string"}
|
||||
df = pd.DataFrame(data=data).astype(dtypes)
|
||||
|
||||
result_eq1 = df["a"].eq(df["b"])
|
||||
result_eq2 = df["a"] == df["b"]
|
||||
result_neq = df["a"] != df["b"]
|
||||
|
||||
expected_eq = pd.Series([False] * 5) # For .eq and ==
|
||||
expected_neq = pd.Series([True] * 5) # For !=
|
||||
|
||||
tm.assert_series_equal(result_eq1, expected_eq)
|
||||
tm.assert_series_equal(result_eq2, expected_eq)
|
||||
tm.assert_series_equal(result_neq, expected_neq)
|
@@ -0,0 +1,267 @@
|
||||
from itertools import product
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
import pandas._testing as tm
|
||||
|
||||
# Each test case consists of a tuple with the data and dtype to create the
|
||||
# test Series, the default dtype for the expected result (which is valid
|
||||
# for most cases), and the specific cases where the result deviates from
|
||||
# this default. Those overrides are defined as a dict with (keyword, val) as
|
||||
# dictionary key. In case of multiple items, the last override takes precedence.
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=[
|
||||
(
|
||||
# data
|
||||
[1, 2, 3],
|
||||
# original dtype
|
||||
np.dtype("int32"),
|
||||
# default expected dtype
|
||||
"Int32",
|
||||
# exceptions on expected dtype
|
||||
{("convert_integer", False): np.dtype("int32")},
|
||||
),
|
||||
(
|
||||
[1, 2, 3],
|
||||
np.dtype("int64"),
|
||||
"Int64",
|
||||
{("convert_integer", False): np.dtype("int64")},
|
||||
),
|
||||
(
|
||||
["x", "y", "z"],
|
||||
np.dtype("O"),
|
||||
pd.StringDtype(),
|
||||
{("convert_string", False): np.dtype("O")},
|
||||
),
|
||||
(
|
||||
[True, False, np.nan],
|
||||
np.dtype("O"),
|
||||
pd.BooleanDtype(),
|
||||
{("convert_boolean", False): np.dtype("O")},
|
||||
),
|
||||
(
|
||||
["h", "i", np.nan],
|
||||
np.dtype("O"),
|
||||
pd.StringDtype(),
|
||||
{("convert_string", False): np.dtype("O")},
|
||||
),
|
||||
( # GH32117
|
||||
["h", "i", 1],
|
||||
np.dtype("O"),
|
||||
np.dtype("O"),
|
||||
{},
|
||||
),
|
||||
(
|
||||
[10, np.nan, 20],
|
||||
np.dtype("float"),
|
||||
"Int64",
|
||||
{
|
||||
("convert_integer", False, "convert_floating", True): "Float64",
|
||||
("convert_integer", False, "convert_floating", False): np.dtype(
|
||||
"float"
|
||||
),
|
||||
},
|
||||
),
|
||||
(
|
||||
[np.nan, 100.5, 200],
|
||||
np.dtype("float"),
|
||||
"Float64",
|
||||
{("convert_floating", False): np.dtype("float")},
|
||||
),
|
||||
(
|
||||
[3, 4, 5],
|
||||
"Int8",
|
||||
"Int8",
|
||||
{},
|
||||
),
|
||||
(
|
||||
[[1, 2], [3, 4], [5]],
|
||||
None,
|
||||
np.dtype("O"),
|
||||
{},
|
||||
),
|
||||
(
|
||||
[4, 5, 6],
|
||||
np.dtype("uint32"),
|
||||
"UInt32",
|
||||
{("convert_integer", False): np.dtype("uint32")},
|
||||
),
|
||||
(
|
||||
[-10, 12, 13],
|
||||
np.dtype("i1"),
|
||||
"Int8",
|
||||
{("convert_integer", False): np.dtype("i1")},
|
||||
),
|
||||
(
|
||||
[1.2, 1.3],
|
||||
np.dtype("float32"),
|
||||
"Float32",
|
||||
{("convert_floating", False): np.dtype("float32")},
|
||||
),
|
||||
(
|
||||
[1, 2.0],
|
||||
object,
|
||||
"Int64",
|
||||
{
|
||||
("convert_integer", False): "Float64",
|
||||
("convert_integer", False, "convert_floating", False): np.dtype(
|
||||
"float"
|
||||
),
|
||||
("infer_objects", False): np.dtype("object"),
|
||||
},
|
||||
),
|
||||
(
|
||||
[1, 2.5],
|
||||
object,
|
||||
"Float64",
|
||||
{
|
||||
("convert_floating", False): np.dtype("float"),
|
||||
("infer_objects", False): np.dtype("object"),
|
||||
},
|
||||
),
|
||||
(["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
|
||||
(
|
||||
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
|
||||
pd.DatetimeTZDtype(tz="UTC"),
|
||||
pd.DatetimeTZDtype(tz="UTC"),
|
||||
{},
|
||||
),
|
||||
(
|
||||
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
|
||||
"datetime64[ns]",
|
||||
np.dtype("datetime64[ns]"),
|
||||
{},
|
||||
),
|
||||
(
|
||||
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
|
||||
object,
|
||||
np.dtype("datetime64[ns]"),
|
||||
{("infer_objects", False): np.dtype("object")},
|
||||
),
|
||||
(
|
||||
pd.period_range("1/1/2011", freq="M", periods=3),
|
||||
None,
|
||||
pd.PeriodDtype("M"),
|
||||
{},
|
||||
),
|
||||
(
|
||||
pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
|
||||
None,
|
||||
pd.IntervalDtype("int64", "right"),
|
||||
{},
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_cases(request):
|
||||
return request.param
|
||||
|
||||
|
||||
class TestSeriesConvertDtypes:
|
||||
@pytest.mark.parametrize("params", product(*[(True, False)] * 5))
|
||||
def test_convert_dtypes(
|
||||
self,
|
||||
test_cases,
|
||||
params,
|
||||
):
|
||||
data, maindtype, expected_default, expected_other = test_cases
|
||||
if (
|
||||
hasattr(data, "dtype")
|
||||
and data.dtype == "M8[ns]"
|
||||
and isinstance(maindtype, pd.DatetimeTZDtype)
|
||||
):
|
||||
# this astype is deprecated in favor of tz_localize
|
||||
msg = "Cannot use .astype to convert from timezone-naive dtype"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
pd.Series(data, dtype=maindtype)
|
||||
return
|
||||
|
||||
if maindtype is not None:
|
||||
series = pd.Series(data, dtype=maindtype)
|
||||
else:
|
||||
series = pd.Series(data)
|
||||
|
||||
result = series.convert_dtypes(*params)
|
||||
|
||||
param_names = [
|
||||
"infer_objects",
|
||||
"convert_string",
|
||||
"convert_integer",
|
||||
"convert_boolean",
|
||||
"convert_floating",
|
||||
]
|
||||
params_dict = dict(zip(param_names, params))
|
||||
|
||||
expected_dtype = expected_default
|
||||
for spec, dtype in expected_other.items():
|
||||
if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):
|
||||
expected_dtype = dtype
|
||||
|
||||
expected = pd.Series(data, dtype=expected_dtype)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# Test that it is a copy
|
||||
copy = series.copy(deep=True)
|
||||
|
||||
if result.notna().sum() > 0 and result.dtype in ["interval[int64, right]"]:
|
||||
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
|
||||
result[result.notna()] = np.nan
|
||||
else:
|
||||
result[result.notna()] = np.nan
|
||||
|
||||
# Make sure original not changed
|
||||
tm.assert_series_equal(series, copy)
|
||||
|
||||
def test_convert_string_dtype(self, nullable_string_dtype):
|
||||
# https://github.com/pandas-dev/pandas/issues/31731 -> converting columns
|
||||
# that are already string dtype
|
||||
df = pd.DataFrame(
|
||||
{"A": ["a", "b", pd.NA], "B": ["ä", "ö", "ü"]}, dtype=nullable_string_dtype
|
||||
)
|
||||
result = df.convert_dtypes()
|
||||
tm.assert_frame_equal(df, result)
|
||||
|
||||
def test_convert_bool_dtype(self):
|
||||
# GH32287
|
||||
df = pd.DataFrame({"A": pd.array([True])})
|
||||
tm.assert_frame_equal(df, df.convert_dtypes())
|
||||
|
||||
def test_convert_byte_string_dtype(self):
|
||||
# GH-43183
|
||||
byte_str = b"binary-string"
|
||||
|
||||
df = pd.DataFrame(data={"A": byte_str}, index=[0])
|
||||
result = df.convert_dtypes()
|
||||
expected = df
|
||||
tm.assert_frame_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"infer_objects, dtype", [(True, "Int64"), (False, "object")]
|
||||
)
|
||||
def test_convert_dtype_object_with_na(self, infer_objects, dtype):
|
||||
# GH#48791
|
||||
ser = pd.Series([1, pd.NA])
|
||||
result = ser.convert_dtypes(infer_objects=infer_objects)
|
||||
expected = pd.Series([1, pd.NA], dtype=dtype)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"infer_objects, dtype", [(True, "Float64"), (False, "object")]
|
||||
)
|
||||
def test_convert_dtype_object_with_na_float(self, infer_objects, dtype):
|
||||
# GH#48791
|
||||
ser = pd.Series([1.5, pd.NA])
|
||||
result = ser.convert_dtypes(infer_objects=infer_objects)
|
||||
expected = pd.Series([1.5, pd.NA], dtype=dtype)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_convert_dtypes_pyarrow_to_np_nullable(self):
|
||||
# GH 53648
|
||||
pytest.importorskip("pyarrow")
|
||||
ser = pd.Series(range(2), dtype="int32[pyarrow]")
|
||||
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
|
||||
expected = pd.Series(range(2), dtype="Int32")
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,89 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
Timestamp,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestCopy:
|
||||
@pytest.mark.parametrize("deep", ["default", None, False, True])
|
||||
def test_copy(self, deep, using_copy_on_write):
|
||||
ser = Series(np.arange(10), dtype="float64")
|
||||
|
||||
# default deep is True
|
||||
if deep == "default":
|
||||
ser2 = ser.copy()
|
||||
else:
|
||||
ser2 = ser.copy(deep=deep)
|
||||
|
||||
if using_copy_on_write:
|
||||
# INFO(CoW) a shallow copy doesn't yet copy the data
|
||||
# but parent will not be modified (CoW)
|
||||
if deep is None or deep is False:
|
||||
assert np.may_share_memory(ser.values, ser2.values)
|
||||
else:
|
||||
assert not np.may_share_memory(ser.values, ser2.values)
|
||||
|
||||
ser2[::2] = np.nan
|
||||
|
||||
if deep is not False or using_copy_on_write:
|
||||
# Did not modify original Series
|
||||
assert np.isnan(ser2[0])
|
||||
assert not np.isnan(ser[0])
|
||||
else:
|
||||
# we DID modify the original Series
|
||||
assert np.isnan(ser2[0])
|
||||
assert np.isnan(ser[0])
|
||||
|
||||
@pytest.mark.parametrize("deep", ["default", None, False, True])
|
||||
def test_copy_tzaware(self, deep, using_copy_on_write):
|
||||
# GH#11794
|
||||
# copy of tz-aware
|
||||
expected = Series([Timestamp("2012/01/01", tz="UTC")])
|
||||
expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
|
||||
|
||||
ser = Series([Timestamp("2012/01/01", tz="UTC")])
|
||||
|
||||
if deep == "default":
|
||||
ser2 = ser.copy()
|
||||
else:
|
||||
ser2 = ser.copy(deep=deep)
|
||||
|
||||
if using_copy_on_write:
|
||||
# INFO(CoW) a shallow copy doesn't yet copy the data
|
||||
# but parent will not be modified (CoW)
|
||||
if deep is None or deep is False:
|
||||
assert np.may_share_memory(ser.values, ser2.values)
|
||||
else:
|
||||
assert not np.may_share_memory(ser.values, ser2.values)
|
||||
|
||||
ser2[0] = Timestamp("1999/01/01", tz="UTC")
|
||||
|
||||
# default deep is True
|
||||
if deep is not False or using_copy_on_write:
|
||||
# Did not modify original Series
|
||||
tm.assert_series_equal(ser2, expected2)
|
||||
tm.assert_series_equal(ser, expected)
|
||||
else:
|
||||
# we DID modify the original Series
|
||||
tm.assert_series_equal(ser2, expected2)
|
||||
tm.assert_series_equal(ser, expected2)
|
||||
|
||||
def test_copy_name(self, datetime_series):
|
||||
result = datetime_series.copy()
|
||||
assert result.name == datetime_series.name
|
||||
|
||||
def test_copy_index_name_checking(self, datetime_series):
|
||||
# don't want to be able to modify the index stored elsewhere after
|
||||
# making a copy
|
||||
|
||||
datetime_series.index.name = None
|
||||
assert datetime_series.index.name is None
|
||||
assert datetime_series is datetime_series
|
||||
|
||||
cp = datetime_series.copy()
|
||||
cp.index.name = "foo"
|
||||
assert datetime_series.index.name is None
|
@@ -0,0 +1,34 @@
|
||||
import numpy as np
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Categorical,
|
||||
Series,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesCount:
|
||||
def test_count(self, datetime_series):
|
||||
assert datetime_series.count() == len(datetime_series)
|
||||
|
||||
datetime_series[::2] = np.nan
|
||||
|
||||
assert datetime_series.count() == np.isfinite(datetime_series).sum()
|
||||
|
||||
def test_count_inf_as_na(self):
|
||||
# GH#29478
|
||||
ser = Series([pd.Timestamp("1990/1/1")])
|
||||
msg = "use_inf_as_na option is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
with pd.option_context("use_inf_as_na", True):
|
||||
assert ser.count() == 1
|
||||
|
||||
def test_count_categorical(self):
|
||||
ser = Series(
|
||||
Categorical(
|
||||
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
|
||||
)
|
||||
)
|
||||
result = ser.count()
|
||||
assert result == 2
|
@@ -0,0 +1,176 @@
|
||||
import math
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Series,
|
||||
isna,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesCov:
|
||||
def test_cov(self, datetime_series):
|
||||
# full overlap
|
||||
tm.assert_almost_equal(
|
||||
datetime_series.cov(datetime_series), datetime_series.std() ** 2
|
||||
)
|
||||
|
||||
# partial overlap
|
||||
tm.assert_almost_equal(
|
||||
datetime_series[:15].cov(datetime_series[5:]),
|
||||
datetime_series[5:15].std() ** 2,
|
||||
)
|
||||
|
||||
# No overlap
|
||||
assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
|
||||
|
||||
# all NA
|
||||
cp = datetime_series[:10].copy()
|
||||
cp[:] = np.nan
|
||||
assert isna(cp.cov(cp))
|
||||
|
||||
# min_periods
|
||||
assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
|
||||
|
||||
ts1 = datetime_series[:15].reindex(datetime_series.index)
|
||||
ts2 = datetime_series[5:].reindex(datetime_series.index)
|
||||
assert isna(ts1.cov(ts2, min_periods=12))
|
||||
|
||||
@pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
|
||||
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
|
||||
def test_cov_ddof(self, test_ddof, dtype):
|
||||
# GH#34611
|
||||
np_array1 = np.random.default_rng(2).random(10)
|
||||
np_array2 = np.random.default_rng(2).random(10)
|
||||
|
||||
s1 = Series(np_array1, dtype=dtype)
|
||||
s2 = Series(np_array2, dtype=dtype)
|
||||
|
||||
result = s1.cov(s2, ddof=test_ddof)
|
||||
expected = np.cov(np_array1, np_array2, ddof=test_ddof)[0][1]
|
||||
assert math.isclose(expected, result)
|
||||
|
||||
|
||||
class TestSeriesCorr:
|
||||
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
|
||||
def test_corr(self, datetime_series, dtype):
|
||||
stats = pytest.importorskip("scipy.stats")
|
||||
|
||||
datetime_series = datetime_series.astype(dtype)
|
||||
|
||||
# full overlap
|
||||
tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
|
||||
|
||||
# partial overlap
|
||||
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
|
||||
|
||||
assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
|
||||
|
||||
ts1 = datetime_series[:15].reindex(datetime_series.index)
|
||||
ts2 = datetime_series[5:].reindex(datetime_series.index)
|
||||
assert isna(ts1.corr(ts2, min_periods=12))
|
||||
|
||||
# No overlap
|
||||
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
|
||||
|
||||
# all NA
|
||||
cp = datetime_series[:10].copy()
|
||||
cp[:] = np.nan
|
||||
assert isna(cp.corr(cp))
|
||||
|
||||
A = tm.makeTimeSeries()
|
||||
B = tm.makeTimeSeries()
|
||||
result = A.corr(B)
|
||||
expected, _ = stats.pearsonr(A, B)
|
||||
tm.assert_almost_equal(result, expected)
|
||||
|
||||
def test_corr_rank(self):
|
||||
stats = pytest.importorskip("scipy.stats")
|
||||
|
||||
# kendall and spearman
|
||||
A = tm.makeTimeSeries()
|
||||
B = tm.makeTimeSeries()
|
||||
A[-5:] = A[:5]
|
||||
result = A.corr(B, method="kendall")
|
||||
expected = stats.kendalltau(A, B)[0]
|
||||
tm.assert_almost_equal(result, expected)
|
||||
|
||||
result = A.corr(B, method="spearman")
|
||||
expected = stats.spearmanr(A, B)[0]
|
||||
tm.assert_almost_equal(result, expected)
|
||||
|
||||
# results from R
|
||||
A = Series(
|
||||
[
|
||||
-0.89926396,
|
||||
0.94209606,
|
||||
-1.03289164,
|
||||
-0.95445587,
|
||||
0.76910310,
|
||||
-0.06430576,
|
||||
-2.09704447,
|
||||
0.40660407,
|
||||
-0.89926396,
|
||||
0.94209606,
|
||||
]
|
||||
)
|
||||
B = Series(
|
||||
[
|
||||
-1.01270225,
|
||||
-0.62210117,
|
||||
-1.56895827,
|
||||
0.59592943,
|
||||
-0.01680292,
|
||||
1.17258718,
|
||||
-1.06009347,
|
||||
-0.10222060,
|
||||
-0.89076239,
|
||||
0.89372375,
|
||||
]
|
||||
)
|
||||
kexp = 0.4319297
|
||||
sexp = 0.5853767
|
||||
tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
|
||||
tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
|
||||
|
||||
def test_corr_invalid_method(self):
|
||||
# GH PR #22298
|
||||
s1 = Series(np.random.default_rng(2).standard_normal(10))
|
||||
s2 = Series(np.random.default_rng(2).standard_normal(10))
|
||||
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s1.corr(s2, method="____")
|
||||
|
||||
def test_corr_callable_method(self, datetime_series):
|
||||
# simple correlation example
|
||||
# returns 1 if exact equality, 0 otherwise
|
||||
my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
|
||||
|
||||
# simple example
|
||||
s1 = Series([1, 2, 3, 4, 5])
|
||||
s2 = Series([5, 4, 3, 2, 1])
|
||||
expected = 0
|
||||
tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
|
||||
|
||||
# full overlap
|
||||
tm.assert_almost_equal(
|
||||
datetime_series.corr(datetime_series, method=my_corr), 1.0
|
||||
)
|
||||
|
||||
# partial overlap
|
||||
tm.assert_almost_equal(
|
||||
datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
|
||||
)
|
||||
|
||||
# No overlap
|
||||
assert np.isnan(
|
||||
datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
|
||||
)
|
||||
|
||||
# dataframe example
|
||||
df = pd.DataFrame([s1, s2])
|
||||
expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
|
||||
tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
|
@@ -0,0 +1,203 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas.compat.numpy import np_version_gte1p25
|
||||
|
||||
from pandas.core.dtypes.common import (
|
||||
is_complex_dtype,
|
||||
is_extension_array_dtype,
|
||||
)
|
||||
|
||||
from pandas import (
|
||||
NA,
|
||||
Period,
|
||||
Series,
|
||||
Timedelta,
|
||||
Timestamp,
|
||||
date_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesDescribe:
|
||||
def test_describe_ints(self):
|
||||
ser = Series([0, 1, 2, 3, 4], name="int_data")
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[5, 2, ser.std(), 0, 1, 2, 3, 4],
|
||||
name="int_data",
|
||||
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_bools(self):
|
||||
ser = Series([True, True, False, False, False], name="bool_data")
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_strs(self):
|
||||
ser = Series(["a", "a", "b", "c", "d"], name="str_data")
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_timedelta64(self):
|
||||
ser = Series(
|
||||
[
|
||||
Timedelta("1 days"),
|
||||
Timedelta("2 days"),
|
||||
Timedelta("3 days"),
|
||||
Timedelta("4 days"),
|
||||
Timedelta("5 days"),
|
||||
],
|
||||
name="timedelta_data",
|
||||
)
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[5, ser[2], ser.std(), ser[0], ser[1], ser[2], ser[3], ser[4]],
|
||||
name="timedelta_data",
|
||||
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_period(self):
|
||||
ser = Series(
|
||||
[Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],
|
||||
name="period_data",
|
||||
)
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[3, 2, ser[0], 2],
|
||||
name="period_data",
|
||||
index=["count", "unique", "top", "freq"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_empty_object(self):
|
||||
# https://github.com/pandas-dev/pandas/issues/27183
|
||||
s = Series([None, None], dtype=object)
|
||||
result = s.describe()
|
||||
expected = Series(
|
||||
[0, 0, np.nan, np.nan],
|
||||
dtype=object,
|
||||
index=["count", "unique", "top", "freq"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = s[:0].describe()
|
||||
tm.assert_series_equal(result, expected)
|
||||
# ensure NaN, not None
|
||||
assert np.isnan(result.iloc[2])
|
||||
assert np.isnan(result.iloc[3])
|
||||
|
||||
def test_describe_with_tz(self, tz_naive_fixture):
|
||||
# GH 21332
|
||||
tz = tz_naive_fixture
|
||||
name = str(tz_naive_fixture)
|
||||
start = Timestamp(2018, 1, 1)
|
||||
end = Timestamp(2018, 1, 5)
|
||||
s = Series(date_range(start, end, tz=tz), name=name)
|
||||
result = s.describe()
|
||||
expected = Series(
|
||||
[
|
||||
5,
|
||||
Timestamp(2018, 1, 3).tz_localize(tz),
|
||||
start.tz_localize(tz),
|
||||
s[1],
|
||||
s[2],
|
||||
s[3],
|
||||
end.tz_localize(tz),
|
||||
],
|
||||
name=name,
|
||||
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_with_tz_numeric(self):
|
||||
name = tz = "CET"
|
||||
start = Timestamp(2018, 1, 1)
|
||||
end = Timestamp(2018, 1, 5)
|
||||
s = Series(date_range(start, end, tz=tz), name=name)
|
||||
|
||||
result = s.describe()
|
||||
|
||||
expected = Series(
|
||||
[
|
||||
5,
|
||||
Timestamp("2018-01-03 00:00:00", tz=tz),
|
||||
Timestamp("2018-01-01 00:00:00", tz=tz),
|
||||
Timestamp("2018-01-02 00:00:00", tz=tz),
|
||||
Timestamp("2018-01-03 00:00:00", tz=tz),
|
||||
Timestamp("2018-01-04 00:00:00", tz=tz),
|
||||
Timestamp("2018-01-05 00:00:00", tz=tz),
|
||||
],
|
||||
name=name,
|
||||
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_datetime_is_numeric_includes_datetime(self):
|
||||
s = Series(date_range("2012", periods=3))
|
||||
result = s.describe()
|
||||
expected = Series(
|
||||
[
|
||||
3,
|
||||
Timestamp("2012-01-02"),
|
||||
Timestamp("2012-01-01"),
|
||||
Timestamp("2012-01-01T12:00:00"),
|
||||
Timestamp("2012-01-02"),
|
||||
Timestamp("2012-01-02T12:00:00"),
|
||||
Timestamp("2012-01-03"),
|
||||
],
|
||||
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:Casting complex values to real discards")
|
||||
def test_numeric_result_dtype(self, any_numeric_dtype):
|
||||
# GH#48340 - describe should always return float on non-complex numeric input
|
||||
if is_extension_array_dtype(any_numeric_dtype):
|
||||
dtype = "Float64"
|
||||
else:
|
||||
dtype = "complex128" if is_complex_dtype(any_numeric_dtype) else None
|
||||
|
||||
ser = Series([0, 1], dtype=any_numeric_dtype)
|
||||
if dtype == "complex128" and np_version_gte1p25:
|
||||
with pytest.raises(
|
||||
TypeError, match=r"^a must be an array of real numbers$"
|
||||
):
|
||||
ser.describe()
|
||||
return
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[
|
||||
2.0,
|
||||
0.5,
|
||||
ser.std(),
|
||||
0,
|
||||
0.25,
|
||||
0.5,
|
||||
0.75,
|
||||
1.0,
|
||||
],
|
||||
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
|
||||
dtype=dtype,
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_describe_one_element_ea(self):
|
||||
# GH#52515
|
||||
ser = Series([0.0], dtype="Float64")
|
||||
with tm.assert_produces_warning(None):
|
||||
result = ser.describe()
|
||||
expected = Series(
|
||||
[1, 0, NA, 0, 0, 0, 0, 0],
|
||||
dtype="Float64",
|
||||
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,84 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
TimedeltaIndex,
|
||||
date_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestSeriesDiff:
|
||||
def test_diff_np(self):
|
||||
# TODO(__array_function__): could make np.diff return a Series
|
||||
# matching ser.diff()
|
||||
|
||||
ser = Series(np.arange(5))
|
||||
|
||||
res = np.diff(ser)
|
||||
expected = np.array([1, 1, 1, 1])
|
||||
tm.assert_numpy_array_equal(res, expected)
|
||||
|
||||
def test_diff_int(self):
|
||||
# int dtype
|
||||
a = 10000000000000000
|
||||
b = a + 1
|
||||
ser = Series([a, b])
|
||||
|
||||
result = ser.diff()
|
||||
assert result[1] == 1
|
||||
|
||||
def test_diff_tz(self):
|
||||
# Combined datetime diff, normal diff and boolean diff test
|
||||
ts = tm.makeTimeSeries(name="ts")
|
||||
ts.diff()
|
||||
|
||||
# neg n
|
||||
result = ts.diff(-1)
|
||||
expected = ts - ts.shift(-1)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# 0
|
||||
result = ts.diff(0)
|
||||
expected = ts - ts
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_diff_dt64(self):
|
||||
# datetime diff (GH#3100)
|
||||
ser = Series(date_range("20130102", periods=5))
|
||||
result = ser.diff()
|
||||
expected = ser - ser.shift(1)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# timedelta diff
|
||||
result = result - result.shift(1) # previous result
|
||||
expected = expected.diff() # previously expected
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_diff_dt64tz(self):
|
||||
# with tz
|
||||
ser = Series(
|
||||
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
|
||||
)
|
||||
result = ser.diff()
|
||||
expected = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"input,output,diff",
|
||||
[([False, True, True, False, False], [np.nan, True, False, True, False], 1)],
|
||||
)
|
||||
def test_diff_bool(self, input, output, diff):
|
||||
# boolean series (test for fixing #17294)
|
||||
ser = Series(input)
|
||||
result = ser.diff()
|
||||
expected = Series(output)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_diff_object_dtype(self):
|
||||
# object series
|
||||
ser = Series([False, True, 5.0, np.nan, True, False])
|
||||
result = ser.diff()
|
||||
expected = ser - ser.shift(1)
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,99 @@
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Index,
|
||||
Series,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
from pandas.api.types import is_bool_dtype
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data, index, drop_labels, axis, expected_data, expected_index",
|
||||
[
|
||||
# Unique Index
|
||||
([1, 2], ["one", "two"], ["two"], 0, [1], ["one"]),
|
||||
([1, 2], ["one", "two"], ["two"], "rows", [1], ["one"]),
|
||||
([1, 1, 2], ["one", "two", "one"], ["two"], 0, [1, 2], ["one", "one"]),
|
||||
# GH 5248 Non-Unique Index
|
||||
([1, 1, 2], ["one", "two", "one"], "two", 0, [1, 2], ["one", "one"]),
|
||||
([1, 1, 2], ["one", "two", "one"], ["one"], 0, [1], ["two"]),
|
||||
([1, 1, 2], ["one", "two", "one"], "one", 0, [1], ["two"]),
|
||||
],
|
||||
)
|
||||
def test_drop_unique_and_non_unique_index(
|
||||
data, index, axis, drop_labels, expected_data, expected_index
|
||||
):
|
||||
ser = Series(data=data, index=index)
|
||||
result = ser.drop(drop_labels, axis=axis)
|
||||
expected = Series(data=expected_data, index=expected_index)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data, index, drop_labels, axis, error_type, error_desc",
|
||||
[
|
||||
# single string/tuple-like
|
||||
(range(3), list("abc"), "bc", 0, KeyError, "not found in axis"),
|
||||
# bad axis
|
||||
(range(3), list("abc"), ("a",), 0, KeyError, "not found in axis"),
|
||||
(range(3), list("abc"), "one", "columns", ValueError, "No axis named columns"),
|
||||
],
|
||||
)
|
||||
def test_drop_exception_raised(data, index, drop_labels, axis, error_type, error_desc):
|
||||
ser = Series(data, index=index)
|
||||
with pytest.raises(error_type, match=error_desc):
|
||||
ser.drop(drop_labels, axis=axis)
|
||||
|
||||
|
||||
def test_drop_with_ignore_errors():
|
||||
# errors='ignore'
|
||||
ser = Series(range(3), index=list("abc"))
|
||||
result = ser.drop("bc", errors="ignore")
|
||||
tm.assert_series_equal(result, ser)
|
||||
result = ser.drop(["a", "d"], errors="ignore")
|
||||
expected = ser.iloc[1:]
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# GH 8522
|
||||
ser = Series([2, 3], index=[True, False])
|
||||
assert is_bool_dtype(ser.index)
|
||||
assert ser.index.dtype == bool
|
||||
result = ser.drop(True)
|
||||
expected = Series([3], index=[False])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 3]])
|
||||
@pytest.mark.parametrize("drop_labels", [[], [1], [3]])
|
||||
def test_drop_empty_list(index, drop_labels):
|
||||
# GH 21494
|
||||
expected_index = [i for i in index if i not in drop_labels]
|
||||
series = Series(index=index, dtype=object).drop(drop_labels)
|
||||
expected = Series(index=expected_index, dtype=object)
|
||||
tm.assert_series_equal(series, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data, index, drop_labels",
|
||||
[
|
||||
(None, [1, 2, 3], [1, 4]),
|
||||
(None, [1, 2, 2], [1, 4]),
|
||||
([2, 3], [0, 1], [False, True]),
|
||||
],
|
||||
)
|
||||
def test_drop_non_empty_list(data, index, drop_labels):
|
||||
# GH 21494 and GH 16877
|
||||
dtype = object if data is None else None
|
||||
ser = Series(data=data, index=index, dtype=dtype)
|
||||
with pytest.raises(KeyError, match="not found in axis"):
|
||||
ser.drop(drop_labels)
|
||||
|
||||
|
||||
def test_drop_index_ea_dtype(any_numeric_ea_dtype):
|
||||
# GH#45860
|
||||
df = Series(100, index=Index([1, 2, 2], dtype=any_numeric_ea_dtype))
|
||||
idx = Index([df.index[1]])
|
||||
result = df.drop(idx)
|
||||
expected = Series(100, index=Index([1], dtype=any_numeric_ea_dtype))
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,258 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
Categorical,
|
||||
Series,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"keep, expected",
|
||||
[
|
||||
("first", Series([False, False, False, False, True, True, False])),
|
||||
("last", Series([False, True, True, False, False, False, False])),
|
||||
(False, Series([False, True, True, False, True, True, False])),
|
||||
],
|
||||
)
|
||||
def test_drop_duplicates(any_numpy_dtype, keep, expected):
|
||||
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
|
||||
|
||||
if tc.dtype == "bool":
|
||||
pytest.skip("tested separately in test_drop_duplicates_bool")
|
||||
|
||||
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
|
||||
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
|
||||
sc = tc.copy()
|
||||
return_value = sc.drop_duplicates(keep=keep, inplace=True)
|
||||
assert return_value is None
|
||||
tm.assert_series_equal(sc, tc[~expected])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, True, True])),
        ("last", Series([True, True, False, False])),
        (False, Series([True, True, True, True])),
    ],
)
def test_drop_duplicates_bool(keep, expected):
    """bool dtype: duplicated/drop_duplicates agree and inplace returns None."""
    ser = Series([True, False, True, False])

    tm.assert_series_equal(ser.duplicated(keep=keep), expected)
    tm.assert_series_equal(ser.drop_duplicates(keep=keep), ser[~expected])

    inplace_ser = ser.copy()
    ret = inplace_ser.drop_duplicates(keep=keep, inplace=True)
    tm.assert_series_equal(inplace_ser, ser[~expected])
    assert ret is None
|
||||
|
||||
|
||||
@pytest.mark.parametrize("values", [[], list(range(5))])
|
||||
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
|
||||
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
|
||||
expected = Series([False] * len(tc), dtype="bool")
|
||||
|
||||
if tc.dtype == "bool":
|
||||
# 0 -> False and 1-> True
|
||||
# any other value would be duplicated
|
||||
tc = tc[:2]
|
||||
expected = expected[:2]
|
||||
|
||||
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
|
||||
|
||||
result_dropped = tc.drop_duplicates(keep=keep)
|
||||
tm.assert_series_equal(result_dropped, tc)
|
||||
|
||||
# validate shallow copy
|
||||
assert result_dropped is not tc
|
||||
|
||||
|
||||
class TestSeriesDropDuplicates:
    """drop_duplicates/duplicated behavior for Categorical-backed Series."""

    @pytest.fixture(
        params=["int_", "uint", "float64", "str_", "timedelta64[h]", "datetime64[D]"]
    )
    def dtype(self, request):
        # dtype of the underlying category values
        return request.param

    @pytest.fixture
    def cat_series_unused_category(self, dtype, ordered):
        # Test case 1
        # categories 4 and 5 are declared but never appear in the data;
        # `ordered` is presumably a conftest fixture — TODO confirm
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))

        input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
        cat = Categorical(input1, categories=cat_array, ordered=ordered)
        tc1 = Series(cat)
        return tc1

    def test_drop_duplicates_categorical_non_bool(self, cat_series_unused_category):
        tc1 = cat_series_unused_category

        # default keep="first": only the second 3 is a duplicate
        expected = Series([False, False, False, True])

        result = tc1.duplicated()
        tm.assert_series_equal(result, expected)

        result = tc1.drop_duplicates()
        tm.assert_series_equal(result, tc1[~expected])

        sc = tc1.copy()
        return_value = sc.drop_duplicates(inplace=True)
        # inplace=True mutates in place and returns None
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])

    def test_drop_duplicates_categorical_non_bool_keeplast(
        self, cat_series_unused_category
    ):
        tc1 = cat_series_unused_category

        # keep="last": the first 3 is flagged instead
        expected = Series([False, False, True, False])

        result = tc1.duplicated(keep="last")
        tm.assert_series_equal(result, expected)

        result = tc1.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc1[~expected])

        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])

    def test_drop_duplicates_categorical_non_bool_keepfalse(
        self, cat_series_unused_category
    ):
        tc1 = cat_series_unused_category

        # keep=False: both occurrences of 3 are flagged and dropped
        expected = Series([False, False, True, True])

        result = tc1.duplicated(keep=False)
        tm.assert_series_equal(result, expected)

        result = tc1.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc1[~expected])

        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])

    @pytest.fixture
    def cat_series(self, dtype, ordered):
        # no unused categories, unlike cat_series_unused_category
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))

        input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
        cat = Categorical(input2, categories=cat_array, ordered=ordered)
        tc2 = Series(cat)
        return tc2

    def test_drop_duplicates_categorical_non_bool2(self, cat_series):
        tc2 = cat_series

        # default keep="first": the repeats of 3 and 2 are flagged
        expected = Series([False, False, False, False, True, True, False])

        result = tc2.duplicated()
        tm.assert_series_equal(result, expected)

        result = tc2.drop_duplicates()
        tm.assert_series_equal(result, tc2[~expected])

        sc = tc2.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])

    def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series):
        tc2 = cat_series

        # keep="last": the earlier occurrences of 2 and 3 are flagged
        expected = Series([False, True, True, False, False, False, False])

        result = tc2.duplicated(keep="last")
        tm.assert_series_equal(result, expected)

        result = tc2.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc2[~expected])

        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])

    def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series):
        tc2 = cat_series

        # keep=False: all occurrences of the duplicated values are flagged
        expected = Series([False, True, True, False, True, True, False])

        result = tc2.duplicated(keep=False)
        tm.assert_series_equal(result, expected)

        result = tc2.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc2[~expected])

        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])

    def test_drop_duplicates_categorical_bool(self, ordered):
        tc = Series(
            Categorical(
                [True, False, True, False], categories=[True, False], ordered=ordered
            )
        )

        # keep="first" (default)
        expected = Series([False, False, True, True])
        tm.assert_series_equal(tc.duplicated(), expected)
        tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])

        # keep="last"
        expected = Series([True, True, False, False])
        tm.assert_series_equal(tc.duplicated(keep="last"), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])

        # keep=False: everything is duplicated, so everything is dropped
        expected = Series([True, True, True, True])
        tm.assert_series_equal(tc.duplicated(keep=False), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])

    def test_drop_duplicates_categorical_bool_na(self, nulls_fixture):
        # GH#44351
        # a null entry (not among the categories) participates in
        # duplicate detection like any other value
        ser = Series(
            Categorical(
                [True, False, True, False, nulls_fixture],
                categories=[True, False],
                ordered=True,
            )
        )
        result = ser.drop_duplicates()
        expected = Series(
            Categorical([True, False, np.nan], categories=[True, False], ordered=True),
            index=[0, 1, 4],
        )
        tm.assert_series_equal(result, expected)

    def test_drop_duplicates_ignore_index(self):
        # GH#48304
        # ignore_index=True resets the result index to 0..n-1
        ser = Series([1, 2, 2, 3])
        result = ser.drop_duplicates(ignore_index=True)
        expected = Series([1, 2, 3])
        tm.assert_series_equal(result, expected)

    def test_duplicated_arrow_dtype(self):
        pytest.importorskip("pyarrow")
        # masked pyarrow bool dtype: None is treated as its own value
        ser = Series([True, False, None, False], dtype="bool[pyarrow]")
        result = ser.drop_duplicates()
        expected = Series([True, False, None], dtype="bool[pyarrow]")
        tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,113 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
DatetimeIndex,
|
||||
IntervalIndex,
|
||||
NaT,
|
||||
Period,
|
||||
Series,
|
||||
Timestamp,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestDropna:
    """Series.dropna across dtypes, inplace mode, and ignore_index."""

    def test_dropna_empty(self):
        ser = Series([], dtype=object)

        assert len(ser.dropna()) == 0
        return_value = ser.dropna(inplace=True)
        # inplace=True mutates and returns None
        assert return_value is None
        assert len(ser) == 0

        # invalid axis
        msg = "No axis named 1 for object type Series"
        with pytest.raises(ValueError, match=msg):
            ser.dropna(axis=1)

    def test_dropna_preserve_name(self, datetime_series):
        # the name survives both the copying and the inplace paths
        datetime_series[:5] = np.nan
        result = datetime_series.dropna()
        assert result.name == datetime_series.name
        name = datetime_series.name
        ts = datetime_series.copy()
        return_value = ts.dropna(inplace=True)
        assert return_value is None
        assert ts.name == name

    def test_dropna_no_nan(self):
        # with nothing to drop, dropna returns an equal but distinct object
        for ser in [
            Series([1, 2, 3], name="x"),
            Series([False, True, False], name="x"),
        ]:
            result = ser.dropna()
            tm.assert_series_equal(result, ser)
            assert result is not ser

            s2 = ser.copy()
            return_value = s2.dropna(inplace=True)
            assert return_value is None
            tm.assert_series_equal(s2, ser)

    def test_dropna_intervals(self):
        # NaN bounds in the IntervalIndex line up with the NaN value row
        ser = Series(
            [np.nan, 1, 2, 3],
            IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),
        )

        result = ser.dropna()
        expected = ser.iloc[1:]
        tm.assert_series_equal(result, expected)

    def test_dropna_period_dtype(self):
        # GH#13737
        ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
        result = ser.dropna()
        expected = Series([Period("2011-01", freq="M")])

        tm.assert_series_equal(result, expected)

    def test_datetime64_tz_dropna(self):
        # DatetimeLikeBlock
        ser = Series(
            [
                Timestamp("2011-01-01 10:00"),
                NaT,
                Timestamp("2011-01-03 10:00"),
                NaT,
            ]
        )
        result = ser.dropna()
        expected = Series(
            [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")], index=[0, 2]
        )
        tm.assert_series_equal(result, expected)

        # DatetimeTZBlock
        idx = DatetimeIndex(
            ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz="Asia/Tokyo"
        )
        ser = Series(idx)
        assert ser.dtype == "datetime64[ns, Asia/Tokyo]"
        result = ser.dropna()
        expected = Series(
            [
                Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
                Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),
            ],
            index=[0, 2],
        )
        # the timezone must be preserved by dropna
        assert result.dtype == "datetime64[ns, Asia/Tokyo]"
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("val", [1, 1.5])
    def test_dropna_ignore_index(self, val):
        # GH#31725
        # ignore_index=True resets the index to 0..n-1 for both the
        # copying and the inplace paths
        ser = Series([1, 2, val], index=[3, 2, 1])
        result = ser.dropna(ignore_index=True)
        expected = Series([1, 2, val])
        tm.assert_series_equal(result, expected)

        ser.dropna(ignore_index=True, inplace=True)
        tm.assert_series_equal(ser, expected)
@@ -0,0 +1,7 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TestSeriesDtypes:
|
||||
def test_dtype(self, datetime_series):
|
||||
assert datetime_series.dtype == np.dtype("float64")
|
||||
assert datetime_series.dtypes == np.dtype("float64")
|
@@ -0,0 +1,77 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas import (
|
||||
NA,
|
||||
Categorical,
|
||||
Series,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, True, False, True], name="name")),
        ("last", Series([True, True, False, False, False], name="name")),
        (False, Series([True, True, True, False, True], name="name")),
    ],
)
def test_duplicated_keep(keep, expected):
    """duplicated honors keep= and preserves the Series name."""
    ser = Series(["a", "b", "b", "c", "a"], name="name")
    tm.assert_series_equal(ser.duplicated(keep=keep), expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, True, False, True])),
        ("last", Series([True, True, False, False, False])),
        (False, Series([True, True, True, False, True])),
    ],
)
def test_duplicated_nan_none(keep, expected):
    """np.nan and None in object dtype count as the same duplicated value."""
    ser = Series([np.nan, 3, 3, None, np.nan], dtype=object)
    tm.assert_series_equal(ser.duplicated(keep=keep), expected)
|
||||
|
||||
|
||||
def test_duplicated_categorical_bool_na(nulls_fixture):
|
||||
# GH#44351
|
||||
ser = Series(
|
||||
Categorical(
|
||||
[True, False, True, False, nulls_fixture],
|
||||
categories=[True, False],
|
||||
ordered=True,
|
||||
)
|
||||
)
|
||||
result = ser.duplicated()
|
||||
expected = Series([False, False, True, True, False])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "keep, vals",
    [
        ("last", [True, True, False]),
        ("first", [False, True, True]),
        (False, [True, True, True]),
    ],
)
def test_duplicated_mask(keep, vals):
    # GH#48150
    # pd.NA entries of a masked dtype are duplicates of one another
    ser = Series([1, 2, NA, NA, NA], dtype="Int64")
    expected = Series([False, False] + vals)
    tm.assert_series_equal(ser.duplicated(keep=keep), expected)
|
||||
|
||||
|
||||
def test_duplicated_mask_no_duplicated_na(keep):
|
||||
# GH#48150
|
||||
ser = Series([1, 2, NA], dtype="Int64")
|
||||
result = ser.duplicated(keep=keep)
|
||||
expected = Series([False, False, False])
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,143 @@
|
||||
from contextlib import nullcontext
|
||||
import copy
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from pandas._libs.missing import is_matching_na
|
||||
from pandas.compat.numpy import np_version_gte1p25
|
||||
|
||||
from pandas.core.dtypes.common import is_float
|
||||
|
||||
from pandas import (
|
||||
Index,
|
||||
MultiIndex,
|
||||
Series,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "arr, idx",
    [
        ([1, 2, 3, 4], [0, 2, 1, 3]),
        ([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
        (
            [1, np.nan, 3, np.nan],
            MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
        ),
    ],
)
def test_equals(arr, idx):
    """A copy compares equal; mutating one element breaks equality."""
    original = Series(arr, index=idx)
    clone = original.copy()
    assert original.equals(clone)

    original[1] = 9
    assert not original.equals(clone)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
)
def test_equals_list_array(val):
    # GH20676 Verify equals operator for list of Numpy arrays
    arr = np.array([1, 2])
    s1 = Series([arr, arr])
    s2 = s1.copy()
    assert s1.equals(s2)

    s1[1] = val

    # numpy < 1.25 emits a FutureWarning when comparing an ndarray against a
    # str, so only expect the warning for string values on those versions
    cm = (
        tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
        if isinstance(val, str) and not np_version_gte1p25
        else nullcontext()
    )
    with cm:
        assert not s1.equals(s2)
|
||||
|
||||
|
||||
def test_equals_false_negative():
    # GH8437 Verify false negative behavior of equals function for dtype object
    base = Series([False, np.nan])
    base_copy = base.copy()

    # four object Series built the same way, then filled with False in
    # slightly different ways (slice vs single-position assignment)
    template = Series(index=range(2), dtype=object)
    sliced_a = template.copy()
    single_a = template.copy()
    single_b = template.copy()
    template[:-1] = False
    sliced_a[:-1] = False
    single_a[0] = False
    single_b[0] = False

    assert base.equals(base)
    assert base.equals(base_copy)
    assert base.equals(template)
    assert base.equals(sliced_a)
    assert base.equals(single_a)
    assert single_a.equals(single_b)
|
||||
|
||||
|
||||
def test_equals_matching_nas():
    # matching but not identical NA objects must still compare equal
    for na_type in (np.datetime64, np.timedelta64):
        left = Series([na_type("NaT")], dtype=object)
        right = Series([na_type("NaT")], dtype=object)
        assert left.equals(right)
        assert Index(left).equals(Index(right))
        assert left.array.equals(right.array)

    # float NaN: pass the dtype explicitly when building the Index
    left = Series([np.float64("NaN")], dtype=object)
    right = Series([np.float64("NaN")], dtype=object)
    assert left.equals(right)
    assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
    assert left.array.equals(right.array)
|
||||
|
||||
|
||||
def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2):
|
||||
# GH#39650
|
||||
left = nulls_fixture
|
||||
right = nulls_fixture2
|
||||
if hasattr(right, "copy"):
|
||||
right = right.copy()
|
||||
else:
|
||||
right = copy.copy(right)
|
||||
|
||||
ser = Series([left], dtype=object)
|
||||
ser2 = Series([right], dtype=object)
|
||||
|
||||
if is_matching_na(left, right):
|
||||
assert ser.equals(ser2)
|
||||
elif (left is None and is_float(right)) or (right is None and is_float(left)):
|
||||
assert ser.equals(ser2)
|
||||
else:
|
||||
assert not ser.equals(ser2)
|
||||
|
||||
|
||||
def test_equals_none_vs_nan():
    # GH#39650
    # None and np.nan inside object dtype are considered equal at the
    # Series, Index, and array levels
    left = Series([1, None], dtype=object)
    right = Series([1, np.nan], dtype=object)

    assert left.equals(right)
    assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
    assert left.array.equals(right.array)
|
||||
|
||||
|
||||
def test_equals_None_vs_float():
    # GH#44190
    # an all-None Series must not compare equal to real floats; these were
    # previously found equal due to a bug, so check every orientation and
    # the frame/index variants too
    left = Series([-np.inf, np.nan, -1.0, 0.0, 1.0, 10 / 3, np.inf], dtype=object)
    right = Series([None] * len(left))

    for a, b in ((left, right), (right, left)):
        assert not a.equals(b)
        assert not a.to_frame().equals(b.to_frame())
        assert not Index(a, dtype="object").equals(Index(b, dtype="object"))
|
@@ -0,0 +1,165 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas as pd
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
def test_basic():
    """Lists and tuples unnest element-wise; NaN and [] each yield one NaN row."""
    ser = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
    result = ser.explode()
    expected = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
    )
    tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
def test_mixed_type():
    # list, ndarray, and Series entries all unnest; scalar NaN/None stay
    # as single rows
    ser = pd.Series(
        [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
    )
    result = ser.explode()
    expected = pd.Series(
        [0, 1, 2, np.nan, None, np.nan, "a", "b"],
        index=[0, 0, 0, 1, 2, 3, 4, 4],
        dtype=object,
        name="foo",
    )
    tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
def test_empty():
    # exploding an empty Series is a no-op returning a new object
    ser = pd.Series(dtype=object)
    expected = ser.copy()
    tm.assert_series_equal(ser.explode(), expected)
|
||||
|
||||
|
||||
def test_nested_lists():
    # only one level is unnested; inner lists remain scalars
    ser = pd.Series([[[1, 2, 3]], [1, 2], 1])
    expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2])
    tm.assert_series_equal(ser.explode(), expected)
|
||||
|
||||
|
||||
def test_multi_index():
    # MultiIndex labels are repeated once per exploded element
    ser = pd.Series(
        [[0, 1, 2], np.nan, [], (3, 4)],
        name="foo",
        index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]),
    )
    result = ser.explode()
    expected_index = pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
        names=["foo", "bar"],
    )
    expected = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=expected_index, dtype=object, name="foo"
    )
    tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
def test_large():
    # exploding an already-exploded (all-scalar) Series is a no-op
    ser = pd.Series([range(256)]).explode()
    tm.assert_series_equal(ser.explode(), ser)
|
||||
|
||||
|
||||
def test_invert_array():
    # exploding a Series of per-row DatetimeArrays round-trips the column
    df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})

    rows_as_arrays = df.apply(lambda x: x.array, axis=1)
    result = rows_as_arrays.explode()
    tm.assert_series_equal(result, df["a"].rename())
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
    # non-object dtypes have nothing to unnest; explode is the identity
    tm.assert_series_equal(s.explode(), s)
|
||||
|
||||
|
||||
def test_typical_usecase():
    # split a comma-separated column and join the pieces back row-wise
    df = pd.DataFrame(
        [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
        columns=["var1", "var2"],
    )
    split_rows = df.var1.str.split(",").explode()
    result = df[["var2"]].join(split_rows)
    expected = pd.DataFrame(
        {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
        columns=["var2", "var1"],
        index=[0, 0, 0, 1, 1, 1],
    )
    tm.assert_frame_equal(result, expected)
|
||||
|
||||
|
||||
def test_nested_EA():
    # a nested EA array: each row holds a tz-aware DatetimeIndex
    expected = pd.Series(
        pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
    )
    ser = pd.Series(
        [
            pd.date_range("20170101", periods=3, tz="UTC"),
            pd.date_range("20170104", periods=3, tz="UTC"),
        ]
    )
    tm.assert_series_equal(ser.explode(), expected)
|
||||
|
||||
|
||||
def test_duplicate_index():
    # GH 28005
    # duplicated index labels are simply repeated per exploded element
    ser = pd.Series([[1, 2], [3, 4]], index=[0, 0])
    expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
    tm.assert_series_equal(ser.explode(), expected)
|
||||
|
||||
|
||||
def test_ignore_index():
    # GH 34932
    # ignore_index=True resets the result labels to 0..n-1
    ser = pd.Series([[1, 2], [3, 4]])
    expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
    tm.assert_series_equal(ser.explode(ignore_index=True), expected)
|
||||
|
||||
|
||||
def test_explode_sets():
    # https://github.com/pandas-dev/pandas/issues/35614
    # set iteration order is unspecified, so sort before comparing
    ser = pd.Series([{"a", "b", "c"}], index=[1])
    result = ser.explode().sort_values()
    expected = pd.Series(["a", "b", "c"], index=[1, 1, 1])
    tm.assert_series_equal(result, expected)
|
||||
|
||||
|
||||
def test_explode_scalars_can_ignore_index():
    # https://github.com/pandas-dev/pandas/issues/40487
    # ignore_index resets labels even when nothing is unnested
    ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
    result = ser.explode(ignore_index=True)
    tm.assert_series_equal(result, pd.Series([1, 2, 3]))
|
||||
|
||||
|
||||
@pytest.mark.parametrize("ignore_index", [True, False])
def test_explode_pyarrow_list_type(ignore_index):
    # GH 53602
    # exploding an ArrowDtype list column yields the element dtype;
    # skipped entirely when pyarrow is not installed
    pa = pytest.importorskip("pyarrow")

    data = [
        [None, None],
        [1],
        [],
        [2, 3],
        None,
    ]
    ser = pd.Series(data, dtype=pd.ArrowDtype(pa.list_(pa.int64())))
    result = ser.explode(ignore_index=ignore_index)
    expected = pd.Series(
        data=[None, None, 1, None, 2, 3, None],
        index=None if ignore_index else [0, 0, 1, 2, 3, 3, 4],
        dtype=pd.ArrowDtype(pa.int64()),
    )
    tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,999 @@
|
||||
from datetime import (
|
||||
datetime,
|
||||
timedelta,
|
||||
timezone,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
import pytz
|
||||
|
||||
from pandas import (
|
||||
Categorical,
|
||||
DataFrame,
|
||||
DatetimeIndex,
|
||||
NaT,
|
||||
Period,
|
||||
Series,
|
||||
Timedelta,
|
||||
Timestamp,
|
||||
date_range,
|
||||
isna,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
from pandas.core.arrays import period_array
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings(
|
||||
"ignore:(Series|DataFrame).fillna with 'method' is deprecated:FutureWarning"
|
||||
)
|
||||
class TestSeriesFillNA:
|
||||
def test_fillna_nat(self):
|
||||
series = Series([0, 1, 2, NaT._value], dtype="M8[ns]")
|
||||
|
||||
filled = series.fillna(method="pad")
|
||||
filled2 = series.fillna(value=series.values[2])
|
||||
|
||||
expected = series.copy()
|
||||
expected.iloc[3] = expected.iloc[2]
|
||||
|
||||
tm.assert_series_equal(filled, expected)
|
||||
tm.assert_series_equal(filled2, expected)
|
||||
|
||||
df = DataFrame({"A": series})
|
||||
filled = df.fillna(method="pad")
|
||||
filled2 = df.fillna(value=series.values[2])
|
||||
expected = DataFrame({"A": expected})
|
||||
tm.assert_frame_equal(filled, expected)
|
||||
tm.assert_frame_equal(filled2, expected)
|
||||
|
||||
series = Series([NaT._value, 0, 1, 2], dtype="M8[ns]")
|
||||
|
||||
filled = series.fillna(method="bfill")
|
||||
filled2 = series.fillna(value=series[1])
|
||||
|
||||
expected = series.copy()
|
||||
expected[0] = expected[1]
|
||||
|
||||
tm.assert_series_equal(filled, expected)
|
||||
tm.assert_series_equal(filled2, expected)
|
||||
|
||||
df = DataFrame({"A": series})
|
||||
filled = df.fillna(method="bfill")
|
||||
filled2 = df.fillna(value=series[1])
|
||||
expected = DataFrame({"A": expected})
|
||||
tm.assert_frame_equal(filled, expected)
|
||||
tm.assert_frame_equal(filled2, expected)
|
||||
|
||||
def test_fillna_value_or_method(self, datetime_series):
|
||||
msg = "Cannot specify both 'value' and 'method'"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
datetime_series.fillna(value=0, method="ffill")
|
||||
|
||||
def test_fillna(self):
|
||||
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
|
||||
|
||||
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
|
||||
|
||||
ts.iloc[2] = np.nan
|
||||
|
||||
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
|
||||
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
|
||||
|
||||
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
|
||||
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
|
||||
|
||||
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
|
||||
tm.assert_series_equal(ts.fillna(value=5), exp)
|
||||
|
||||
msg = "Must specify a fill 'value' or 'method'"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
ts.fillna()
|
||||
|
||||
def test_fillna_nonscalar(self):
|
||||
# GH#5703
|
||||
s1 = Series([np.nan])
|
||||
s2 = Series([1])
|
||||
result = s1.fillna(s2)
|
||||
expected = Series([1.0])
|
||||
tm.assert_series_equal(result, expected)
|
||||
result = s1.fillna({})
|
||||
tm.assert_series_equal(result, s1)
|
||||
result = s1.fillna(Series((), dtype=object))
|
||||
tm.assert_series_equal(result, s1)
|
||||
result = s2.fillna(s1)
|
||||
tm.assert_series_equal(result, s2)
|
||||
result = s1.fillna({0: 1})
|
||||
tm.assert_series_equal(result, expected)
|
||||
result = s1.fillna({1: 1})
|
||||
tm.assert_series_equal(result, Series([np.nan]))
|
||||
result = s1.fillna({0: 1, 1: 1})
|
||||
tm.assert_series_equal(result, expected)
|
||||
result = s1.fillna(Series({0: 1, 1: 1}))
|
||||
tm.assert_series_equal(result, expected)
|
||||
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
|
||||
tm.assert_series_equal(result, s1)
|
||||
|
||||
def test_fillna_aligns(self):
|
||||
s1 = Series([0, 1, 2], list("abc"))
|
||||
s2 = Series([0, np.nan, 2], list("bac"))
|
||||
result = s2.fillna(s1)
|
||||
expected = Series([0, 0, 2.0], list("bac"))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_limit(self):
|
||||
ser = Series(np.nan, index=[0, 1, 2])
|
||||
result = ser.fillna(999, limit=1)
|
||||
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = ser.fillna(999, limit=2)
|
||||
expected = Series([999, 999, np.nan], index=[0, 1, 2])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_dont_cast_strings(self):
|
||||
# GH#9043
|
||||
# make sure a string representation of int/float values can be filled
|
||||
# correctly without raising errors or being converted
|
||||
vals = ["0", "1.5", "-0.3"]
|
||||
for val in vals:
|
||||
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
|
||||
result = ser.fillna(val)
|
||||
expected = Series([0, 1, val, val, 4], dtype="object")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_consistency(self):
|
||||
# GH#16402
|
||||
# fillna with a tz aware to a tz-naive, should result in object
|
||||
|
||||
ser = Series([Timestamp("20130101"), NaT])
|
||||
|
||||
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
|
||||
expected = Series(
|
||||
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
|
||||
dtype="object",
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# with a non-datetime
|
||||
result = ser.fillna("foo")
|
||||
expected = Series([Timestamp("20130101"), "foo"])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# assignment
|
||||
ser2 = ser.copy()
|
||||
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
|
||||
ser2[1] = "foo"
|
||||
tm.assert_series_equal(ser2, expected)
|
||||
|
||||
def test_fillna_downcast(self):
|
||||
# GH#15277
|
||||
# infer int64 from float64
|
||||
ser = Series([1.0, np.nan])
|
||||
msg = "The 'downcast' keyword in fillna is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = ser.fillna(0, downcast="infer")
|
||||
expected = Series([1, 0])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# infer int64 from float64 when fillna value is a dict
|
||||
ser = Series([1.0, np.nan])
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = ser.fillna({1: 0}, downcast="infer")
|
||||
expected = Series([1, 0])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_downcast_infer_objects_to_numeric(self):
|
||||
# GH#44241 if we have object-dtype, 'downcast="infer"' should
|
||||
# _actually_ infer
|
||||
|
||||
arr = np.arange(5).astype(object)
|
||||
arr[3] = np.nan
|
||||
|
||||
ser = Series(arr)
|
||||
|
||||
msg = "The 'downcast' keyword in fillna is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.fillna(3, downcast="infer")
|
||||
expected = Series(np.arange(5), dtype=np.int64)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
msg = "The 'downcast' keyword in ffill is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.ffill(downcast="infer")
|
||||
expected = Series([0, 1, 2, 2, 4], dtype=np.int64)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
msg = "The 'downcast' keyword in bfill is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.bfill(downcast="infer")
|
||||
expected = Series([0, 1, 2, 4, 4], dtype=np.int64)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
# with a non-round float present, we will downcast to float64
|
||||
ser[2] = 2.5
|
||||
|
||||
expected = Series([0, 1, 2.5, 3, 4], dtype=np.float64)
|
||||
msg = "The 'downcast' keyword in fillna is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.fillna(3, downcast="infer")
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
msg = "The 'downcast' keyword in ffill is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.ffill(downcast="infer")
|
||||
expected = Series([0, 1, 2.5, 2.5, 4], dtype=np.float64)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
msg = "The 'downcast' keyword in bfill is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
res = ser.bfill(downcast="infer")
|
||||
expected = Series([0, 1, 2.5, 4, 4], dtype=np.float64)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
def test_timedelta_fillna(self, frame_or_series):
|
||||
# GH#3371
|
||||
ser = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130102"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
td = ser.diff()
|
||||
obj = frame_or_series(td)
|
||||
|
||||
# reg fillna
|
||||
result = obj.fillna(Timedelta(seconds=0))
|
||||
expected = Series(
|
||||
[
|
||||
timedelta(0),
|
||||
timedelta(0),
|
||||
timedelta(1),
|
||||
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
|
||||
]
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
# GH#45746 pre-1.? ints were interpreted as seconds. then that was
|
||||
# deprecated and changed to raise. In 2.0 it casts to common dtype,
|
||||
# consistent with every other dtype's behavior
|
||||
res = obj.fillna(1)
|
||||
expected = obj.astype(object).fillna(1)
|
||||
tm.assert_equal(res, expected)
|
||||
|
||||
result = obj.fillna(Timedelta(seconds=1))
|
||||
expected = Series(
|
||||
[
|
||||
timedelta(seconds=1),
|
||||
timedelta(0),
|
||||
timedelta(1),
|
||||
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
|
||||
]
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
result = obj.fillna(timedelta(days=1, seconds=1))
|
||||
expected = Series(
|
||||
[
|
||||
timedelta(days=1, seconds=1),
|
||||
timedelta(0),
|
||||
timedelta(1),
|
||||
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
|
||||
]
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
result = obj.fillna(np.timedelta64(10**9))
|
||||
expected = Series(
|
||||
[
|
||||
timedelta(seconds=1),
|
||||
timedelta(0),
|
||||
timedelta(1),
|
||||
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
|
||||
]
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
result = obj.fillna(NaT)
|
||||
expected = Series(
|
||||
[
|
||||
NaT,
|
||||
timedelta(0),
|
||||
timedelta(1),
|
||||
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
|
||||
],
|
||||
dtype="m8[ns]",
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
# ffill
|
||||
td[2] = np.nan
|
||||
obj = frame_or_series(td)
|
||||
result = obj.ffill()
|
||||
expected = td.fillna(Timedelta(seconds=0))
|
||||
expected[0] = np.nan
|
||||
expected = frame_or_series(expected)
|
||||
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
# bfill
|
||||
td[2] = np.nan
|
||||
obj = frame_or_series(td)
|
||||
result = obj.bfill()
|
||||
expected = td.fillna(Timedelta(seconds=0))
|
||||
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
def test_datetime64_fillna(self):
|
||||
ser = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130102"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
ser[2] = np.nan
|
||||
|
||||
# ffill
|
||||
result = ser.ffill()
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# bfill
|
||||
result = ser.bfill()
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_datetime64_fillna_backfill(self):
|
||||
# GH#6587
|
||||
# make sure that we are treating as integer when filling
|
||||
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"], dtype="M8[ns]")
|
||||
|
||||
expected = Series(
|
||||
[
|
||||
"2013-08-05 15:30:00.000001",
|
||||
"2013-08-05 15:30:00.000001",
|
||||
"2013-08-05 15:30:00.000001",
|
||||
],
|
||||
dtype="M8[ns]",
|
||||
)
|
||||
result = ser.fillna(method="backfill")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
|
||||
def test_datetime64_tz_fillna(self, tz):
|
||||
# DatetimeLikeBlock
|
||||
ser = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
NaT,
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
NaT,
|
||||
]
|
||||
)
|
||||
null_loc = Series([False, True, False, True])
|
||||
|
||||
result = ser.fillna(Timestamp("2011-01-02 10:00"))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
Timestamp("2011-01-02 10:00"),
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
Timestamp("2011-01-02 10:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
# check s is not changed
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
Timestamp("2011-01-02 10:00", tz=tz),
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
Timestamp("2011-01-02 10:00", tz=tz),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna("AAA")
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
"AAA",
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
"AAA",
|
||||
],
|
||||
dtype=object,
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(
|
||||
{
|
||||
1: Timestamp("2011-01-02 10:00", tz=tz),
|
||||
3: Timestamp("2011-01-04 10:00"),
|
||||
}
|
||||
)
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
Timestamp("2011-01-02 10:00", tz=tz),
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
Timestamp("2011-01-04 10:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(
|
||||
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
|
||||
)
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00"),
|
||||
Timestamp("2011-01-02 10:00"),
|
||||
Timestamp("2011-01-03 10:00"),
|
||||
Timestamp("2011-01-04 10:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
# DatetimeTZBlock
|
||||
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
|
||||
ser = Series(idx)
|
||||
assert ser.dtype == f"datetime64[ns, {tz}]"
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(Timestamp("2011-01-02 10:00"))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
Timestamp("2011-01-02 10:00"),
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
Timestamp("2011-01-02 10:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
|
||||
idx = DatetimeIndex(
|
||||
[
|
||||
"2011-01-01 10:00",
|
||||
"2011-01-02 10:00",
|
||||
"2011-01-03 10:00",
|
||||
"2011-01-02 10:00",
|
||||
],
|
||||
tz=tz,
|
||||
)
|
||||
expected = Series(idx)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
|
||||
idx = DatetimeIndex(
|
||||
[
|
||||
"2011-01-01 10:00",
|
||||
"2011-01-02 10:00",
|
||||
"2011-01-03 10:00",
|
||||
"2011-01-02 10:00",
|
||||
],
|
||||
tz=tz,
|
||||
)
|
||||
expected = Series(idx)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna("AAA")
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
"AAA",
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
"AAA",
|
||||
],
|
||||
dtype=object,
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(
|
||||
{
|
||||
1: Timestamp("2011-01-02 10:00", tz=tz),
|
||||
3: Timestamp("2011-01-04 10:00"),
|
||||
}
|
||||
)
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
Timestamp("2011-01-02 10:00", tz=tz),
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
Timestamp("2011-01-04 10:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
result = ser.fillna(
|
||||
{
|
||||
1: Timestamp("2011-01-02 10:00", tz=tz),
|
||||
3: Timestamp("2011-01-04 10:00", tz=tz),
|
||||
}
|
||||
)
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
Timestamp("2011-01-02 10:00", tz=tz),
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
Timestamp("2011-01-04 10:00", tz=tz),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
# filling with a naive/other zone, coerce to object
|
||||
result = ser.fillna(Timestamp("20130101"))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
Timestamp("2013-01-01"),
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
Timestamp("2013-01-01"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
# pre-2.0 fillna with mixed tzs would cast to object, in 2.0
|
||||
# it retains dtype.
|
||||
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("2011-01-01 10:00", tz=tz),
|
||||
Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz),
|
||||
Timestamp("2011-01-03 10:00", tz=tz),
|
||||
Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(expected, result)
|
||||
tm.assert_series_equal(isna(ser), null_loc)
|
||||
|
||||
def test_fillna_dt64tz_with_method(self):
|
||||
# with timezone
|
||||
# GH#15855
|
||||
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
|
||||
exp = Series(
|
||||
[
|
||||
Timestamp("2012-11-11 00:00:00+01:00"),
|
||||
Timestamp("2012-11-11 00:00:00+01:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(ser.fillna(method="pad"), exp)
|
||||
|
||||
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
|
||||
exp = Series(
|
||||
[
|
||||
Timestamp("2012-11-11 00:00:00+01:00"),
|
||||
Timestamp("2012-11-11 00:00:00+01:00"),
|
||||
]
|
||||
)
|
||||
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
|
||||
|
||||
def test_fillna_pytimedelta(self):
|
||||
# GH#8209
|
||||
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
|
||||
|
||||
result = ser.fillna(timedelta(1))
|
||||
expected = Series(Timedelta("1 days"), index=["A", "B"])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_period(self):
|
||||
# GH#13737
|
||||
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
|
||||
|
||||
res = ser.fillna(Period("2012-01", freq="M"))
|
||||
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
|
||||
tm.assert_series_equal(res, exp)
|
||||
assert res.dtype == "Period[M]"
|
||||
|
||||
def test_fillna_dt64_timestamp(self, frame_or_series):
|
||||
ser = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130102"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
ser[2] = np.nan
|
||||
obj = frame_or_series(ser)
|
||||
|
||||
# reg fillna
|
||||
result = obj.fillna(Timestamp("20130104"))
|
||||
expected = Series(
|
||||
[
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130101"),
|
||||
Timestamp("20130104"),
|
||||
Timestamp("20130103 9:01:01"),
|
||||
]
|
||||
)
|
||||
expected = frame_or_series(expected)
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
result = obj.fillna(NaT)
|
||||
expected = obj
|
||||
tm.assert_equal(result, expected)
|
||||
|
||||
def test_fillna_dt64_non_nao(self):
|
||||
# GH#27419
|
||||
ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
|
||||
val = np.datetime64("1975-04-05", "ms")
|
||||
|
||||
result = ser.fillna(val)
|
||||
expected = Series(
|
||||
[Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_numeric_inplace(self):
|
||||
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
|
||||
y = x.copy()
|
||||
|
||||
return_value = y.fillna(value=0, inplace=True)
|
||||
assert return_value is None
|
||||
|
||||
expected = x.fillna(value=0)
|
||||
tm.assert_series_equal(y, expected)
|
||||
|
||||
# ---------------------------------------------------------------
|
||||
# CategoricalDtype
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"fill_value, expected_output",
|
||||
[
|
||||
("a", ["a", "a", "b", "a", "a"]),
|
||||
({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
|
||||
({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
|
||||
({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
|
||||
(Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
|
||||
(Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
|
||||
(Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
|
||||
(Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
|
||||
],
|
||||
)
|
||||
def test_fillna_categorical(self, fill_value, expected_output):
|
||||
# GH#17033
|
||||
# Test fillna for a Categorical series
|
||||
data = ["a", np.nan, "b", np.nan, np.nan]
|
||||
ser = Series(Categorical(data, categories=["a", "b"]))
|
||||
exp = Series(Categorical(expected_output, categories=["a", "b"]))
|
||||
result = ser.fillna(fill_value)
|
||||
tm.assert_series_equal(result, exp)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"fill_value, expected_output",
|
||||
[
|
||||
(Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
|
||||
(Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
|
||||
(
|
||||
Series(
|
||||
Categorical(
|
||||
["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
|
||||
)
|
||||
),
|
||||
["a", "d", "b", "d", "a"],
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
|
||||
# GH#26215
|
||||
data = ["a", np.nan, "b", np.nan, np.nan]
|
||||
ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
|
||||
exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
|
||||
result = ser.fillna(fill_value)
|
||||
tm.assert_series_equal(result, exp)
|
||||
|
||||
def test_fillna_categorical_raises(self):
|
||||
data = ["a", np.nan, "b", np.nan, np.nan]
|
||||
ser = Series(Categorical(data, categories=["a", "b"]))
|
||||
cat = ser._values
|
||||
|
||||
msg = "Cannot setitem on a Categorical with a new category"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna("d")
|
||||
|
||||
msg2 = "Length of 'value' does not match."
|
||||
with pytest.raises(ValueError, match=msg2):
|
||||
cat.fillna(Series("d"))
|
||||
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna({1: "d", 3: "a"})
|
||||
|
||||
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna(["a", "b"])
|
||||
|
||||
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna(("a", "b"))
|
||||
|
||||
msg = (
|
||||
'"value" parameter must be a scalar, dict '
|
||||
'or Series, but you passed a "DataFrame"'
|
||||
)
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))
|
||||
|
||||
@pytest.mark.parametrize("dtype", [float, "float32", "float64"])
|
||||
@pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES)
|
||||
@pytest.mark.parametrize("scalar", [True, False])
|
||||
def test_fillna_float_casting(self, dtype, fill_type, scalar):
|
||||
# GH-43424
|
||||
ser = Series([np.nan, 1.2], dtype=dtype)
|
||||
fill_values = Series([2, 2], dtype=fill_type)
|
||||
if scalar:
|
||||
fill_values = fill_values.dtype.type(2)
|
||||
|
||||
result = ser.fillna(fill_values)
|
||||
expected = Series([2.0, 1.2], dtype=dtype)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
ser = Series([np.nan, 1.2], dtype=dtype)
|
||||
mask = ser.isna().to_numpy()
|
||||
ser[mask] = fill_values
|
||||
tm.assert_series_equal(ser, expected)
|
||||
|
||||
ser = Series([np.nan, 1.2], dtype=dtype)
|
||||
ser.mask(mask, fill_values, inplace=True)
|
||||
tm.assert_series_equal(ser, expected)
|
||||
|
||||
ser = Series([np.nan, 1.2], dtype=dtype)
|
||||
res = ser.where(~mask, fill_values)
|
||||
tm.assert_series_equal(res, expected)
|
||||
|
||||
def test_fillna_f32_upcast_with_dict(self):
|
||||
# GH-43424
|
||||
ser = Series([np.nan, 1.2], dtype=np.float32)
|
||||
result = ser.fillna({0: 1})
|
||||
expected = Series([1.0, 1.2], dtype=np.float32)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# ---------------------------------------------------------------
|
||||
# Invalid Usages
|
||||
|
||||
def test_fillna_invalid_method(self, datetime_series):
|
||||
try:
|
||||
datetime_series.fillna(method="ffil")
|
||||
except ValueError as inst:
|
||||
assert "ffil" in str(inst)
|
||||
|
||||
def test_fillna_listlike_invalid(self):
|
||||
ser = Series(np.random.default_rng(2).integers(-100, 100, 50))
|
||||
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna([1, 2])
|
||||
|
||||
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
ser.fillna((1, 2))
|
||||
|
||||
def test_fillna_method_and_limit_invalid(self):
|
||||
# related GH#9217, make sure limit is an int and greater than 0
|
||||
ser = Series([1, 2, 3, None])
|
||||
msg = "|".join(
|
||||
[
|
||||
r"Cannot specify both 'value' and 'method'\.",
|
||||
"Limit must be greater than 0",
|
||||
"Limit must be an integer",
|
||||
]
|
||||
)
|
||||
for limit in [-1, 0, 1.0, 2.0]:
|
||||
for method in ["backfill", "bfill", "pad", "ffill", None]:
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
ser.fillna(1, limit=limit, method=method)
|
||||
|
||||
def test_fillna_datetime64_with_timezone_tzinfo(self):
|
||||
# https://github.com/pandas-dev/pandas/issues/38851
|
||||
# different tzinfos representing UTC treated as equal
|
||||
ser = Series(date_range("2020", periods=3, tz="UTC"))
|
||||
expected = ser.copy()
|
||||
ser[1] = NaT
|
||||
result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# pre-2.0 we cast to object with mixed tzs, in 2.0 we retain dtype
|
||||
ts = Timestamp("2000-01-01", tz="US/Pacific")
|
||||
ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))
|
||||
assert ser2.dtype.kind == "M"
|
||||
result = ser2.fillna(ts)
|
||||
expected = Series(
|
||||
[ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]],
|
||||
dtype=ser2.dtype,
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"input, input_fillna, expected_data, expected_categories",
|
||||
[
|
||||
(["A", "B", None, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]),
|
||||
(["A", "B", np.nan, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]),
|
||||
],
|
||||
)
|
||||
def test_fillna_categorical_accept_same_type(
|
||||
self, input, input_fillna, expected_data, expected_categories
|
||||
):
|
||||
# GH32414
|
||||
cat = Categorical(input)
|
||||
ser = Series(cat).fillna(input_fillna)
|
||||
filled = cat.fillna(ser)
|
||||
result = cat.fillna(filled)
|
||||
expected = Categorical(expected_data, categories=expected_categories)
|
||||
tm.assert_categorical_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings(
|
||||
"ignore:Series.fillna with 'method' is deprecated:FutureWarning"
|
||||
)
|
||||
class TestFillnaPad:
|
||||
def test_fillna_bug(self):
|
||||
ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
|
||||
filled = ser.fillna(method="ffill")
|
||||
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)
|
||||
tm.assert_series_equal(filled, expected)
|
||||
|
||||
filled = ser.fillna(method="bfill")
|
||||
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)
|
||||
tm.assert_series_equal(filled, expected)
|
||||
|
||||
def test_ffill(self):
|
||||
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
|
||||
ts.iloc[2] = np.nan
|
||||
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
|
||||
|
||||
def test_ffill_mixed_dtypes_without_missing_data(self):
|
||||
# GH#14956
|
||||
series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
|
||||
result = series.ffill()
|
||||
tm.assert_series_equal(series, result)
|
||||
|
||||
def test_bfill(self):
|
||||
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
|
||||
ts.iloc[2] = np.nan
|
||||
tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
|
||||
|
||||
def test_pad_nan(self):
|
||||
x = Series(
|
||||
[np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float
|
||||
)
|
||||
|
||||
return_value = x.fillna(method="pad", inplace=True)
|
||||
assert return_value is None
|
||||
|
||||
expected = Series(
|
||||
[np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float
|
||||
)
|
||||
tm.assert_series_equal(x[1:], expected[1:])
|
||||
assert np.isnan(x.iloc[0]), np.isnan(expected.iloc[0])
|
||||
|
||||
def test_series_fillna_limit(self):
|
||||
index = np.arange(10)
|
||||
s = Series(np.random.default_rng(2).standard_normal(10), index=index)
|
||||
|
||||
result = s[:2].reindex(index)
|
||||
result = result.fillna(method="pad", limit=5)
|
||||
|
||||
expected = s[:2].reindex(index).fillna(method="pad")
|
||||
expected[-3:] = np.nan
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = s[-2:].reindex(index)
|
||||
result = result.fillna(method="bfill", limit=5)
|
||||
|
||||
expected = s[-2:].reindex(index).fillna(method="backfill")
|
||||
expected[:3] = np.nan
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_series_pad_backfill_limit(self):
|
||||
index = np.arange(10)
|
||||
s = Series(np.random.default_rng(2).standard_normal(10), index=index)
|
||||
|
||||
result = s[:2].reindex(index, method="pad", limit=5)
|
||||
|
||||
expected = s[:2].reindex(index).fillna(method="pad")
|
||||
expected[-3:] = np.nan
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = s[-2:].reindex(index, method="backfill", limit=5)
|
||||
|
||||
expected = s[-2:].reindex(index).fillna(method="backfill")
|
||||
expected[:3] = np.nan
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_fillna_int(self):
|
||||
ser = Series(np.random.default_rng(2).integers(-100, 100, 50))
|
||||
return_value = ser.fillna(method="ffill", inplace=True)
|
||||
assert return_value is None
|
||||
tm.assert_series_equal(ser.fillna(method="ffill", inplace=False), ser)
|
||||
|
||||
def test_datetime64tz_fillna_round_issue(self):
|
||||
# GH#14872
|
||||
|
||||
data = Series(
|
||||
[NaT, NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)]
|
||||
)
|
||||
|
||||
filled = data.bfill()
|
||||
|
||||
expected = Series(
|
||||
[
|
||||
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
|
||||
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
|
||||
datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),
|
||||
]
|
||||
)
|
||||
|
||||
tm.assert_series_equal(filled, expected)
|
||||
|
||||
def test_fillna_parr(self):
|
||||
# GH-24537
|
||||
dti = date_range(
|
||||
Timestamp.max - Timedelta(nanoseconds=10), periods=5, freq="ns"
|
||||
)
|
||||
ser = Series(dti.to_period("ns"))
|
||||
ser[2] = NaT
|
||||
arr = period_array(
|
||||
[
|
||||
Timestamp("2262-04-11 23:47:16.854775797"),
|
||||
Timestamp("2262-04-11 23:47:16.854775798"),
|
||||
Timestamp("2262-04-11 23:47:16.854775798"),
|
||||
Timestamp("2262-04-11 23:47:16.854775800"),
|
||||
Timestamp("2262-04-11 23:47:16.854775801"),
|
||||
],
|
||||
freq="ns",
|
||||
)
|
||||
expected = Series(arr)
|
||||
|
||||
filled = ser.ffill()
|
||||
|
||||
tm.assert_series_equal(filled, expected)
|
||||
|
||||
@pytest.mark.parametrize("func", ["pad", "backfill"])
|
||||
def test_pad_backfill_deprecated(self, func):
|
||||
# GH#33396
|
||||
ser = Series([1, 2, 3])
|
||||
with tm.assert_produces_warning(FutureWarning):
|
||||
getattr(ser, func)()
|
@@ -0,0 +1,35 @@
|
||||
from pandas import (
|
||||
Index,
|
||||
Series,
|
||||
date_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestGetNumericData:
|
||||
def test_get_numeric_data_preserve_dtype(self, using_copy_on_write):
|
||||
# get the numeric data
|
||||
obj = Series([1, 2, 3])
|
||||
result = obj._get_numeric_data()
|
||||
tm.assert_series_equal(result, obj)
|
||||
|
||||
# returned object is a shallow copy
|
||||
result.iloc[0] = 0
|
||||
if using_copy_on_write:
|
||||
assert obj.iloc[0] == 1
|
||||
else:
|
||||
assert obj.iloc[0] == 0
|
||||
|
||||
obj = Series([1, "2", 3.0])
|
||||
result = obj._get_numeric_data()
|
||||
expected = Series([], dtype=object, index=Index([], dtype=object))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
obj = Series([True, False, True])
|
||||
result = obj._get_numeric_data()
|
||||
tm.assert_series_equal(result, obj)
|
||||
|
||||
obj = Series(date_range("20130101", periods=3))
|
||||
result = obj._get_numeric_data()
|
||||
expected = Series([], dtype="M8[ns]", index=Index([], dtype=object))
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,8 @@
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
def test_head_tail(string_series):
|
||||
tm.assert_series_equal(string_series.head(), string_series[:5])
|
||||
tm.assert_series_equal(string_series.head(0), string_series[0:0])
|
||||
tm.assert_series_equal(string_series.tail(), string_series[-5:])
|
||||
tm.assert_series_equal(string_series.tail(0), string_series[0:0])
|
@@ -0,0 +1,56 @@
|
||||
import numpy as np
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
interval_range,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
class TestInferObjects:
|
||||
def test_copy(self, index_or_series):
|
||||
# GH#50096
|
||||
# case where we don't need to do inference because it is already non-object
|
||||
obj = index_or_series(np.array([1, 2, 3], dtype="int64"))
|
||||
|
||||
result = obj.infer_objects(copy=False)
|
||||
assert tm.shares_memory(result, obj)
|
||||
|
||||
# case where we try to do inference but can't do better than object
|
||||
obj2 = index_or_series(np.array(["foo", 2], dtype=object))
|
||||
result2 = obj2.infer_objects(copy=False)
|
||||
assert tm.shares_memory(result2, obj2)
|
||||
|
||||
def test_infer_objects_series(self, index_or_series):
|
||||
# GH#11221
|
||||
actual = index_or_series(np.array([1, 2, 3], dtype="O")).infer_objects()
|
||||
expected = index_or_series([1, 2, 3])
|
||||
tm.assert_equal(actual, expected)
|
||||
|
||||
actual = index_or_series(np.array([1, 2, 3, None], dtype="O")).infer_objects()
|
||||
expected = index_or_series([1.0, 2.0, 3.0, np.nan])
|
||||
tm.assert_equal(actual, expected)
|
||||
|
||||
# only soft conversions, unconvertable pass thru unchanged
|
||||
|
||||
obj = index_or_series(np.array([1, 2, 3, None, "a"], dtype="O"))
|
||||
actual = obj.infer_objects()
|
||||
expected = index_or_series([1, 2, 3, None, "a"], dtype=object)
|
||||
|
||||
assert actual.dtype == "object"
|
||||
tm.assert_equal(actual, expected)
|
||||
|
||||
def test_infer_objects_interval(self, index_or_series):
|
||||
# GH#50090
|
||||
ii = interval_range(1, 10)
|
||||
obj = index_or_series(ii)
|
||||
|
||||
result = obj.astype(object).infer_objects()
|
||||
tm.assert_equal(result, obj)
|
||||
|
||||
def test_infer_objects_bytes(self):
|
||||
# GH#49650
|
||||
ser = Series([b"a"], dtype="bytes")
|
||||
expected = ser.copy()
|
||||
result = ser.infer_objects()
|
||||
tm.assert_series_equal(result, expected)
|
@@ -0,0 +1,860 @@
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import pandas.util._test_decorators as td
|
||||
|
||||
import pandas as pd
|
||||
from pandas import (
|
||||
Index,
|
||||
MultiIndex,
|
||||
Series,
|
||||
date_range,
|
||||
isna,
|
||||
)
|
||||
import pandas._testing as tm
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=[
|
||||
"linear",
|
||||
"index",
|
||||
"values",
|
||||
"nearest",
|
||||
"slinear",
|
||||
"zero",
|
||||
"quadratic",
|
||||
"cubic",
|
||||
"barycentric",
|
||||
"krogh",
|
||||
"polynomial",
|
||||
"spline",
|
||||
"piecewise_polynomial",
|
||||
"from_derivatives",
|
||||
"pchip",
|
||||
"akima",
|
||||
"cubicspline",
|
||||
]
|
||||
)
|
||||
def nontemporal_method(request):
|
||||
"""Fixture that returns an (method name, required kwargs) pair.
|
||||
|
||||
This fixture does not include method 'time' as a parameterization; that
|
||||
method requires a Series with a DatetimeIndex, and is generally tested
|
||||
separately from these non-temporal methods.
|
||||
"""
|
||||
method = request.param
|
||||
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
|
||||
return method, kwargs
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=[
|
||||
"linear",
|
||||
"slinear",
|
||||
"zero",
|
||||
"quadratic",
|
||||
"cubic",
|
||||
"barycentric",
|
||||
"krogh",
|
||||
"polynomial",
|
||||
"spline",
|
||||
"piecewise_polynomial",
|
||||
"from_derivatives",
|
||||
"pchip",
|
||||
"akima",
|
||||
"cubicspline",
|
||||
]
|
||||
)
|
||||
def interp_methods_ind(request):
|
||||
"""Fixture that returns a (method name, required kwargs) pair to
|
||||
be tested for various Index types.
|
||||
|
||||
This fixture does not include methods - 'time', 'index', 'nearest',
|
||||
'values' as a parameterization
|
||||
"""
|
||||
method = request.param
|
||||
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
|
||||
return method, kwargs
|
||||
|
||||
|
||||
class TestSeriesInterpolateData:
|
||||
@pytest.mark.xfail(reason="EA.fillna does not handle 'linear' method")
|
||||
def test_interpolate_period_values(self):
|
||||
orig = Series(date_range("2012-01-01", periods=5))
|
||||
ser = orig.copy()
|
||||
ser[2] = pd.NaT
|
||||
|
||||
# period cast
|
||||
ser_per = ser.dt.to_period("D")
|
||||
res_per = ser_per.interpolate()
|
||||
expected_per = orig.dt.to_period("D")
|
||||
tm.assert_series_equal(res_per, expected_per)
|
||||
|
||||
def test_interpolate(self, datetime_series):
|
||||
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
|
||||
|
||||
ts_copy = ts.copy()
|
||||
ts_copy[5:10] = np.nan
|
||||
|
||||
linear_interp = ts_copy.interpolate(method="linear")
|
||||
tm.assert_series_equal(linear_interp, ts)
|
||||
|
||||
ord_ts = Series(
|
||||
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
|
||||
).astype(float)
|
||||
|
||||
ord_ts_copy = ord_ts.copy()
|
||||
ord_ts_copy[5:10] = np.nan
|
||||
|
||||
time_interp = ord_ts_copy.interpolate(method="time")
|
||||
tm.assert_series_equal(time_interp, ord_ts)
|
||||
|
||||
def test_interpolate_time_raises_for_non_timeseries(self):
|
||||
# When method='time' is used on a non-TimeSeries that contains a null
|
||||
# value, a ValueError should be raised.
|
||||
non_ts = Series([0, 1, 2, np.nan])
|
||||
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
non_ts.interpolate(method="time")
|
||||
|
||||
def test_interpolate_cubicspline(self):
|
||||
pytest.importorskip("scipy")
|
||||
ser = Series([10, 11, 12, 13])
|
||||
|
||||
expected = Series(
|
||||
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
|
||||
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
|
||||
)
|
||||
# interpolate at new_index
|
||||
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
|
||||
float
|
||||
)
|
||||
result = ser.reindex(new_index).interpolate(method="cubicspline").loc[1:3]
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interpolate_pchip(self):
|
||||
pytest.importorskip("scipy")
|
||||
ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
|
||||
|
||||
# interpolate at new_index
|
||||
new_index = ser.index.union(
|
||||
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
|
||||
).astype(float)
|
||||
interp_s = ser.reindex(new_index).interpolate(method="pchip")
|
||||
# does not blow up, GH5977
|
||||
interp_s.loc[49:51]
|
||||
|
||||
def test_interpolate_akima(self):
|
||||
pytest.importorskip("scipy")
|
||||
ser = Series([10, 11, 12, 13])
|
||||
|
||||
# interpolate at new_index where `der` is zero
|
||||
expected = Series(
|
||||
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
|
||||
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
|
||||
)
|
||||
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
|
||||
float
|
||||
)
|
||||
interp_s = ser.reindex(new_index).interpolate(method="akima")
|
||||
tm.assert_series_equal(interp_s.loc[1:3], expected)
|
||||
|
||||
# interpolate at new_index where `der` is a non-zero int
|
||||
expected = Series(
|
||||
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
|
||||
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
|
||||
)
|
||||
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
|
||||
float
|
||||
)
|
||||
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
|
||||
tm.assert_series_equal(interp_s.loc[1:3], expected)
|
||||
|
||||
def test_interpolate_piecewise_polynomial(self):
|
||||
pytest.importorskip("scipy")
|
||||
ser = Series([10, 11, 12, 13])
|
||||
|
||||
expected = Series(
|
||||
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
|
||||
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
|
||||
)
|
||||
# interpolate at new_index
|
||||
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
|
||||
float
|
||||
)
|
||||
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
|
||||
tm.assert_series_equal(interp_s.loc[1:3], expected)
|
||||
|
||||
def test_interpolate_from_derivatives(self):
|
||||
pytest.importorskip("scipy")
|
||||
ser = Series([10, 11, 12, 13])
|
||||
|
||||
expected = Series(
|
||||
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
|
||||
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
|
||||
)
|
||||
# interpolate at new_index
|
||||
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
|
||||
float
|
||||
)
|
||||
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
|
||||
tm.assert_series_equal(interp_s.loc[1:3], expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"kwargs",
|
||||
[
|
||||
{},
|
||||
pytest.param(
|
||||
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_interpolate_corners(self, kwargs):
|
||||
s = Series([np.nan, np.nan])
|
||||
tm.assert_series_equal(s.interpolate(**kwargs), s)
|
||||
|
||||
s = Series([], dtype=object).interpolate()
|
||||
tm.assert_series_equal(s.interpolate(**kwargs), s)
|
||||
|
||||
def test_interpolate_index_values(self):
|
||||
s = Series(np.nan, index=np.sort(np.random.default_rng(2).random(30)))
|
||||
s.loc[::3] = np.random.default_rng(2).standard_normal(10)
|
||||
|
||||
vals = s.index.values.astype(float)
|
||||
|
||||
result = s.interpolate(method="index")
|
||||
|
||||
expected = s.copy()
|
||||
bad = isna(expected.values)
|
||||
good = ~bad
|
||||
expected = Series(
|
||||
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
|
||||
)
|
||||
|
||||
tm.assert_series_equal(result[bad], expected)
|
||||
|
||||
# 'values' is synonymous with 'index' for the method kwarg
|
||||
other_result = s.interpolate(method="values")
|
||||
|
||||
tm.assert_series_equal(other_result, result)
|
||||
tm.assert_series_equal(other_result[bad], expected)
|
||||
|
||||
def test_interpolate_non_ts(self):
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
|
||||
msg = (
|
||||
"time-weighted interpolation only works on Series or DataFrames "
|
||||
"with a DatetimeIndex"
|
||||
)
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="time")
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"kwargs",
|
||||
[
|
||||
{},
|
||||
pytest.param(
|
||||
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_nan_interpolate(self, kwargs):
|
||||
s = Series([0, 1, np.nan, 3])
|
||||
result = s.interpolate(**kwargs)
|
||||
expected = Series([0.0, 1.0, 2.0, 3.0])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_nan_irregular_index(self):
|
||||
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
|
||||
result = s.interpolate()
|
||||
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_nan_str_index(self):
|
||||
s = Series([0, 1, 2, np.nan], index=list("abcd"))
|
||||
result = s.interpolate()
|
||||
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_quad(self):
|
||||
pytest.importorskip("scipy")
|
||||
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
|
||||
result = sq.interpolate(method="quadratic")
|
||||
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_scipy_basic(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, 3, np.nan, 12, np.nan, 25])
|
||||
# slinear
|
||||
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
|
||||
result = s.interpolate(method="slinear")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
msg = "The 'downcast' keyword in Series.interpolate is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(method="slinear", downcast="infer")
|
||||
tm.assert_series_equal(result, expected)
|
||||
# nearest
|
||||
expected = Series([1, 3, 3, 12, 12, 25])
|
||||
result = s.interpolate(method="nearest")
|
||||
tm.assert_series_equal(result, expected.astype("float"))
|
||||
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(method="nearest", downcast="infer")
|
||||
tm.assert_series_equal(result, expected)
|
||||
# zero
|
||||
expected = Series([1, 3, 3, 12, 12, 25])
|
||||
result = s.interpolate(method="zero")
|
||||
tm.assert_series_equal(result, expected.astype("float"))
|
||||
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(method="zero", downcast="infer")
|
||||
tm.assert_series_equal(result, expected)
|
||||
# quadratic
|
||||
# GH #15662.
|
||||
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
|
||||
result = s.interpolate(method="quadratic")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(method="quadratic", downcast="infer")
|
||||
tm.assert_series_equal(result, expected)
|
||||
# cubic
|
||||
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
|
||||
result = s.interpolate(method="cubic")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit(self):
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
|
||||
|
||||
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
|
||||
result = s.interpolate(method="linear", limit=2)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize("limit", [-1, 0])
|
||||
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
|
||||
# GH 9217: make sure limit is greater than zero.
|
||||
s = Series([1, 2, np.nan, 4])
|
||||
method, kwargs = nontemporal_method
|
||||
with pytest.raises(ValueError, match="Limit must be greater than 0"):
|
||||
s.interpolate(limit=limit, method=method, **kwargs)
|
||||
|
||||
def test_interpolate_invalid_float_limit(self, nontemporal_method):
|
||||
# GH 9217: make sure limit is an integer.
|
||||
s = Series([1, 2, np.nan, 4])
|
||||
method, kwargs = nontemporal_method
|
||||
limit = 2.0
|
||||
with pytest.raises(ValueError, match="Limit must be an integer"):
|
||||
s.interpolate(limit=limit, method=method, **kwargs)
|
||||
|
||||
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
|
||||
def test_interp_invalid_method(self, invalid_method):
|
||||
s = Series([1, 3, np.nan, 12, np.nan, 25])
|
||||
|
||||
msg = f"method must be one of.* Got '{invalid_method}' instead"
|
||||
if invalid_method is None:
|
||||
msg = "'method' should be a string, not None"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method=invalid_method)
|
||||
|
||||
# When an invalid method and invalid limit (such as -1) are
|
||||
# provided, the error message reflects the invalid method.
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method=invalid_method, limit=-1)
|
||||
|
||||
def test_interp_invalid_method_and_value(self):
|
||||
# GH#36624
|
||||
ser = Series([1, 3, np.nan, 12, np.nan, 25])
|
||||
|
||||
msg = "'fill_value' is not a valid keyword for Series.interpolate"
|
||||
msg2 = "Series.interpolate with method=pad"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg2):
|
||||
ser.interpolate(fill_value=3, method="pad")
|
||||
|
||||
def test_interp_limit_forward(self):
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
|
||||
|
||||
# Provide 'forward' (the default) explicitly here.
|
||||
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
|
||||
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_unlimited(self):
|
||||
# these test are for issue #16282 default Limit=None is unlimited
|
||||
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
|
||||
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
|
||||
result = s.interpolate(method="linear", limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
|
||||
result = s.interpolate(method="linear", limit_direction="forward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
|
||||
result = s.interpolate(method="linear", limit_direction="backward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit_bad_direction(self):
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
|
||||
|
||||
msg = (
|
||||
r"Invalid limit_direction: expecting one of \['forward', "
|
||||
r"'backward', 'both'\], got 'abc'"
|
||||
)
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="linear", limit=2, limit_direction="abc")
|
||||
|
||||
# raises an error even if no limit is specified.
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="linear", limit_direction="abc")
|
||||
|
||||
# limit_area introduced GH #16284
|
||||
def test_interp_limit_area(self):
|
||||
# These tests are for issue #9218 -- fill NaNs in both directions.
|
||||
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
|
||||
|
||||
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
|
||||
result = s.interpolate(method="linear", limit_area="inside")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series(
|
||||
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
|
||||
)
|
||||
result = s.interpolate(method="linear", limit_area="inside", limit=1)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
|
||||
result = s.interpolate(
|
||||
method="linear", limit_area="inside", limit_direction="both", limit=1
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
|
||||
result = s.interpolate(method="linear", limit_area="outside")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series(
|
||||
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
|
||||
)
|
||||
result = s.interpolate(method="linear", limit_area="outside", limit=1)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
|
||||
result = s.interpolate(
|
||||
method="linear", limit_area="outside", limit_direction="both", limit=1
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
|
||||
result = s.interpolate(
|
||||
method="linear", limit_area="outside", limit_direction="backward"
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# raises an error even if limit type is wrong.
|
||||
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="linear", limit_area="abc")
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"method, limit_direction, expected",
|
||||
[
|
||||
("pad", "backward", "forward"),
|
||||
("ffill", "backward", "forward"),
|
||||
("backfill", "forward", "backward"),
|
||||
("bfill", "forward", "backward"),
|
||||
("pad", "both", "forward"),
|
||||
("ffill", "both", "forward"),
|
||||
("backfill", "both", "backward"),
|
||||
("bfill", "both", "backward"),
|
||||
],
|
||||
)
|
||||
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
|
||||
# https://github.com/pandas-dev/pandas/pull/34746
|
||||
s = Series([1, 2, 3])
|
||||
|
||||
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
|
||||
msg2 = "Series.interpolate with method="
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg2):
|
||||
s.interpolate(method=method, limit_direction=limit_direction)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data, expected_data, kwargs",
|
||||
(
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan],
|
||||
{"method": "pad", "limit_area": "inside"},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan],
|
||||
{"method": "pad", "limit_area": "inside", "limit": 1},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0],
|
||||
{"method": "pad", "limit_area": "outside"},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan],
|
||||
{"method": "pad", "limit_area": "outside", "limit": 1},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
||||
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
|
||||
{"method": "pad", "limit_area": "outside", "limit": 1},
|
||||
),
|
||||
(
|
||||
range(5),
|
||||
range(5),
|
||||
{"method": "pad", "limit_area": "outside", "limit": 1},
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_interp_limit_area_with_pad(self, data, expected_data, kwargs):
|
||||
# GH26796
|
||||
|
||||
s = Series(data)
|
||||
expected = Series(expected_data)
|
||||
msg = "Series.interpolate with method=pad"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(**kwargs)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data, expected_data, kwargs",
|
||||
(
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan],
|
||||
{"method": "bfill", "limit_area": "inside"},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan],
|
||||
{"method": "bfill", "limit_area": "inside", "limit": 1},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
|
||||
{"method": "bfill", "limit_area": "outside"},
|
||||
),
|
||||
(
|
||||
[np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
|
||||
[np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
|
||||
{"method": "bfill", "limit_area": "outside", "limit": 1},
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs):
|
||||
# GH26796
|
||||
|
||||
s = Series(data)
|
||||
expected = Series(expected_data)
|
||||
msg = "Series.interpolate with method=bfill"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = s.interpolate(**kwargs)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit_direction(self):
|
||||
# These tests are for issue #9218 -- fill NaNs in both directions.
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
|
||||
|
||||
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
|
||||
result = s.interpolate(method="linear", limit=1, limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# Check that this works on a longer series of nans.
|
||||
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
|
||||
|
||||
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series(
|
||||
[1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]
|
||||
)
|
||||
result = s.interpolate(method="linear", limit=1, limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit_to_ends(self):
|
||||
# These test are for issue #10420 -- flow back to beginning.
|
||||
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
|
||||
|
||||
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0])
|
||||
result = s.interpolate(method="linear", limit=2, limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit_before_ends(self):
|
||||
# These test are for issue #11115 -- limit ends properly.
|
||||
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
|
||||
|
||||
expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan])
|
||||
result = s.interpolate(method="linear", limit=1, limit_direction="forward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, 5.0, 5.0, 7.0, np.nan, np.nan])
|
||||
result = s.interpolate(method="linear", limit=1, limit_direction="backward")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
expected = Series([np.nan, 5.0, 5.0, 7.0, 7.0, np.nan])
|
||||
result = s.interpolate(method="linear", limit=1, limit_direction="both")
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_all_good(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, 2, 3])
|
||||
result = s.interpolate(method="polynomial", order=1)
|
||||
tm.assert_series_equal(result, s)
|
||||
|
||||
# non-scipy
|
||||
result = s.interpolate()
|
||||
tm.assert_series_equal(result, s)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
|
||||
)
|
||||
def test_interp_multiIndex(self, check_scipy):
|
||||
idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
|
||||
s = Series([1, 2, np.nan], index=idx)
|
||||
|
||||
expected = s.copy()
|
||||
expected.loc[2] = 2
|
||||
result = s.interpolate()
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
msg = "Only `method=linear` interpolation is supported on MultiIndexes"
|
||||
if check_scipy:
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="polynomial", order=1)
|
||||
|
||||
def test_interp_nonmono_raise(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, np.nan, 3], index=[0, 2, 1])
|
||||
msg = "krogh interpolation requires that the index be monotonic"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="krogh")
|
||||
|
||||
@pytest.mark.parametrize("method", ["nearest", "pad"])
|
||||
def test_interp_datetime64(self, method, tz_naive_fixture):
|
||||
pytest.importorskip("scipy")
|
||||
df = Series(
|
||||
[1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
|
||||
)
|
||||
warn = None if method == "nearest" else FutureWarning
|
||||
msg = "Series.interpolate with method=pad is deprecated"
|
||||
with tm.assert_produces_warning(warn, match=msg):
|
||||
result = df.interpolate(method=method)
|
||||
if warn is not None:
|
||||
# check the "use ffill instead" is equivalent
|
||||
alt = df.ffill()
|
||||
tm.assert_series_equal(result, alt)
|
||||
|
||||
expected = Series(
|
||||
[1.0, 1.0, 3.0],
|
||||
index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),
|
||||
)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_pad_datetime64tz_values(self):
|
||||
# GH#27628 missing.interpolate_2d should handle datetimetz values
|
||||
dti = date_range("2015-04-05", periods=3, tz="US/Central")
|
||||
ser = Series(dti)
|
||||
ser[1] = pd.NaT
|
||||
|
||||
msg = "Series.interpolate with method=pad is deprecated"
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg):
|
||||
result = ser.interpolate(method="pad")
|
||||
# check the "use ffill instead" is equivalent
|
||||
alt = ser.ffill()
|
||||
tm.assert_series_equal(result, alt)
|
||||
|
||||
expected = Series(dti)
|
||||
expected[1] = expected[0]
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interp_limit_no_nans(self):
|
||||
# GH 7173
|
||||
s = Series([1.0, 2.0, 3.0])
|
||||
result = s.interpolate(limit=1)
|
||||
expected = s
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize("method", ["polynomial", "spline"])
|
||||
def test_no_order(self, method):
|
||||
# see GH-10633, GH-24014
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([0, 1, np.nan, 3])
|
||||
msg = "You must specify the order of the spline or polynomial"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method=method)
|
||||
|
||||
@pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
|
||||
def test_interpolate_spline_invalid_order(self, order):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([0, 1, np.nan, 3])
|
||||
msg = "order needs to be specified and greater than 0"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
s.interpolate(method="spline", order=order)
|
||||
|
||||
def test_spline(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
|
||||
result = s.interpolate(method="spline", order=1)
|
||||
expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_spline_extrapolate(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
|
||||
result3 = s.interpolate(method="spline", order=1, ext=3)
|
||||
expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
|
||||
tm.assert_series_equal(result3, expected3)
|
||||
|
||||
result1 = s.interpolate(method="spline", order=1, ext=0)
|
||||
expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
|
||||
tm.assert_series_equal(result1, expected1)
|
||||
|
||||
def test_spline_smooth(self):
|
||||
pytest.importorskip("scipy")
|
||||
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
|
||||
assert (
|
||||
s.interpolate(method="spline", order=3, s=0)[5]
|
||||
!= s.interpolate(method="spline", order=3)[5]
|
||||
)
|
||||
|
||||
def test_spline_interpolation(self):
|
||||
# Explicit cast to float to avoid implicit cast when setting np.nan
|
||||
pytest.importorskip("scipy")
|
||||
s = Series(np.arange(10) ** 2, dtype="float")
|
||||
s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
|
||||
result1 = s.interpolate(method="spline", order=1)
|
||||
expected1 = s.interpolate(method="spline", order=1)
|
||||
tm.assert_series_equal(result1, expected1)
|
||||
|
||||
def test_interp_timedelta64(self):
|
||||
# GH 6424
|
||||
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 3]))
|
||||
result = df.interpolate(method="time")
|
||||
expected = Series([1.0, 2.0, 3.0], index=pd.to_timedelta([1, 2, 3]))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
# test for non uniform spacing
|
||||
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 4]))
|
||||
result = df.interpolate(method="time")
|
||||
expected = Series([1.0, 1.666667, 3.0], index=pd.to_timedelta([1, 2, 4]))
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_series_interpolate_method_values(self):
|
||||
# GH#1646
|
||||
rng = date_range("1/1/2000", "1/20/2000", freq="D")
|
||||
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
|
||||
|
||||
ts[::2] = np.nan
|
||||
|
||||
result = ts.interpolate(method="values")
|
||||
exp = ts.interpolate()
|
||||
tm.assert_series_equal(result, exp)
|
||||
|
||||
def test_series_interpolate_intraday(self):
|
||||
# #1698
|
||||
index = date_range("1/1/2012", periods=4, freq="12D")
|
||||
ts = Series([0, 12, 24, 36], index)
|
||||
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
|
||||
|
||||
exp = ts.reindex(new_index).interpolate(method="time")
|
||||
|
||||
index = date_range("1/1/2012", periods=4, freq="12H")
|
||||
ts = Series([0, 12, 24, 36], index)
|
||||
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
|
||||
result = ts.reindex(new_index).interpolate(method="time")
|
||||
|
||||
tm.assert_numpy_array_equal(result.values, exp.values)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"ind",
|
||||
[
|
||||
["a", "b", "c", "d"],
|
||||
pd.period_range(start="2019-01-01", periods=4),
|
||||
pd.interval_range(start=0, end=4),
|
||||
],
|
||||
)
|
||||
def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
|
||||
# gh 21662
|
||||
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
|
||||
|
||||
method, kwargs = interp_methods_ind
|
||||
if method == "pchip":
|
||||
pytest.importorskip("scipy")
|
||||
|
||||
if method == "linear":
|
||||
result = df[0].interpolate(**kwargs)
|
||||
expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
|
||||
tm.assert_series_equal(result, expected)
|
||||
else:
|
||||
expected_error = (
|
||||
"Index column must be numeric or datetime type when "
|
||||
f"using {method} method other than linear. "
|
||||
"Try setting a numeric or datetime index column before "
|
||||
"interpolating."
|
||||
)
|
||||
with pytest.raises(ValueError, match=expected_error):
|
||||
df[0].interpolate(method=method, **kwargs)
|
||||
|
||||
def test_interpolate_timedelta_index(self, request, interp_methods_ind):
|
||||
"""
|
||||
Tests for non numerical index types - object, period, timedelta
|
||||
Note that all methods except time, index, nearest and values
|
||||
are tested here.
|
||||
"""
|
||||
# gh 21662
|
||||
pytest.importorskip("scipy")
|
||||
ind = pd.timedelta_range(start=1, periods=4)
|
||||
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
|
||||
|
||||
method, kwargs = interp_methods_ind
|
||||
|
||||
if method in {"cubic", "zero"}:
|
||||
request.node.add_marker(
|
||||
pytest.mark.xfail(
|
||||
reason=f"{method} interpolation is not supported for TimedeltaIndex"
|
||||
)
|
||||
)
|
||||
result = df[0].interpolate(method=method, **kwargs)
|
||||
expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"ascending, expected_values",
|
||||
[(True, [1, 2, 3, 9, 10]), (False, [10, 9, 3, 2, 1])],
|
||||
)
|
||||
def test_interpolate_unsorted_index(self, ascending, expected_values):
|
||||
# GH 21037
|
||||
ts = Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1])
|
||||
result = ts.sort_index(ascending=ascending).interpolate(method="index")
|
||||
expected = Series(data=expected_values, index=expected_values, dtype=float)
|
||||
tm.assert_series_equal(result, expected)
|
||||
|
||||
def test_interpolate_asfreq_raises(self):
|
||||
ser = Series(["a", None, "b"], dtype=object)
|
||||
msg2 = "Series.interpolate with object dtype"
|
||||
msg = "Invalid fill method"
|
||||
with pytest.raises(ValueError, match=msg):
|
||||
with tm.assert_produces_warning(FutureWarning, match=msg2):
|
||||
ser.interpolate(method="asfreq")
|
@@ -0,0 +1,26 @@
|
||||
import numpy as np
|
||||
|
||||
from pandas import (
|
||||
Series,
|
||||
date_range,
|
||||
)
|
||||
|
||||
|
||||
class TestIsMonotonic:
|
||||
def test_is_monotonic_numeric(self):
|
||||
ser = Series(np.random.default_rng(2).integers(0, 10, size=1000))
|
||||
assert not ser.is_monotonic_increasing
|
||||
ser = Series(np.arange(1000))
|
||||
assert ser.is_monotonic_increasing is True
|
||||
assert ser.is_monotonic_increasing is True
|
||||
ser = Series(np.arange(1000, 0, -1))
|
||||
assert ser.is_monotonic_decreasing is True
|
||||
|
||||
def test_is_monotonic_dt64(self):
|
||||
ser = Series(date_range("20130101", periods=10))
|
||||
assert ser.is_monotonic_increasing is True
|
||||
assert ser.is_monotonic_increasing is True
|
||||
|
||||
ser = Series(list(reversed(ser)))
|
||||
assert ser.is_monotonic_increasing is False
|
||||
assert ser.is_monotonic_decreasing is True
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user