virtual environments teil20 and teil20a

teil20/lib/python3.11/site-packages/pandas/__init__.py (new file, 353 lines)
@@ -0,0 +1,353 @@
from __future__ import annotations

__docformat__ = "restructuredtext"

# Let users know if they're missing any of our hard dependencies
_hard_dependencies = ("numpy", "pytz", "dateutil")
_missing_dependencies = []

for _dependency in _hard_dependencies:
    try:
        __import__(_dependency)
    except ImportError as _e:  # pragma: no cover
        _missing_dependencies.append(f"{_dependency}: {_e}")

if _missing_dependencies:  # pragma: no cover
    raise ImportError(
        "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies)
    )
del _hard_dependencies, _dependency, _missing_dependencies
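
# Illustration (not part of this file): the probe above generalizes to any
# tuple of import names. A minimal standalone sketch using placeholder names:
import importlib


def _sketch_check_hard_deps(names: tuple[str, ...]) -> list[str]:
    # collect "name: error" strings for every module that fails to import
    missing = []
    for name in names:
        try:
            importlib.import_module(name)
        except ImportError as err:
            missing.append(f"{name}: {err}")
    return missing


# e.g. _sketch_check_hard_deps(("numpy", "pytz")) == [] when both are installed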

try:
    # numpy compat
    from pandas.compat import (
        is_numpy_dev as _is_numpy_dev,  # pyright: ignore[reportUnusedImport] # noqa: F401,E501
    )
except ImportError as _err:  # pragma: no cover
    _module = _err.name
    raise ImportError(
        f"C extension: {_module} not built. If you want to import "
        "pandas from the source directory, you may need to run "
        "'python setup.py build_ext' to build the C extensions first."
    ) from _err

from pandas._config import (
    get_option,
    set_option,
    reset_option,
    describe_option,
    option_context,
    options,
)

# let init-time option registration happen
import pandas.core.config_init  # pyright: ignore[reportUnusedImport] # noqa: F401

from pandas.core.api import (
    # dtype
    ArrowDtype,
    Int8Dtype,
    Int16Dtype,
    Int32Dtype,
    Int64Dtype,
    UInt8Dtype,
    UInt16Dtype,
    UInt32Dtype,
    UInt64Dtype,
    Float32Dtype,
    Float64Dtype,
    CategoricalDtype,
    PeriodDtype,
    IntervalDtype,
    DatetimeTZDtype,
    StringDtype,
    BooleanDtype,
    # missing
    NA,
    isna,
    isnull,
    notna,
    notnull,
    # indexes
    Index,
    CategoricalIndex,
    RangeIndex,
    MultiIndex,
    IntervalIndex,
    TimedeltaIndex,
    DatetimeIndex,
    PeriodIndex,
    IndexSlice,
    # tseries
    NaT,
    Period,
    period_range,
    Timedelta,
    timedelta_range,
    Timestamp,
    date_range,
    bdate_range,
    Interval,
    interval_range,
    DateOffset,
    # conversion
    to_numeric,
    to_datetime,
    to_timedelta,
    # misc
    Flags,
    Grouper,
    factorize,
    unique,
    value_counts,
    NamedAgg,
    array,
    Categorical,
    set_eng_float_format,
    Series,
    DataFrame,
)

from pandas.core.dtypes.dtypes import SparseDtype

from pandas.tseries.api import infer_freq
from pandas.tseries import offsets

from pandas.core.computation.api import eval

from pandas.core.reshape.api import (
    concat,
    lreshape,
    melt,
    wide_to_long,
    merge,
    merge_asof,
    merge_ordered,
    crosstab,
    pivot,
    pivot_table,
    get_dummies,
    from_dummies,
    cut,
    qcut,
)

from pandas import api, arrays, errors, io, plotting, tseries
from pandas import testing
from pandas.util._print_versions import show_versions

from pandas.io.api import (
    # excel
    ExcelFile,
    ExcelWriter,
    read_excel,
    # parsers
    read_csv,
    read_fwf,
    read_table,
    # pickle
    read_pickle,
    to_pickle,
    # pytables
    HDFStore,
    read_hdf,
    # sql
    read_sql,
    read_sql_query,
    read_sql_table,
    # misc
    read_clipboard,
    read_parquet,
    read_orc,
    read_feather,
    read_gbq,
    read_html,
    read_xml,
    read_json,
    read_stata,
    read_sas,
    read_spss,
)

from pandas.io.json._normalize import json_normalize

from pandas.util._tester import test

# use the closest tagged version if possible
_built_with_meson = False
try:
    from pandas._version_meson import (  # pyright: ignore [reportMissingImports]
        __version__,
        __git_version__,
    )

    _built_with_meson = True
except ImportError:
    from pandas._version import get_versions

    v = get_versions()
    __version__ = v.get("closest-tag", v["version"])
    __git_version__ = v.get("full-revisionid")
    del get_versions, v
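
# Illustration (not part of this file): whichever branch ran, both version
# attributes end up defined. A quick smoke test, assuming pandas imports
# cleanly from this environment:
#
#     >>> import pandas as pd
#     >>> isinstance(pd.__version__, str)
#     True
#     >>> pd._built_with_meson  # doctest: +SKIP
#     False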


# module level doc-string
__doc__ = """
pandas - a powerful data analysis and manipulation library for Python
=====================================================================

**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with "relational" or "labeled" data both
easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
the broader goal of becoming **the most powerful and flexible open source data
analysis / manipulation tool available in any language**. It is already well on
its way toward this goal.

Main Features
-------------
Here are just a few of the things that pandas does well:

  - Easy handling of missing data in floating point as well as non-floating
    point data.
  - Size mutability: columns can be inserted and deleted from DataFrame and
    higher dimensional objects.
  - Automatic and explicit data alignment: objects can be explicitly aligned
    to a set of labels, or the user can simply ignore the labels and let
    `Series`, `DataFrame`, etc. automatically align the data for you in
    computations.
  - Powerful, flexible group by functionality to perform split-apply-combine
    operations on data sets, for both aggregating and transforming data.
  - Make it easy to convert ragged, differently-indexed data in other Python
    and NumPy data structures into DataFrame objects.
  - Intelligent label-based slicing, fancy indexing, and subsetting of large
    data sets.
  - Intuitive merging and joining data sets.
  - Flexible reshaping and pivoting of data sets.
  - Hierarchical labeling of axes (possible to have multiple labels per tick).
  - Robust IO tools for loading data from flat files (CSV and delimited),
    Excel files, databases, and saving/loading data from the ultrafast HDF5
    format.
  - Time series-specific functionality: date range generation and frequency
    conversion, moving window statistics, date shifting and lagging.
"""

# Use __all__ to let type checkers know what is part of the public API.
# Pandas is not (yet) a py.typed library: the public API is determined
# based on the documentation.
__all__ = [
    "ArrowDtype",
    "BooleanDtype",
    "Categorical",
    "CategoricalDtype",
    "CategoricalIndex",
    "DataFrame",
    "DateOffset",
    "DatetimeIndex",
    "DatetimeTZDtype",
    "ExcelFile",
    "ExcelWriter",
    "Flags",
    "Float32Dtype",
    "Float64Dtype",
    "Grouper",
    "HDFStore",
    "Index",
    "IndexSlice",
    "Int16Dtype",
    "Int32Dtype",
    "Int64Dtype",
    "Int8Dtype",
    "Interval",
    "IntervalDtype",
    "IntervalIndex",
    "MultiIndex",
    "NA",
    "NaT",
    "NamedAgg",
    "Period",
    "PeriodDtype",
    "PeriodIndex",
    "RangeIndex",
    "Series",
    "SparseDtype",
    "StringDtype",
    "Timedelta",
    "TimedeltaIndex",
    "Timestamp",
    "UInt16Dtype",
    "UInt32Dtype",
    "UInt64Dtype",
    "UInt8Dtype",
    "api",
    "array",
    "arrays",
    "bdate_range",
    "concat",
    "crosstab",
    "cut",
    "date_range",
    "describe_option",
    "errors",
    "eval",
    "factorize",
    "get_dummies",
    "from_dummies",
    "get_option",
    "infer_freq",
    "interval_range",
    "io",
    "isna",
    "isnull",
    "json_normalize",
    "lreshape",
    "melt",
    "merge",
    "merge_asof",
    "merge_ordered",
    "notna",
    "notnull",
    "offsets",
    "option_context",
    "options",
    "period_range",
    "pivot",
    "pivot_table",
    "plotting",
    "qcut",
    "read_clipboard",
    "read_csv",
    "read_excel",
    "read_feather",
    "read_fwf",
    "read_gbq",
    "read_hdf",
    "read_html",
    "read_json",
    "read_orc",
    "read_parquet",
    "read_pickle",
    "read_sas",
    "read_spss",
    "read_sql",
    "read_sql_query",
    "read_sql_table",
    "read_stata",
    "read_table",
    "read_xml",
    "reset_option",
    "set_eng_float_format",
    "set_option",
    "show_versions",
    "test",
    "testing",
    "timedelta_range",
    "to_datetime",
    "to_numeric",
    "to_pickle",
    "to_timedelta",
    "tseries",
    "unique",
    "value_counts",
    "wide_to_long",
]
6 binary files not shown.

teil20/lib/python3.11/site-packages/pandas/_config/__init__.py (new file, 45 lines)
@@ -0,0 +1,45 @@
"""
pandas._config is considered explicitly upstream of everything else in pandas,
and should have no intra-pandas dependencies.

importing `dates` and `display` ensures that keys needed by _libs
are initialized.
"""
__all__ = [
    "config",
    "detect_console_encoding",
    "get_option",
    "set_option",
    "reset_option",
    "describe_option",
    "option_context",
    "options",
    "using_copy_on_write",
]
from pandas._config import config
from pandas._config import dates  # pyright: ignore[reportUnusedImport]  # noqa: F401
from pandas._config.config import (
    _global_config,
    describe_option,
    get_option,
    option_context,
    options,
    reset_option,
    set_option,
)
from pandas._config.display import detect_console_encoding


def using_copy_on_write() -> bool:
    _mode_options = _global_config["mode"]
    return _mode_options["copy_on_write"] and _mode_options["data_manager"] == "block"


def using_nullable_dtypes() -> bool:
    _mode_options = _global_config["mode"]
    return _mode_options["nullable_dtypes"]


def using_pyarrow_string_dtype() -> bool:
    _mode_options = _global_config["future"]
    return _mode_options["infer_string"]
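
# Illustration (not part of this file): the helpers above read live option
# values, so flipping the underlying option changes the answer. Assuming the
# "mode.copy_on_write" option has been registered by pandas.core.config_init
# (as it is in pandas 2.x):
#
#     >>> import pandas as pd
#     >>> from pandas._config import using_copy_on_write
#     >>> pd.set_option("mode.copy_on_write", True)
#     >>> using_copy_on_write()  # True while "mode.data_manager" is "block"
#     True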
										
5 binary files not shown.
							
								
								
									
teil20/lib/python3.11/site-packages/pandas/_config/config.py (new file, 946 lines)
@@ -0,0 +1,946 @@
"""
The config module holds package-wide configurables and provides
a uniform API for working with them.

Overview
========

This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
  validation function associated with them.
- options can be deprecated, in which case referencing them
  should produce a warning.
- deprecated options can optionally be rerouted to a replacement
  so that accessing a deprecated option reroutes to a differently
  named option.
- options can be reset to their default value.
- all options can be reset to their default value at once.
- all options in a certain sub-namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
  is set or reset. Changing the stored value is considered misuse, but
  is not verboten.

Implementation
==============

- Data is stored using nested dictionaries, and should be accessed
  through the provided API.

- "Registered options" and "Deprecated options" have metadata associated
  with them, which are stored in auxiliary dictionaries keyed on the
  fully-qualified key, e.g. "x.y.z.option".

- the config_init module is imported by the package's __init__.py file.
  Placing any register_option() calls there will ensure those options
  are available as soon as pandas is loaded. If you use register_option
  in a module, it will only be available after that module is imported,
  which you should be aware of.

- `config_prefix` is a context_manager (for use with the `with` keyword)
  which can save developers some typing, see the docstring.

"""
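
# Illustration (not part of this file): the requirements above in one pass,
# using this module directly; the "demo.color" key is made up for the example.
#
#     >>> import pandas._config.config as cf
#     >>> cf.register_option("demo.color", "red", "illustrative option",
#     ...                    validator=cf.is_str)
#     >>> cf.get_option("demo.color")
#     'red'
#     >>> cf.set_option("demo.color", "blue")   # checked by is_str
#     >>> cf.reset_option("demo.color")         # back to the default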

from __future__ import annotations

from contextlib import (
    ContextDecorator,
    contextmanager,
)
import re
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Generic,
    NamedTuple,
    cast,
)
import warnings

from pandas._typing import (
    F,
    T,
)
from pandas.util._exceptions import find_stack_level

if TYPE_CHECKING:
    from collections.abc import (
        Generator,
        Iterable,
    )


class DeprecatedOption(NamedTuple):
    key: str
    msg: str | None
    rkey: str | None
    removal_ver: str | None


class RegisteredOption(NamedTuple):
    key: str
    defval: object
    doc: str
    validator: Callable[[object], Any] | None
    cb: Callable[[str], Any] | None


# holds deprecated option metadata
_deprecated_options: dict[str, DeprecatedOption] = {}

# holds registered option metadata
_registered_options: dict[str, RegisteredOption] = {}

# holds the current values for registered options
_global_config: dict[str, Any] = {}

# keys which have a special meaning
_reserved_keys: list[str] = ["all"]


class OptionError(AttributeError, KeyError):
    """
    Exception raised for pandas.options.

    Backwards compatible with KeyError checks.

    Examples
    --------
    >>> pd.options.context
    Traceback (most recent call last):
    OptionError: No such option
    """


#
# User API


def _get_single_key(pat: str, silent: bool) -> str:
    keys = _select_options(pat)
    if len(keys) == 0:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError(f"No such key(s): {repr(pat)}")
    if len(keys) > 1:
        raise OptionError("Pattern matched multiple keys")
    key = keys[0]

    if not silent:
        _warn_if_deprecated(key)

    key = _translate_key(key)

    return key


def _get_option(pat: str, silent: bool = False) -> Any:
    key = _get_single_key(pat, silent)

    # walk the nested dict
    root, k = _get_root(key)
    return root[k]


def _set_option(*args, **kwargs) -> None:
    # must have at least one (pat, value) pair; deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword arguments")

    # default to false
    silent = kwargs.pop("silent", False)

    if kwargs:
        kwarg = next(iter(kwargs.keys()))
        raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')

    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)

        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)

        # walk the nested dict
        root, k_root = _get_root(key)
        root[k_root] = v

        if o.cb:
            if silent:
                with warnings.catch_warnings(record=True):
                    o.cb(key)
            else:
                o.cb(key)


def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
    keys = _select_options(pat)
    if len(keys) == 0:
        raise OptionError("No such key(s)")

    s = "\n".join([_build_option_description(k) for k in keys])

    if _print_desc:
        print(s)
        return None
    return s


def _reset_option(pat: str, silent: bool = False) -> None:
    keys = _select_options(pat)

    if len(keys) == 0:
        raise OptionError("No such key(s)")

    if len(keys) > 1 and len(pat) < 4 and pat != "all":
        raise ValueError(
            "You must specify at least 4 characters when "
            "resetting multiple keys, use the special keyword "
            '"all" to reset all the options to their default value'
        )

    for k in keys:
        _set_option(k, _registered_options[k].defval, silent=silent)


def get_default_val(pat: str):
    key = _get_single_key(pat, silent=True)
    return _get_registered_option(key).defval


class DictWrapper:
    """provide attribute-style access to a nested dict"""

    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)

    def __setattr__(self, key: str, val: Any) -> None:
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys
        # and you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")

    def __getattr__(self, key: str):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        try:
            v = object.__getattribute__(self, "d")[key]
        except KeyError as err:
            raise OptionError("No such option") from err
        if isinstance(v, dict):
            return DictWrapper(v, prefix)
        else:
            return _get_option(prefix)

    def __dir__(self) -> Iterable[str]:
        return list(self.d.keys())
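
# Illustration (not part of this file): DictWrapper is what makes pd.options
# navigable by attribute; each dotted level returns another wrapper until a
# leaf value is reached. Assuming the standard display options are registered:
#
#     >>> import pandas as pd
#     >>> pd.options.display.max_rows = 20   # routes through _set_option
#     >>> pd.options.display.max_rows        # routes through _get_option
#     20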

# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To do this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.


class CallableDynamicDoc(Generic[T]):
    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func

    def __call__(self, *args, **kwds) -> T:
        return self.__func__(*args, **kwds)

    # error: Signature of "__doc__" incompatible with supertype "object"
    @property
    def __doc__(self) -> str:  # type: ignore[override]
        opts_desc = _describe_option("all", _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
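
# Illustration (not part of this file): the same dynamic-docstring trick in
# isolation. __doc__ is re-rendered on every access, so help() always reflects
# the current state.
class _SketchDynamicDoc:
    def __init__(self, func, doc_tmpl: str) -> None:
        self._func = func
        self._tmpl = doc_tmpl

    def __call__(self, *args, **kwds):
        return self._func(*args, **kwds)

    @property
    def __doc__(self):
        # plug live values into the template at access time
        return self._tmpl.format(count=3)


# _SketchDynamicDoc(lambda: "hi", "Documents {count} options.").__doc__
# -> 'Documents 3 options.'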

_get_option_tmpl = """
get_option(pat)

Retrieves the value of the specified option.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp which should match a single option.
    Note: partial matches are supported for convenience, but unless you use the
    full option name (e.g. x.y.z.option_name), your code may break in future
    versions if new options with similar names are introduced.

Returns
-------
result : the value of the option

Raises
------
OptionError : if no such option exists

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with their descriptions:

{opts_desc}

Examples
--------
>>> pd.get_option('display.max_columns')  # doctest: +SKIP
4
"""

_set_option_tmpl = """
set_option(pat, value)

Sets the value of the specified option.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp which should match a single option.
    Note: partial matches are supported for convenience, but unless you use the
    full option name (e.g. x.y.z.option_name), your code may break in future
    versions if new options with similar names are introduced.
value : object
    New value of option.

Returns
-------
None

Raises
------
OptionError if no such option exists

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with their descriptions:

{opts_desc}

Examples
--------
>>> pd.set_option('display.max_columns', 4)
>>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> df
   0  1  ...  3   4
0  1  2  ...  4   5
1  6  7  ...  9  10
[2 rows x 5 columns]
>>> pd.reset_option('display.max_columns')
"""

_describe_option_tmpl = """
describe_option(pat, _print_desc=False)

Prints the description for one or more registered options.

Call with no arguments to get a listing for all registered options.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
    If True (default) the description(s) will be printed to stdout.
    Otherwise, the description(s) will be returned as a unicode string
    (for testing).

Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with their descriptions:

{opts_desc}

Examples
--------
>>> pd.describe_option('display.max_columns')  # doctest: +SKIP
display.max_columns : int
    If max_cols is exceeded, switch to truncate view...
"""

_reset_option_tmpl = """
reset_option(pat)

Reset one or more options to their default value.

Pass "all" as argument to reset all options.

Available options:

{opts_list}

Parameters
----------
pat : str/regex
    If specified only options matching `prefix*` will be reset.
    Note: partial matches are supported for convenience, but unless you
    use the full option name (e.g. x.y.z.option_name), your code may break
    in future versions if new options with similar names are introduced.

Returns
-------
None

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with their descriptions:

{opts_desc}

Examples
--------
>>> pd.reset_option('display.max_columns')  # doctest: +SKIP
"""

# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)

#
# Functions for use by pandas developers, in addition to the user API


class option_context(ContextDecorator):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.

    Examples
    --------
    >>> from pandas import option_context
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...     pass
    """

    def __init__(self, *args) -> None:
        if len(args) % 2 != 0 or len(args) < 2:
            raise ValueError(
                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
            )

        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self) -> None:
        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]

        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args) -> None:
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
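
# Illustration (not part of this file): values are restored on exit, even when
# the body raises, since __exit__ replays the saved (pat, value) pairs.
#
#     >>> import pandas as pd
#     >>> before = pd.get_option("display.max_rows")
#     >>> with pd.option_context("display.max_rows", 5, "display.max_columns", 3):
#     ...     pd.get_option("display.max_rows")
#     5
#     >>> pd.get_option("display.max_rows") == before
#     True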

def register_option(
    key: str,
    defval: object,
    doc: str = "",
    validator: Callable[[object], Any] | None = None,
    cb: Callable[[str], Any] | None = None,
) -> None:
    """
    Register an option in the package-wide pandas config object

    Parameters
    ----------
    key : str
        Fully-qualified key, e.g. "x.y.option - z".
    defval : object
        Default value of the option.
    doc : str
        Description of the option.
    validator : Callable, optional
        Function of a single argument, should raise `ValueError` if
        called with a value which is not a legal value for the option.
    cb
        a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.

    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.

    """
    import keyword
    import tokenize

    key = key.lower()

    if key in _registered_options:
        raise OptionError(f"Option '{key}' has already been registered")
    if key in _reserved_keys:
        raise OptionError(f"Option '{key}' is a reserved key")

    # the default value should be legal
    if validator:
        validator(defval)

    # walk the nested dict, creating dicts as needed along the path
    path = key.split(".")

    for k in path:
        if not re.match("^" + tokenize.Name + "$", k):
            raise ValueError(f"{k} is not a valid identifier")
        if keyword.iskeyword(k):
            raise ValueError(f"{k} is a python keyword")

    cursor = _global_config
    msg = "Path prefix to option '{option}' is already an option"

    for i, p in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            raise OptionError(msg.format(option=".".join(path[:i])))
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]

    if not isinstance(cursor, dict):
        raise OptionError(msg.format(option=".".join(path[:-1])))

    cursor[path[-1]] = defval  # initialize

    # save the option metadata
    _registered_options[key] = RegisteredOption(
        key=key, defval=defval, doc=doc, validator=validator, cb=cb
    )
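
# Illustration (not part of this file): registering an option with a validator
# and a callback; the "demo.verbosity" key is made up for the example.
#
#     >>> import pandas._config.config as cf
#     >>> def on_change(key):
#     ...     print(f"{key} -> {cf.get_option(key)}")   # full key is passed in
#     >>> cf.register_option("demo.verbosity", 0, "illustrative option",
#     ...                    validator=cf.is_int, cb=on_change)
#     >>> cf.set_option("demo.verbosity", 2)
#     demo.verbosity -> 2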

def deprecate_option(
    key: str,
    msg: str | None = None,
    rkey: str | None = None,
    removal_ver: str | None = None,
) -> None:
    """
    Mark option `key` as deprecated. If code attempts to access this option,
    a warning will be produced, using `msg` if given, or a default message
    if not.
    If `rkey` is given, any access to the key will be re-routed to `rkey`.

    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
    deprecation warning is given.

    Parameters
    ----------
    key : str
        Name of the option to be deprecated.
        Must be a fully-qualified option name (e.g "x.y.z.rkey").
    msg : str, optional
        Warning message to output when the key is referenced.
        If no message is given a default message will be emitted.
    rkey : str, optional
        Name of an option to reroute access to.
        If specified, any referenced `key` will be
        re-routed to `rkey` including set/get/reset.
        rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
        Used by the default message if no `msg` is specified.
    removal_ver : str, optional
        Specifies the version in which this option will
        be removed. Used by the default message if no `msg` is specified.

    Raises
    ------
    OptionError
        If the specified key has already been deprecated.
    """
    key = key.lower()

    if key in _deprecated_options:
        raise OptionError(f"Option '{key}' has already been defined as deprecated.")

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
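
# Illustration (not part of this file): rerouting a deprecated key. Both keys
# are made up; the old key must itself be registered so pattern lookup finds it.
#
#     >>> import pandas._config.config as cf
#     >>> cf.register_option("demo.old_name", 1, "old spelling", validator=cf.is_int)
#     >>> cf.register_option("demo.new_name", 1, "replacement", validator=cf.is_int)
#     >>> cf.deprecate_option("demo.old_name", rkey="demo.new_name")
#     >>> cf.set_option("demo.new_name", 5)
#     >>> cf.get_option("demo.old_name")  # FutureWarning, then rerouted value
#     5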

#
# functions internal to the module


def _select_options(pat: str) -> list[str]:
    """
    returns a list of keys matching `pat`

    if pat=="all", returns all registered options
    """
    # short-circuit for exact key
    if pat in _registered_options:
        return [pat]

    # else look through all of them
    keys = sorted(_registered_options.keys())
    if pat == "all":  # reserved key
        return keys

    return [k for k in keys if re.search(pat, k, re.I)]


def _get_root(key: str) -> tuple[dict[str, Any], str]:
    path = key.split(".")
    cursor = _global_config
    for p in path[:-1]:
        cursor = cursor[p]
    return cursor, path[-1]


def _is_deprecated(key: str) -> bool:
    """Returns True if the given option has been deprecated"""
    key = key.lower()
    return key in _deprecated_options


def _get_deprecated_option(key: str):
    """
    Retrieves the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    try:
        d = _deprecated_options[key]
    except KeyError:
        return None
    else:
        return d


def _get_registered_option(key: str):
    """
    Retrieves the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if key is a registered option, None otherwise
    """
    return _registered_options.get(key)


def _translate_key(key: str) -> str:
    """
    if key is deprecated and a replacement key is defined, will return the
    replacement key, otherwise returns `key` as-is
    """
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    else:
        return key


def _warn_if_deprecated(key: str) -> bool:
    """
    Checks if `key` is a deprecated option and if so, prints a warning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if d:
        if d.msg:
            warnings.warn(
                d.msg,
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            msg = f"'{key}' is deprecated"
            if d.removal_ver:
                msg += f" and will be removed in {d.removal_ver}"
            if d.rkey:
                msg += f", please use '{d.rkey}' instead."
            else:
                msg += ", please refrain from using it."

            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
        return True
    return False


def _build_option_description(k: str) -> str:
    """Builds and returns a formatted description of a registered option"""
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)

    s = f"{k} "

    if o.doc:
        s += "\n".join(o.doc.strip().split("\n"))
    else:
        s += "No description available."

    if o:
        s += f"\n    [default: {o.defval}] [currently: {_get_option(k, True)}]"

    if d:
        rkey = d.rkey or ""
        s += "\n    (Deprecated"
        s += f", use `{rkey}` instead."
        s += ")"

    return s


def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
    """Builds a concise listing of available options, grouped by prefix"""
    from itertools import groupby
    from textwrap import wrap

    def pp(name: str, ks: Iterable[str]) -> list[str]:
        pfx = "- " + name + ".[" if name else ""
        ls = wrap(
            ", ".join(ks),
            width,
            initial_indent=pfx,
            subsequent_indent="  ",
            break_long_words=False,
        )
        if ls and ls[-1] and name:
            ls[-1] = ls[-1] + "]"
        return ls

    ls: list[str] = []
    singles = [x for x in sorted(keys) if x.find(".") < 0]
    if singles:
        ls += pp("", singles)
    keys = [x for x in keys if x.find(".") >= 0]

    for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
        ks = [x[len(k) + 1 :] for x in list(g)]
        ls += pp(k, ks)
    s = "\n".join(ls)
    if _print:
        print(s)
    else:
        return s


#
# helpers


@contextmanager
def config_prefix(prefix: str) -> Generator[None, None, None]:
    """
    contextmanager for multiple invocations of API with a common prefix

    supported API functions: (register / get / set)_option

    Warning: This is not thread-safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.

    Example
    -------
    import pandas._config.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option("size", " 6 pt")
        cf.get_option("size")
        ...

    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme

    global register_option, get_option, set_option

    def wrap(func: F) -> F:
        def inner(key: str, *args, **kwds):
            pkey = f"{prefix}.{key}"
            return func(pkey, *args, **kwds)

        return cast(F, inner)

    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    try:
        yield
    finally:
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option

# These factories and methods are handy for use as the validator
# arg in register_option


def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
    """

    Parameters
    ----------
    `_type` - a type to be compared against (e.g. type(x) == `_type`)

    Returns
    -------
    validator - a function of a single argument x, which raises
                ValueError if type(x) is not equal to `_type`

    """

    def inner(x) -> None:
        if type(x) != _type:
            raise ValueError(f"Value must have type '{_type}'")

    return inner


def is_instance_factory(_type) -> Callable[[Any], None]:
    """

    Parameters
    ----------
    `_type` - the type to be checked against

    Returns
    -------
    validator - a function of a single argument x, which raises
                ValueError if x is not an instance of `_type`

    """
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
        type_repr = "|".join(map(str, _type))
    else:
        type_repr = f"'{_type}'"

    def inner(x) -> None:
        if not isinstance(x, _type):
            raise ValueError(f"Value must be an instance of {type_repr}")

    return inner


def is_one_of_factory(legal_values) -> Callable[[Any], None]:
    callables = [c for c in legal_values if callable(c)]
    legal_values = [c for c in legal_values if not callable(c)]

    def inner(x) -> None:
        if x not in legal_values:
            if not any(c(x) for c in callables):
                uvals = [str(lval) for lval in legal_values]
                pp_values = "|".join(uvals)
                msg = f"Value must be one of {pp_values}"
                if len(callables):
                    msg += " or a callable"
                raise ValueError(msg)

    return inner


def is_nonnegative_int(value: object) -> None:
    """
    Verify that value is None or a nonnegative int.

    Parameters
    ----------
    value : None or int
            The `value` to be checked.

    Raises
    ------
    ValueError
        When the value is neither None nor a nonnegative integer
    """
    if value is None:
        return

    elif isinstance(value, int):
        if value >= 0:
            return

    msg = "Value must be a nonnegative integer or None"
    raise ValueError(msg)


# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))


def is_callable(obj) -> bool:
    """

    Parameters
    ----------
    `obj` - the object to be checked

    Returns
    -------
    validator - returns True if object is callable
        raises ValueError otherwise.

    """
    if not callable(obj):
        raise ValueError("Value must be a callable")
    return True
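
# Illustration (not part of this file): composing the factories. A validator
# that accepts two fixed strings or anything callable:
#
#     >>> import pandas._config.config as cf
#     >>> validate = cf.is_one_of_factory(["warn", "raise", callable])
#     >>> validate("warn")        # ok, in the legal values
#     >>> validate(len)           # ok, accepted by the callable check
#     >>> validate("ignore")
#     Traceback (most recent call last):
#     ValueError: Value must be one of warn|raise or a callable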
							
								
								
									
teil20/lib/python3.11/site-packages/pandas/_config/dates.py (new file, 25 lines)
@@ -0,0 +1,25 @@
"""
config for datetime formatting
"""
from __future__ import annotations

from pandas._config import config as cf

pc_date_dayfirst_doc = """
: boolean
    When True, prints and parses dates with the day first, eg 20/01/2005
"""

pc_date_yearfirst_doc = """
: boolean
    When True, prints and parses dates with the year first, eg 2005/01/20
"""

with cf.config_prefix("display"):
    # Needed upstream of `_libs` because these are used in tslibs.parsing
    cf.register_option(
        "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
    )
    cf.register_option(
        "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
    )
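
These options are read by pandas' datetime parsing and printing code;
flipping one at runtime is ordinary option handling (sketch):

    import pandas as pd

    # treat ambiguous dates such as "20/01/2005" as day-first
    pd.set_option("display.date_dayfirst", True)
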
@@ -0,0 +1,62 @@
"""
Unopinionated display configuration.
"""

from __future__ import annotations

import locale
import sys

from pandas._config import config as cf

# -----------------------------------------------------------------------------
# Global formatting options
_initial_defencoding: str | None = None


def detect_console_encoding() -> str:
    """
    Try to find the most capable encoding supported by the console.
    slightly modified from the way IPython handles the same issue.
    """
    global _initial_defencoding

    encoding = None
    try:
        encoding = sys.stdout.encoding or sys.stdin.encoding
    except (AttributeError, OSError):
        pass

    # try again for something better
    if not encoding or "ascii" in encoding.lower():
        try:
            encoding = locale.getpreferredencoding()
        except locale.Error:
            # can be raised by locale.setlocale(), which is
            #  called by getpreferredencoding
            #  (on some systems, see stdlib locale docs)
            pass

    # when all else fails. this will usually be "ascii"
    if not encoding or "ascii" in encoding.lower():
        encoding = sys.getdefaultencoding()

    # GH#3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return encoding


pc_encoding_doc = """
: str/unicode
    Defaults to the detected encoding of the console.
    Specifies the encoding to be used for strings returned by to_string,
    these are generally strings meant to be displayed on the console.
"""

with cf.config_prefix("display"):
    cf.register_option(
        "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
    )
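
Since this module runs at pandas import time, the detected console encoding
can be read back like any other option (sketch):

    import pandas as pd

    pd.get_option("display.encoding")  # e.g. "utf-8" on most modern consoles
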
@@ -0,0 +1,172 @@
"""
Helpers for configuring locale settings.

Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
from __future__ import annotations

from contextlib import contextmanager
import locale
import platform
import re
import subprocess
from typing import TYPE_CHECKING

from pandas._config.config import options

if TYPE_CHECKING:
    from collections.abc import Generator


@contextmanager
def set_locale(
    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
) -> Generator[str | tuple[str, str], None, None]:
    """
    Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form <language_country>.<encoding>. For example to set
        the current locale to US English with a UTF8 encoding, you would pass
        "en_US.UTF-8".
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably isn't
    thread-safe.
    """
    # getlocale is not always compliant with setlocale, use setlocale. GH#46595
    current_locale = locale.setlocale(lc_var)

    try:
        locale.setlocale(lc_var, new_locale)
        normalized_code, normalized_encoding = locale.getlocale()
        if normalized_code is not None and normalized_encoding is not None:
            yield f"{normalized_code}.{normalized_encoding}"
        else:
            yield new_locale
    finally:
        locale.setlocale(lc_var, current_locale)
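
A short sketch of the context manager in use, assuming the de_DE.UTF-8
locale is installed on the system:

    import locale
    from datetime import date

    from pandas._config.localization import set_locale

    with set_locale("de_DE.UTF-8", locale.LC_TIME):
        print(date(2005, 1, 20).strftime("%A"))  # "Donnerstag"
    # the previous locale is restored on exit
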


def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
    """
    Check to see if we can set a locale, and subsequently get the locale,
    without raising an Exception.

    Parameters
    ----------
    lc : str
        The locale to attempt to set.
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Returns
    -------
    bool
        Whether the passed locale can be set
    """
    try:
        with set_locale(lc, lc_var=lc_var):
            pass
    except (ValueError, locale.Error):
        # horrible name for an Exception subclass
        return False
    else:
        return True


def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : list of str or str
        A list of locale names, or a single string with one locale per line.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    return [
        loc
        for loc in (
            locale.normalize(loc.strip()) if normalize else loc.strip()
            for loc in locales
        )
        if can_set_locale(loc)
    ]


def get_locales(
    prefix: str | None = None,
    normalize: bool = True,
) -> list[str]:
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

    On error will return an empty list (no locale available, e.g. Windows)

    """
    if platform.system() in ("Linux", "Darwin"):
        raw_locales = subprocess.check_output(["locale", "-a"])
    else:
        # Other platforms e.g. windows platforms don't define "locale -a"
        #  Note: is_platform_windows causes circular import here
        return []

    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        split_raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in split_raw_locales:
            try:
                out_locales.append(str(x, encoding=options.display.encoding))
            except UnicodeError:
                # 'locale -a' is used to populate 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding.  Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))

    except TypeError:
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
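
A quick sketch of these helpers on a POSIX system (the exact locale names
depend on what is installed):

    from pandas._config.localization import can_set_locale, get_locales

    get_locales(prefix="en")       # e.g. ["en_GB.utf8", "en_US.utf8", ...]
    can_set_locale("en_US.UTF-8")  # True if that locale is installed
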

27  teil20/lib/python3.11/site-packages/pandas/_libs/__init__.py  (Normal file)
@@ -0,0 +1,27 @@
__all__ = [
    "NaT",
    "NaTType",
    "OutOfBoundsDatetime",
    "Period",
    "Timedelta",
    "Timestamp",
    "iNaT",
    "Interval",
]


# Below imports need to happen first to ensure pandas top level
# module gets monkeypatched with the pandas_datetime_CAPI
# see pandas_datetime_exec in pd_datetime.c
import pandas._libs.pandas_parser  # noqa: E501 # isort: skip # type: ignore[reportUnusedImport]
import pandas._libs.pandas_datetime  # noqa: F401,E501 # isort: skip # type: ignore[reportUnusedImport]
from pandas._libs.interval import Interval
from pandas._libs.tslibs import (
    NaT,
    NaTType,
    OutOfBoundsDatetime,
    Period,
    Timedelta,
    Timestamp,
    iNaT,
)

Binary file not shown.

Binary file not shown.

22  teil20/lib/python3.11/site-packages/pandas/_libs/algos.pxd  (Normal file)
@@ -0,0 +1,22 @@
from pandas._libs.dtypes cimport (
    numeric_object_t,
    numeric_t,
)


cdef numeric_t kth_smallest_c(numeric_t* arr, Py_ssize_t k, Py_ssize_t n) noexcept nogil

cdef enum TiebreakEnumType:
    TIEBREAK_AVERAGE
    TIEBREAK_MIN
    TIEBREAK_MAX
    TIEBREAK_FIRST
    TIEBREAK_FIRST_DESCENDING
    TIEBREAK_DENSE


cdef numeric_object_t get_rank_nan_fill_val(
    bint rank_nans_highest,
    numeric_object_t val,
    bint is_datetimelike=*,
)

416  teil20/lib/python3.11/site-packages/pandas/_libs/algos.pyi  (Normal file)
@@ -0,0 +1,416 @@
from typing import Any

import numpy as np

from pandas._typing import npt

class Infinity:
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __lt__(self, other) -> bool: ...
    def __le__(self, other) -> bool: ...
    def __gt__(self, other) -> bool: ...
    def __ge__(self, other) -> bool: ...

class NegInfinity:
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __lt__(self, other) -> bool: ...
    def __le__(self, other) -> bool: ...
    def __gt__(self, other) -> bool: ...
    def __ge__(self, other) -> bool: ...

def unique_deltas(
    arr: np.ndarray,  # const int64_t[:]
) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=1]
def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ...
def groupsort_indexer(
    index: np.ndarray,  # const int64_t[:]
    ngroups: int,
) -> tuple[
    np.ndarray,  # ndarray[int64_t, ndim=1]
    np.ndarray,  # ndarray[int64_t, ndim=1]
]: ...
def kth_smallest(
    arr: np.ndarray,  # numeric[:]
    k: int,
) -> Any: ...  # numeric

# ----------------------------------------------------------------------
# Pairwise correlation/covariance

def nancorr(
    mat: npt.NDArray[np.float64],  # const float64_t[:, :]
    cov: bool = ...,
    minp: int | None = ...,
) -> npt.NDArray[np.float64]: ...  # ndarray[float64_t, ndim=2]
def nancorr_spearman(
    mat: npt.NDArray[np.float64],  # ndarray[float64_t, ndim=2]
    minp: int = ...,
) -> npt.NDArray[np.float64]: ...  # ndarray[float64_t, ndim=2]

# ----------------------------------------------------------------------

def validate_limit(nobs: int | None, limit=...) -> int: ...
def get_fill_indexer(
    mask: npt.NDArray[np.bool_],
    limit: int | None = None,
) -> npt.NDArray[np.intp]: ...
def pad(
    old: np.ndarray,  # ndarray[numeric_object_t]
    new: np.ndarray,  # ndarray[numeric_object_t]
    limit=...,
) -> npt.NDArray[np.intp]: ...  # np.ndarray[np.intp, ndim=1]
def pad_inplace(
    values: np.ndarray,  # numeric_object_t[:]
    mask: np.ndarray,  # uint8_t[:]
    limit=...,
) -> None: ...
def pad_2d_inplace(
    values: np.ndarray,  # numeric_object_t[:, :]
    mask: np.ndarray,  # const uint8_t[:, :]
    limit=...,
) -> None: ...
def backfill(
    old: np.ndarray,  # ndarray[numeric_object_t]
    new: np.ndarray,  # ndarray[numeric_object_t]
    limit=...,
) -> npt.NDArray[np.intp]: ...  # np.ndarray[np.intp, ndim=1]
def backfill_inplace(
    values: np.ndarray,  # numeric_object_t[:]
    mask: np.ndarray,  # uint8_t[:]
    limit=...,
) -> None: ...
def backfill_2d_inplace(
    values: np.ndarray,  # numeric_object_t[:, :]
    mask: np.ndarray,  # const uint8_t[:, :]
    limit=...,
) -> None: ...
def is_monotonic(
    arr: np.ndarray,  # ndarray[numeric_object_t, ndim=1]
    timelike: bool,
) -> tuple[bool, bool, bool]: ...

# ----------------------------------------------------------------------
# rank_1d, rank_2d
# ----------------------------------------------------------------------

def rank_1d(
    values: np.ndarray,  # ndarray[numeric_object_t, ndim=1]
    labels: np.ndarray | None = ...,  # const int64_t[:]=None
    is_datetimelike: bool = ...,
    ties_method=...,
    ascending: bool = ...,
    pct: bool = ...,
    na_option=...,
    mask: npt.NDArray[np.bool_] | None = ...,
) -> np.ndarray: ...  # np.ndarray[float64_t, ndim=1]
def rank_2d(
    in_arr: np.ndarray,  # ndarray[numeric_object_t, ndim=2]
    axis: int = ...,
    is_datetimelike: bool = ...,
    ties_method=...,
    ascending: bool = ...,
    na_option=...,
    pct: bool = ...,
) -> np.ndarray: ...  # np.ndarray[float64_t, ndim=1]
def diff_2d(
    arr: np.ndarray,  # ndarray[diff_t, ndim=2]
    out: np.ndarray,  # ndarray[out_t, ndim=2]
    periods: int,
    axis: int,
    datetimelike: bool = ...,
) -> None: ...
def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ...
def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ...
def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ...
def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ...
def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ...
def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ...
def take_1d_int8_int8(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int16(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int64_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float32_float32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_object_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_bool_bool(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_bool_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int8(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int16(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int64_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float32_float32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_object_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_bool_bool(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_bool_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int8(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int16(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_int32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int64_int64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float32_float32(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float32_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float64_float64(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_object_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_bool_bool(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_bool_object(
    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_multi_int8_int8(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int8_int32(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int8_int64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int8_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int16_int16(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int16_int32(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int16_int64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int16_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int32_int32(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int32_int64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int32_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int64_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_float32_float32(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_float32_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_float64_float64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_object_object(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_bool_bool(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_bool_object(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...
def take_2d_multi_int64_int64(
    values: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    out: np.ndarray,
    fill_value=...,
) -> None: ...

1575  teil20/lib/python3.11/site-packages/pandas/_libs/algos.pyx  (Normal file)
File diff suppressed because it is too large
@@ -0,0 +1,73 @@
"""
Template for each `dtype` helper function using 1-d template

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

# ----------------------------------------------------------------------
# ensure_dtype
# ----------------------------------------------------------------------


def ensure_platform_int(object arr):
    # GH3033, GH1392
    # platform int is the size of the int pointer, e.g. np.intp
    if util.is_array(arr):
        if (<ndarray>arr).descr.type_num == cnp.NPY_INTP:
            return arr
        else:
            # equiv: arr.astype(np.intp)
            return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_INTP)
    else:
        return np.array(arr, dtype=np.intp)
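
Ignoring the C-level fast path, the helper's behavior matches this plain
NumPy sketch: arrays already of platform-int dtype pass through untouched,
everything else is cast to np.intp.

    import numpy as np

    def ensure_platform_int_py(arr):
        # pass through np.intp arrays, cast anything else
        if isinstance(arr, np.ndarray):
            return arr if arr.dtype == np.intp else arr.astype(np.intp)
        return np.array(arr, dtype=np.intp)
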


def ensure_object(object arr):
    if util.is_array(arr):
        if (<ndarray>arr).descr.type_num == NPY_OBJECT:
            return arr
        else:
            # equiv: arr.astype(object)
            return cnp.PyArray_Cast(<ndarray>arr, NPY_OBJECT)
    else:
        return np.array(arr, dtype=np.object_)

{{py:

# name, c_type, dtype
dtypes = [('float64', 'FLOAT64', 'float64'),
          # ('float32', 'FLOAT32', 'float32'),  # disabling bc unused
          ('int8', 'INT8', 'int8'),
          ('int16', 'INT16', 'int16'),
          ('int32', 'INT32', 'int32'),
          ('int64', 'INT64', 'int64'),
          ('uint64', 'UINT64', 'uint64'),
          # Disabling uint and complex dtypes because we do not use them
          #  (and compiling them increases wheel size) (except uint64)
          # ('uint8', 'UINT8', 'uint8'),
          # ('uint16', 'UINT16', 'uint16'),
          # ('uint32', 'UINT32', 'uint32'),
          # ('complex64', 'COMPLEX64', 'complex64'),
          # ('complex128', 'COMPLEX128', 'complex128')
]

def get_dispatch(dtypes):

    for name, c_type, dtype in dtypes:
        yield name, c_type, dtype
}}

{{for name, c_type, dtype in get_dispatch(dtypes)}}


def ensure_{{name}}(object arr):
    if util.is_array(arr):
        if (<ndarray>arr).descr.type_num == NPY_{{c_type}}:
            return arr
        else:
            # equiv: arr.astype(np.{{dtype}})
            return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_{{c_type}})
    else:
        return np.asarray(arr, dtype=np.{{dtype}})

{{endfor}}
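
For example, the name='int64' iteration of the loop above expands (via
Tempita) into a concrete function equivalent to:

    def ensure_int64(object arr):
        if util.is_array(arr):
            if (<ndarray>arr).descr.type_num == NPY_INT64:
                return arr
            else:
                # equiv: arr.astype(np.int64)
                return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_INT64)
        else:
            return np.asarray(arr, dtype=np.int64)
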
@@ -0,0 +1,232 @@
"""
Template for each `dtype` helper function for take

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

# ----------------------------------------------------------------------
# take_1d, take_2d
# ----------------------------------------------------------------------


{{py:

# c_type_in, c_type_out
dtypes = [
    ('uint8_t', 'uint8_t'),
    ('uint8_t', 'object'),
    ('int8_t', 'int8_t'),
    ('int8_t', 'int32_t'),
    ('int8_t', 'int64_t'),
    ('int8_t', 'float64_t'),
    ('int16_t', 'int16_t'),
    ('int16_t', 'int32_t'),
    ('int16_t', 'int64_t'),
    ('int16_t', 'float64_t'),
    ('int32_t', 'int32_t'),
    ('int32_t', 'int64_t'),
    ('int32_t', 'float64_t'),
    ('int64_t', 'int64_t'),
    ('int64_t', 'float64_t'),
    ('float32_t', 'float32_t'),
    ('float32_t', 'float64_t'),
    ('float64_t', 'float64_t'),
    ('object', 'object'),
]


def get_dispatch(dtypes):

    for (c_type_in, c_type_out) in dtypes:

        def get_name(dtype_name):
            if dtype_name == "object":
                return "object"
            if dtype_name == "uint8_t":
                return "bool"
            return dtype_name[:-2]

        name = get_name(c_type_in)
        dest = get_name(c_type_out)

        args = dict(name=name, dest=dest, c_type_in=c_type_in,
                    c_type_out=c_type_out)

        yield (name, dest, c_type_in, c_type_out)

}}


{{for name, dest, c_type_in, c_type_out in get_dispatch(dtypes)}}


@cython.wraparound(False)
@cython.boundscheck(False)
{{if c_type_in != "object"}}
def take_1d_{{name}}_{{dest}}(const {{c_type_in}}[:] values,
{{else}}
def take_1d_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=1] values,
{{endif}}
                              const intp_t[:] indexer,
                              {{c_type_out}}[:] out,
                              fill_value=np.nan):

    cdef:
        Py_ssize_t i, n, idx
        {{c_type_out}} fv

    n = indexer.shape[0]

    fv = fill_value

    {{if c_type_out != "object"}}
    with nogil:
    {{else}}
    if True:
    {{endif}}
        for i in range(n):
            idx = indexer[i]
            if idx == -1:
                out[i] = fv
            else:
                {{if c_type_in == "uint8_t" and c_type_out == "object"}}
                out[i] = True if values[idx] > 0 else False
                {{else}}
                out[i] = values[idx]
                {{endif}}
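
Every generated take_1d_* specialization shares the semantics of this NumPy
sketch: gather values at the indexer's positions, writing fill_value
wherever the indexer holds -1.

    import numpy as np

    def take_1d_py(values, indexer, out, fill_value=np.nan):
        # reference semantics only; the Cython versions type-specialize this
        for i, idx in enumerate(indexer):
            out[i] = fill_value if idx == -1 else values[idx]

    values = np.array([10.0, 20.0, 30.0])
    out = np.empty(4)
    take_1d_py(values, np.array([2, 0, -1, 1]), out)
    # out is now [30., 10., nan, 20.]
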


@cython.wraparound(False)
@cython.boundscheck(False)
{{if c_type_in != "object"}}
def take_2d_axis0_{{name}}_{{dest}}(const {{c_type_in}}[:, :] values,
{{else}}
def take_2d_axis0_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
{{endif}}
                                    ndarray[intp_t, ndim=1] indexer,
                                    {{c_type_out}}[:, :] out,
                                    fill_value=np.nan):
    cdef:
        Py_ssize_t i, j, k, n, idx
        {{c_type_out}} fv
        {{if c_type_in == c_type_out != "object"}}
        const {{c_type_out}} *v
        {{c_type_out}} *o
        {{endif}}

    n = len(indexer)
    k = values.shape[1]

    fv = fill_value

    {{if c_type_in == c_type_out != "object"}}
    # GH#3130
    with nogil:
        if (values.strides[1] == out.strides[1] and
            values.strides[1] == sizeof({{c_type_out}}) and
            sizeof({{c_type_out}}) * n >= 256):

            for i in range(n):
                idx = indexer[i]
                if idx == -1:
                    for j in range(k):
                        out[i, j] = fv
                else:
                    v = &values[idx, 0]
                    o = &out[i, 0]
                    memmove(o, v, <size_t>(sizeof({{c_type_out}}) * k))
        else:
            for i in range(n):
                idx = indexer[i]
                if idx == -1:
                    for j in range(k):
                        out[i, j] = fv
                else:
                    for j in range(k):
                        out[i, j] = values[idx, j]
    {{else}}

    for i in range(n):
        idx = indexer[i]
        if idx == -1:
            for j in range(k):
                out[i, j] = fv
        else:
            for j in range(k):
                {{if c_type_in == "uint8_t" and c_type_out == "object"}}
                out[i, j] = True if values[idx, j] > 0 else False
                {{else}}
                out[i, j] = values[idx, j]
                {{endif}}
    {{endif}}


@cython.wraparound(False)
@cython.boundscheck(False)
{{if c_type_in != "object"}}
def take_2d_axis1_{{name}}_{{dest}}(const {{c_type_in}}[:, :] values,
{{else}}
def take_2d_axis1_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
{{endif}}
                                    ndarray[intp_t, ndim=1] indexer,
                                    {{c_type_out}}[:, :] out,
                                    fill_value=np.nan):

    cdef:
        Py_ssize_t i, j, k, n, idx
        {{c_type_out}} fv

    n = len(values)
    k = len(indexer)

    if n == 0 or k == 0:
        return

    fv = fill_value

    for i in range(n):
        for j in range(k):
            idx = indexer[j]
            if idx == -1:
                out[i, j] = fv
            else:
                {{if c_type_in == "uint8_t" and c_type_out == "object"}}
                out[i, j] = True if values[i, idx] > 0 else False
                {{else}}
                out[i, j] = values[i, idx]
                {{endif}}


@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_multi_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
                                    indexer,
                                    ndarray[{{c_type_out}}, ndim=2] out,
                                    fill_value=np.nan):
    cdef:
        Py_ssize_t i, j, k, n, idx
        ndarray[intp_t, ndim=1] idx0 = indexer[0]
        ndarray[intp_t, ndim=1] idx1 = indexer[1]
        {{c_type_out}} fv

    n = len(idx0)
    k = len(idx1)

    fv = fill_value
    for i in range(n):
        idx = idx0[i]
        if idx == -1:
            for j in range(k):
                out[i, j] = fv
        else:
            for j in range(k):
                if idx1[j] == -1:
                    out[i, j] = fv
                else:
                    {{if c_type_in == "uint8_t" and c_type_out == "object"}}
                    out[i, j] = True if values[idx, idx1[j]] > 0 else False
                    {{else}}
                    out[i, j] = values[idx, idx1[j]]
                    {{endif}}

{{endfor}}

Binary file not shown.

11  teil20/lib/python3.11/site-packages/pandas/_libs/arrays.pxd  (Normal file)
@@ -0,0 +1,11 @@

from numpy cimport ndarray


cdef class NDArrayBacked:
    cdef:
        readonly ndarray _ndarray
        readonly object _dtype

    cpdef NDArrayBacked _from_backing_data(self, ndarray values)
    cpdef __setstate__(self, state)

40  teil20/lib/python3.11/site-packages/pandas/_libs/arrays.pyi  (Normal file)
@@ -0,0 +1,40 @@
from typing import Sequence

import numpy as np

from pandas._typing import (
    AxisInt,
    DtypeObj,
    Self,
    Shape,
)

class NDArrayBacked:
    _dtype: DtypeObj
    _ndarray: np.ndarray
    def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ...
    @classmethod
    def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ...
    def _from_backing_data(self, values: np.ndarray): ...
    def __setstate__(self, state): ...
    def __len__(self) -> int: ...
    @property
    def shape(self) -> Shape: ...
    @property
    def ndim(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def copy(self): ...
    def delete(self, loc, axis=...): ...
    def swapaxes(self, axis1, axis2): ...
    def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ...
    def reshape(self, *args, **kwargs): ...
    def ravel(self, order=...): ...
    @property
    def T(self): ...
    @classmethod
    def _concat_same_type(
        cls, to_concat: Sequence[Self], axis: AxisInt = ...
    ) -> Self: ...

191  teil20/lib/python3.11/site-packages/pandas/_libs/arrays.pyx  (Normal file)
@@ -0,0 +1,191 @@
"""
Cython implementations for internal ExtensionArrays.
"""
cimport cython

import numpy as np

cimport numpy as cnp
from cpython cimport PyErr_Clear
from numpy cimport ndarray

cnp.import_array()


@cython.freelist(16)
cdef class NDArrayBacked:
    """
    Implementing these methods in cython improves performance quite a bit.

    import pandas as pd

    from pandas._libs.arrays import NDArrayBacked as cls

    dti = pd.date_range("2016-01-01", periods=3)
    dta = dti._data
    arr = dta._ndarray

    obj = cls._simple_new(arr, arr.dtype)

    # for foo in [arr, dta, obj]: ...

    %timeit foo.copy()
    299 ns ± 30 ns per loop     # <-- arr underlying ndarray (for reference)
    530 ns ± 9.24 ns per loop   # <-- dta with cython NDArrayBacked
    1.66 µs ± 46.3 ns per loop  # <-- dta without cython NDArrayBacked
    328 ns ± 5.29 ns per loop   # <-- obj with NDArrayBacked.__cinit__
    371 ns ± 6.97 ns per loop   # <-- obj with NDArrayBacked._simple_new

    %timeit foo.T
    125 ns ± 6.27 ns per loop   # <-- arr underlying ndarray (for reference)
    226 ns ± 7.66 ns per loop   # <-- dta with cython NDArrayBacked
    911 ns ± 16.6 ns per loop   # <-- dta without cython NDArrayBacked
    215 ns ± 4.54 ns per loop   # <-- obj with NDArrayBacked._simple_new

    """
    # TODO: implement take in terms of cnp.PyArray_TakeFrom
    # TODO: implement concat_same_type in terms of cnp.PyArray_Concatenate

    # cdef:
    #    readonly ndarray _ndarray
    #    readonly object _dtype

    def __init__(self, ndarray values, object dtype):
        self._ndarray = values
        self._dtype = dtype

    @classmethod
    def _simple_new(cls, ndarray values, object dtype):
        cdef:
            NDArrayBacked obj
        obj = NDArrayBacked.__new__(cls)
        obj._ndarray = values
        obj._dtype = dtype
        return obj

    cpdef NDArrayBacked _from_backing_data(self, ndarray values):
        """
        Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.

        This should round-trip:
            self == self._from_backing_data(self._ndarray)
        """
        # TODO: re-reuse simple_new if/when it can be cpdef
        cdef:
            NDArrayBacked obj
        obj = NDArrayBacked.__new__(type(self))
        obj._ndarray = values
        obj._dtype = self._dtype
        return obj
 | 
			
		||||
 | 
			
		||||
    cpdef __setstate__(self, state):
 | 
			
		||||
        if isinstance(state, dict):
 | 
			
		||||
            if "_data" in state:
 | 
			
		||||
                data = state.pop("_data")
 | 
			
		||||
            elif "_ndarray" in state:
 | 
			
		||||
                data = state.pop("_ndarray")
 | 
			
		||||
            else:
 | 
			
		||||
                raise ValueError  # pragma: no cover
 | 
			
		||||
            self._ndarray = data
 | 
			
		||||
            self._dtype = state.pop("_dtype")
 | 
			
		||||
 | 
			
		||||
            for key, val in state.items():
 | 
			
		||||
                setattr(self, key, val)
 | 
			
		||||
        elif isinstance(state, tuple):
 | 
			
		||||
            if len(state) != 3:
 | 
			
		||||
                if len(state) == 1 and isinstance(state[0], dict):
 | 
			
		||||
                    self.__setstate__(state[0])
 | 
			
		||||
                    return
 | 
			
		||||
                raise NotImplementedError(state)  # pragma: no cover
 | 
			
		||||
 | 
			
		||||
            data, dtype = state[:2]
 | 
			
		||||
            if isinstance(dtype, np.ndarray):
 | 
			
		||||
                dtype, data = data, dtype
 | 
			
		||||
            self._ndarray = data
 | 
			
		||||
            self._dtype = dtype
 | 
			
		||||
 | 
			
		||||
            if isinstance(state[2], dict):
 | 
			
		||||
                for key, val in state[2].items():
 | 
			
		||||
                    setattr(self, key, val)
 | 
			
		||||
            else:
 | 
			
		||||
                raise NotImplementedError(state)  # pragma: no cover
 | 
			
		||||
        else:
 | 
			
		||||
            raise NotImplementedError(state)  # pragma: no cover
 | 
			
		||||
 | 
			
		||||
    def __len__(self) -> int:
 | 
			
		||||
        return len(self._ndarray)
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def shape(self):
 | 
			
		||||
        # object cast bc _ndarray.shape is npy_intp*
 | 
			
		||||
        return (<object>(self._ndarray)).shape
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def ndim(self) -> int:
 | 
			
		||||
        return self._ndarray.ndim
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def size(self) -> int:
 | 
			
		||||
        # TODO(cython3): use self._ndarray.size
 | 
			
		||||
        return cnp.PyArray_SIZE(self._ndarray)
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def nbytes(self) -> int:
 | 
			
		||||
        return cnp.PyArray_NBYTES(self._ndarray)
 | 
			
		||||
 | 
			
		||||
    def copy(self, order="C"):
 | 
			
		||||
        cdef:
 | 
			
		||||
            cnp.NPY_ORDER order_code
 | 
			
		||||
            int success
 | 
			
		||||
 | 
			
		||||
        success = cnp.PyArray_OrderConverter(order, &order_code)
 | 
			
		||||
        if not success:
 | 
			
		||||
            # clear exception so that we don't get a SystemError
 | 
			
		||||
            PyErr_Clear()
 | 
			
		||||
            # same message used by numpy
 | 
			
		||||
            msg = f"order must be one of 'C', 'F', 'A', or 'K' (got '{order}')"
 | 
			
		||||
            raise ValueError(msg)
 | 
			
		||||
 | 
			
		||||
        res_values = cnp.PyArray_NewCopy(self._ndarray, order_code)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    def delete(self, loc, axis=0):
 | 
			
		||||
        res_values = np.delete(self._ndarray, loc, axis=axis)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    def swapaxes(self, axis1, axis2):
 | 
			
		||||
        res_values = cnp.PyArray_SwapAxes(self._ndarray, axis1, axis2)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    # TODO: pass NPY_MAXDIMS equiv to axis=None?
 | 
			
		||||
    def repeat(self, repeats, axis: int | np.integer = 0):
 | 
			
		||||
        if axis is None:
 | 
			
		||||
            axis = 0
 | 
			
		||||
        res_values = cnp.PyArray_Repeat(self._ndarray, repeats, <int>axis)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    def reshape(self, *args, **kwargs):
 | 
			
		||||
        res_values = self._ndarray.reshape(*args, **kwargs)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    def ravel(self, order="C"):
 | 
			
		||||
        # cnp.PyArray_OrderConverter(PyObject* obj, NPY_ORDER* order)
 | 
			
		||||
        # res_values = cnp.PyArray_Ravel(self._ndarray, order)
 | 
			
		||||
        res_values = self._ndarray.ravel(order)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def T(self):
 | 
			
		||||
        res_values = self._ndarray.T
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    def transpose(self, *axes):
 | 
			
		||||
        res_values = self._ndarray.transpose(*axes)
 | 
			
		||||
        return self._from_backing_data(res_values)
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def _concat_same_type(cls, to_concat, axis=0):
 | 
			
		||||
        # NB: We are assuming at this point that dtypes all match
 | 
			
		||||
        new_values = [obj._ndarray for obj in to_concat]
 | 
			
		||||
        new_arr = cnp.PyArray_Concatenate(new_values, axis)
 | 
			
		||||
        return to_concat[0]._from_backing_data(new_arr)
 | 
			
		||||
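The timings quoted in the NDArrayBacked docstring come from IPython's %timeit. Below is a minimal sketch of reproducing them with the stdlib timeit module, reusing the docstring's names; absolute numbers will vary by machine.

# Sketch: rerun the docstring's micro-benchmark without IPython.
import timeit

import pandas as pd
from pandas._libs.arrays import NDArrayBacked

dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data                  # DatetimeArray, an NDArrayBacked subclass
arr = dta._ndarray               # the underlying ndarray, for reference
obj = NDArrayBacked._simple_new(arr, arr.dtype)

for label, foo in [("arr", arr), ("dta", dta), ("obj", obj)]:
    t_copy = min(timeit.repeat(lambda: foo.copy(), number=100_000))
    t_transpose = min(timeit.repeat(lambda: foo.T, number=100_000))
    print(f"{label}: copy {t_copy:.3f}s, T {t_transpose:.3f}s per 100k calls")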
										
Binary file not shown.
@@ -0,0 +1,5 @@
def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
@@ -0,0 +1,85 @@
"""
The following are faster versions of struct.unpack that avoid the overhead of Python
function calls.

In the SAS7BDAT parser, they may be called up to (n_rows * n_cols) times.
"""
from cython cimport Py_ssize_t
from libc.stdint cimport (
    uint16_t,
    uint32_t,
    uint64_t,
)
from libc.string cimport memcpy


def read_float_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
    cdef uint32_t value
    assert offset + sizeof(value) < len(data)
    cdef const void *ptr = <unsigned char*>(data) + offset
    memcpy(&value, ptr, sizeof(value))
    if byteswap:
        value = _byteswap4(value)

    cdef float res
    memcpy(&res, &value, sizeof(res))
    return res


def read_double_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
    cdef uint64_t value
    assert offset + sizeof(value) < len(data)
    cdef const void *ptr = <unsigned char*>(data) + offset
    memcpy(&value, ptr, sizeof(value))
    if byteswap:
        value = _byteswap8(value)

    cdef double res
    memcpy(&res, &value, sizeof(res))
    return res


def read_uint16_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
    cdef uint16_t res
    assert offset + sizeof(res) < len(data)
    memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
    if byteswap:
        res = _byteswap2(res)
    return res


def read_uint32_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
    cdef uint32_t res
    assert offset + sizeof(res) < len(data)
    memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
    if byteswap:
        res = _byteswap4(res)
    return res


def read_uint64_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
    cdef uint64_t res
    assert offset + sizeof(res) < len(data)
    memcpy(&res, <const unsigned char*>(data) + offset, sizeof(res))
    if byteswap:
        res = _byteswap8(res)
    return res


# Byteswapping

cdef extern from *:
    """
    #ifdef _MSC_VER
        #define _byteswap2 _byteswap_ushort
        #define _byteswap4 _byteswap_ulong
        #define _byteswap8 _byteswap_uint64
    #else
        #define _byteswap2 __builtin_bswap16
        #define _byteswap4 __builtin_bswap32
        #define _byteswap8 __builtin_bswap64
    #endif
    """
    uint16_t _byteswap2(uint16_t)
    uint32_t _byteswap4(uint32_t)
    uint64_t _byteswap8(uint64_t)
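For orientation, here is a pure-Python reference for what these readers compute, using struct; the Cython versions exist precisely to avoid this per-call overhead. Note that `byteswap` means "swap relative to native order", so the big-endian branch below assumes the usual little-endian host.

# Illustration only; not used by the SAS parser itself.
import struct
import sys

def read_uint32_py(data: bytes, offset: int, byteswap: bool) -> int:
    assert sys.byteorder == "little"  # assumption for the ">I" branch
    fmt = ">I" if byteswap else "<I"  # big- vs little-endian uint32
    return struct.unpack_from(fmt, data, offset)[0]

assert read_uint32_py(b"\x01\x00\x00\x00", 0, byteswap=False) == 1
assert read_uint32_py(b"\x01\x00\x00\x00", 0, byteswap=True) == 0x01000000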
							
								
								
									
										36
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/dtypes.pxd
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										36
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/dtypes.pxd
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,36 @@
"""
Common location for shared fused types
"""

from numpy cimport (
    float32_t,
    float64_t,
    int8_t,
    int16_t,
    int32_t,
    int64_t,
    uint8_t,
    uint16_t,
    uint32_t,
    uint64_t,
)

# All numeric types except complex
ctypedef fused numeric_t:
    int8_t
    int16_t
    int32_t
    int64_t

    uint8_t
    uint16_t
    uint32_t
    uint64_t

    float32_t
    float64_t

# All numeric types + object, doesn't include complex
ctypedef fused numeric_object_t:
    numeric_t
    object
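A Python-side view of what these fused types expand to: Cython generates one specialization per member dtype, mirroring this NumPy list (a sketch for orientation, not part of the build).

import numpy as np

NUMERIC_T = [np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64,
             np.float32, np.float64]
NUMERIC_OBJECT_T = NUMERIC_T + [np.object_]  # adds object; still no complex

assert all(np.issubdtype(t, np.number) for t in NUMERIC_T)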
										
											Binary file not shown.
										
									
								
							
							
								
								
									
203  teil20/lib/python3.11/site-packages/pandas/_libs/groupby.pyi  Normal file
@@ -0,0 +1,203 @@
from typing import Literal

import numpy as np

from pandas._typing import npt

def group_median_float64(
    out: np.ndarray,  # ndarray[float64_t, ndim=2]
    counts: npt.NDArray[np.int64],
    values: np.ndarray,  # ndarray[float64_t, ndim=2]
    labels: npt.NDArray[np.int64],
    min_count: int = ...,  # Py_ssize_t
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cumprod(
    out: np.ndarray,  # float64_t[:, ::1]
    values: np.ndarray,  # const float64_t[:, :]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    is_datetimelike: bool,
    skipna: bool = ...,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cumsum(
    out: np.ndarray,  # int64float_t[:, ::1]
    values: np.ndarray,  # ndarray[int64float_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    is_datetimelike: bool,
    skipna: bool = ...,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_shift_indexer(
    out: np.ndarray,  # int64_t[::1]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    periods: int,
) -> None: ...
def group_fillna_indexer(
    out: np.ndarray,  # ndarray[intp_t]
    labels: np.ndarray,  # ndarray[int64_t]
    sorted_labels: npt.NDArray[np.intp],
    mask: npt.NDArray[np.uint8],
    direction: Literal["ffill", "bfill"],
    limit: int,  # int64_t
    dropna: bool,
) -> None: ...
def group_any_all(
    out: np.ndarray,  # uint8_t[::1]
    values: np.ndarray,  # const uint8_t[::1]
    labels: np.ndarray,  # const int64_t[:]
    mask: np.ndarray,  # const uint8_t[::1]
    val_test: Literal["any", "all"],
    skipna: bool,
    nullable: bool,
) -> None: ...
def group_sum(
    out: np.ndarray,  # complexfloatingintuint_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[complexfloatingintuint_t, ndim=2]
    labels: np.ndarray,  # const intp_t[:]
    mask: np.ndarray | None,
    result_mask: np.ndarray | None = ...,
    min_count: int = ...,
    is_datetimelike: bool = ...,
) -> None: ...
def group_prod(
    out: np.ndarray,  # int64float_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[int64float_t, ndim=2]
    labels: np.ndarray,  # const intp_t[:]
    mask: np.ndarray | None,
    result_mask: np.ndarray | None = ...,
    min_count: int = ...,
) -> None: ...
def group_var(
    out: np.ndarray,  # floating[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[floating, ndim=2]
    labels: np.ndarray,  # const intp_t[:]
    min_count: int = ...,  # Py_ssize_t
    ddof: int = ...,  # int64_t
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
    is_datetimelike: bool = ...,
    name: str = ...,
) -> None: ...
def group_skew(
    out: np.ndarray,  # float64_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[float64_t, ndim=2]
    labels: np.ndarray,  # const intp_t[::1]
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
    skipna: bool = ...,
) -> None: ...
def group_mean(
    out: np.ndarray,  # floating[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[floating, ndim=2]
    labels: np.ndarray,  # const intp_t[:]
    min_count: int = ...,  # Py_ssize_t
    is_datetimelike: bool = ...,  # bint
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_ohlc(
    out: np.ndarray,  # floatingintuint_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[floatingintuint_t, ndim=2]
    labels: np.ndarray,  # const intp_t[:]
    min_count: int = ...,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_quantile(
    out: npt.NDArray[np.float64],
    values: np.ndarray,  # ndarray[numeric, ndim=1]
    labels: npt.NDArray[np.intp],
    mask: npt.NDArray[np.uint8],
    qs: npt.NDArray[np.float64],  # const
    starts: npt.NDArray[np.int64],
    ends: npt.NDArray[np.int64],
    interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
    result_mask: np.ndarray | None,
    is_datetimelike: bool,
) -> None: ...
def group_last(
    out: np.ndarray,  # rank_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[rank_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    mask: npt.NDArray[np.bool_] | None,
    result_mask: npt.NDArray[np.bool_] | None = ...,
    min_count: int = ...,  # Py_ssize_t
    is_datetimelike: bool = ...,
) -> None: ...
def group_nth(
    out: np.ndarray,  # rank_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[rank_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    mask: npt.NDArray[np.bool_] | None,
    result_mask: npt.NDArray[np.bool_] | None = ...,
    min_count: int = ...,  # int64_t
    rank: int = ...,  # int64_t
    is_datetimelike: bool = ...,
) -> None: ...
def group_rank(
    out: np.ndarray,  # float64_t[:, ::1]
    values: np.ndarray,  # ndarray[rank_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    is_datetimelike: bool,
    ties_method: Literal["average", "min", "max", "first", "dense"] = ...,
    ascending: bool = ...,
    pct: bool = ...,
    na_option: Literal["keep", "top", "bottom"] = ...,
    mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def group_max(
    out: np.ndarray,  # groupby_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    min_count: int = ...,
    is_datetimelike: bool = ...,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_min(
    out: np.ndarray,  # groupby_t[:, ::1]
    counts: np.ndarray,  # int64_t[::1]
    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    min_count: int = ...,
    is_datetimelike: bool = ...,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cummin(
    out: np.ndarray,  # groupby_t[:, ::1]
    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    is_datetimelike: bool,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
    skipna: bool = ...,
) -> None: ...
def group_cummax(
    out: np.ndarray,  # groupby_t[:, ::1]
    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
    labels: np.ndarray,  # const int64_t[:]
    ngroups: int,
    is_datetimelike: bool,
    mask: np.ndarray | None = ...,
    result_mask: np.ndarray | None = ...,
    skipna: bool = ...,
) -> None: ...
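These kernels share one calling convention: they fill preallocated `out` and `counts` buffers in place, indexed by group label, and skip rows whose label is -1. A pure-NumPy sketch of group_sum's semantics (reference behavior only, not the actual implementation):

import numpy as np

def group_sum_ref(out, counts, values, labels):
    # out: (ngroups, ncols); counts: (ngroups,); labels: (nrows,)
    for i, lab in enumerate(labels):
        if lab == -1:           # -1 marks rows that belong to no group
            continue
        counts[lab] += 1
        out[lab] += values[i]

values = np.array([[1.0], [2.0], [3.0], [4.0]])
labels = np.array([0, 1, 0, -1], dtype=np.intp)
out = np.zeros((2, 1))
counts = np.zeros(2, dtype=np.int64)
group_sum_ref(out, counts, values, labels)
assert out.tolist() == [[4.0], [2.0]] and counts.tolist() == [2, 1]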
							
								
								
									
2011  teil20/lib/python3.11/site-packages/pandas/_libs/groupby.pyx  Normal file
File diff suppressed because it is too large
Binary file not shown.
@@ -0,0 +1,9 @@
import numpy as np

from pandas._typing import npt

def hash_object_array(
    arr: npt.NDArray[np.object_],
    key: str,
    encoding: str = ...,
) -> npt.NDArray[np.uint64]: ...
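Usage sketch for the stub above: the key must encode to exactly 16 bytes, and the result is one uint64 hash per element.

import numpy as np
from pandas._libs.hashing import hash_object_array

arr = np.array(["a", "b", None], dtype=object)       # strings and nulls are allowed
hashes = hash_object_array(arr, "0123456789123456")  # 16-byte key
assert hashes.dtype == np.uint64 and hashes.shape == arr.shape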
							
								
								
									
194  teil20/lib/python3.11/site-packages/pandas/_libs/hashing.pyx  Normal file
@@ -0,0 +1,194 @@
# Translated from the reference implementation
# at https://github.com/veorq/SipHash

cimport cython
from libc.stdlib cimport (
    free,
    malloc,
)

import numpy as np

from numpy cimport (
    import_array,
    ndarray,
    uint8_t,
    uint64_t,
)

import_array()

from pandas._libs.util cimport is_nan


@cython.boundscheck(False)
def hash_object_array(
    ndarray[object] arr, str key, str encoding="utf8"
) -> np.ndarray[np.uint64]:
    """
    Parameters
    ----------
    arr : 1-d object ndarray of objects
    key : hash key; must encode to exactly 16 bytes
    encoding : encoding for key & arr, defaults to 'utf8'

    Returns
    -------
    1-d uint64 ndarray of hashes.

    Raises
    ------
    TypeError
        If the array contains mixed types.

    Notes
    -----
    Allowed values must be strings or nulls;
    mixed array types will raise TypeError.
    """
    cdef:
        Py_ssize_t i, n
        uint64_t[::1] result
        bytes data, k
        uint8_t *kb
        uint64_t *lens
        char **vecs
        char *cdata
        object val
        list data_list = []

    k = <bytes>key.encode(encoding)
    kb = <uint8_t *>k
    if len(k) != 16:
        raise ValueError(
            f"key should be a 16-byte string encoded, got {k} (len {len(k)})"
        )

    n = len(arr)

    # create an array of bytes
    vecs = <char **>malloc(n * sizeof(char *))
    lens = <uint64_t*>malloc(n * sizeof(uint64_t))

    for i in range(n):
        val = arr[i]
        if isinstance(val, bytes):
            data = <bytes>val
        elif isinstance(val, str):
            data = <bytes>val.encode(encoding)
        elif val is None or is_nan(val):
            # null, stringify and encode
            data = <bytes>str(val).encode(encoding)

        elif isinstance(val, tuple):
            # GH#28969 we could have a tuple, but need to ensure that
            #  the tuple entries are themselves hashable before converting
            #  to str
            hash(val)
            data = <bytes>str(val).encode(encoding)
        else:
            raise TypeError(
                f"{val} of type {type(val)} is not a valid type for hashing, "
                "must be string or null"
            )

        lens[i] = len(data)
        cdata = data

        # keep the references alive through the end of the
        # function
        data_list.append(data)
        vecs[i] = cdata

    result = np.empty(n, dtype=np.uint64)
    with nogil:
        for i in range(n):
            result[i] = low_level_siphash(<uint8_t *>vecs[i], lens[i], kb)

    free(vecs)
    free(lens)
    return result.base  # .base to retrieve underlying np.ndarray


cdef uint64_t _rotl(uint64_t x, uint64_t b) noexcept nogil:
    return (x << b) | (x >> (64 - b))


cdef uint64_t u8to64_le(uint8_t* p) noexcept nogil:
    return (<uint64_t>p[0] |
            <uint64_t>p[1] << 8 |
            <uint64_t>p[2] << 16 |
            <uint64_t>p[3] << 24 |
            <uint64_t>p[4] << 32 |
            <uint64_t>p[5] << 40 |
            <uint64_t>p[6] << 48 |
            <uint64_t>p[7] << 56)


cdef void _sipround(uint64_t* v0, uint64_t* v1,
                    uint64_t* v2, uint64_t* v3) noexcept nogil:
    v0[0] += v1[0]
    v1[0] = _rotl(v1[0], 13)
    v1[0] ^= v0[0]
    v0[0] = _rotl(v0[0], 32)
    v2[0] += v3[0]
    v3[0] = _rotl(v3[0], 16)
    v3[0] ^= v2[0]
    v0[0] += v3[0]
    v3[0] = _rotl(v3[0], 21)
    v3[0] ^= v0[0]
    v2[0] += v1[0]
    v1[0] = _rotl(v1[0], 17)
    v1[0] ^= v2[0]
    v2[0] = _rotl(v2[0], 32)


@cython.cdivision(True)
cdef uint64_t low_level_siphash(uint8_t* data, size_t datalen,
                                uint8_t* key) noexcept nogil:
    cdef uint64_t v0 = 0x736f6d6570736575ULL
    cdef uint64_t v1 = 0x646f72616e646f6dULL
    cdef uint64_t v2 = 0x6c7967656e657261ULL
    cdef uint64_t v3 = 0x7465646279746573ULL
    cdef uint64_t b
    cdef uint64_t k0 = u8to64_le(key)
    cdef uint64_t k1 = u8to64_le(key + 8)
    cdef uint64_t m
    cdef int i
    cdef uint8_t* end = data + datalen - (datalen % sizeof(uint64_t))
    cdef int left = datalen & 7
    cdef int cROUNDS = 2
    cdef int dROUNDS = 4

    b = (<uint64_t>datalen) << 56
    v3 ^= k1
    v2 ^= k0
    v1 ^= k1
    v0 ^= k0

    while (data != end):
        m = u8to64_le(data)
        v3 ^= m
        for i in range(cROUNDS):
            _sipround(&v0, &v1, &v2, &v3)
        v0 ^= m

        data += sizeof(uint64_t)

    for i in range(left-1, -1, -1):
        b |= (<uint64_t>data[i]) << (i * 8)

    v3 ^= b

    for i in range(cROUNDS):
        _sipround(&v0, &v1, &v2, &v3)

    v0 ^= b
    v2 ^= 0xff

    for i in range(dROUNDS):
        _sipround(&v0, &v1, &v2, &v3)

    b = v0 ^ v1 ^ v2 ^ v3

    return b
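The `_rotl` helper relies on C's fixed-width integer wraparound; a pure-Python equivalent needs an explicit 64-bit mask because Python integers are unbounded:

MASK64 = (1 << 64) - 1

def rotl64(x: int, b: int) -> int:
    x &= MASK64
    return ((x << b) | (x >> (64 - b))) & MASK64

assert rotl64(1, 63) == 1 << 63
assert rotl64(1 << 63, 1) == 1   # the high bit wraps around to bit 0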
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										189
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/hashtable.pxd
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										189
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/hashtable.pxd
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,189 @@
from numpy cimport (
    intp_t,
    ndarray,
)

from pandas._libs.khash cimport (
    complex64_t,
    complex128_t,
    float32_t,
    float64_t,
    int8_t,
    int16_t,
    int32_t,
    int64_t,
    kh_complex64_t,
    kh_complex128_t,
    kh_float32_t,
    kh_float64_t,
    kh_int8_t,
    kh_int16_t,
    kh_int32_t,
    kh_int64_t,
    kh_pymap_t,
    kh_str_t,
    kh_uint8_t,
    kh_uint16_t,
    kh_uint32_t,
    kh_uint64_t,
    khcomplex64_t,
    khcomplex128_t,
    uint8_t,
    uint16_t,
    uint32_t,
    uint64_t,
)

# prototypes for sharing

cdef class HashTable:
    pass

cdef class UInt64HashTable(HashTable):
    cdef kh_uint64_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, uint64_t val)
    cpdef set_item(self, uint64_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Int64HashTable(HashTable):
    cdef kh_int64_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, int64_t val)
    cpdef set_item(self, int64_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class UInt32HashTable(HashTable):
    cdef kh_uint32_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, uint32_t val)
    cpdef set_item(self, uint32_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Int32HashTable(HashTable):
    cdef kh_int32_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, int32_t val)
    cpdef set_item(self, int32_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class UInt16HashTable(HashTable):
    cdef kh_uint16_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, uint16_t val)
    cpdef set_item(self, uint16_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Int16HashTable(HashTable):
    cdef kh_int16_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, int16_t val)
    cpdef set_item(self, int16_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class UInt8HashTable(HashTable):
    cdef kh_uint8_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, uint8_t val)
    cpdef set_item(self, uint8_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Int8HashTable(HashTable):
    cdef kh_int8_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, int8_t val)
    cpdef set_item(self, int8_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Float64HashTable(HashTable):
    cdef kh_float64_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, float64_t val)
    cpdef set_item(self, float64_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Float32HashTable(HashTable):
    cdef kh_float32_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, float32_t val)
    cpdef set_item(self, float32_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Complex64HashTable(HashTable):
    cdef kh_complex64_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, complex64_t val)
    cpdef set_item(self, complex64_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class Complex128HashTable(HashTable):
    cdef kh_complex128_t *table
    cdef int64_t na_position
    cdef bint uses_mask

    cpdef get_item(self, complex128_t val)
    cpdef set_item(self, complex128_t key, Py_ssize_t val)
    cpdef get_na(self)
    cpdef set_na(self, Py_ssize_t val)

cdef class PyObjectHashTable(HashTable):
    cdef kh_pymap_t *table

    cpdef get_item(self, object val)
    cpdef set_item(self, object key, Py_ssize_t val)


cdef class StringHashTable(HashTable):
    cdef kh_str_t *table

    cpdef get_item(self, str val)
    cpdef set_item(self, str key, Py_ssize_t val)

cdef struct Int64VectorData:
    int64_t *data
    Py_ssize_t n, m

cdef class Vector:
    cdef bint external_view_exists

cdef class Int64Vector(Vector):
    cdef Int64VectorData *data
    cdef ndarray ao

    cdef resize(self)
    cpdef ndarray to_array(self)
    cdef void append(self, int64_t x) noexcept
    cdef extend(self, int64_t[:] x)
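Usage sketch for the get_item/set_item interface these declarations expose through the compiled module: the tables map scalar keys to integer positions, and get_item raises KeyError for a missing key.

from pandas._libs.hashtable import Int64HashTable

table = Int64HashTable()
table.set_item(5, 0)             # map key 5 -> position 0
assert table.get_item(5) == 0
assert 5 in table and len(table) == 1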
							
								
								
									
251  teil20/lib/python3.11/site-packages/pandas/_libs/hashtable.pyi  Normal file
@@ -0,0 +1,251 @@
from typing import (
    Any,
    Hashable,
    Literal,
)

import numpy as np

from pandas._typing import npt

def unique_label_indices(
    labels: np.ndarray,  # const int64_t[:]
) -> np.ndarray: ...

class Factorizer:
    count: int
    uniques: Any
    def __init__(self, size_hint: int) -> None: ...
    def get_count(self) -> int: ...
    def factorize(
        self,
        values: np.ndarray,
        sort: bool = ...,
        na_sentinel=...,
        na_value=...,
        mask=...,
    ) -> npt.NDArray[np.intp]: ...

class ObjectFactorizer(Factorizer):
    table: PyObjectHashTable
    uniques: ObjectVector

class Int64Factorizer(Factorizer):
    table: Int64HashTable
    uniques: Int64Vector

class UInt64Factorizer(Factorizer):
    table: UInt64HashTable
    uniques: UInt64Vector

class Int32Factorizer(Factorizer):
    table: Int32HashTable
    uniques: Int32Vector

class UInt32Factorizer(Factorizer):
    table: UInt32HashTable
    uniques: UInt32Vector

class Int16Factorizer(Factorizer):
    table: Int16HashTable
    uniques: Int16Vector

class UInt16Factorizer(Factorizer):
    table: UInt16HashTable
    uniques: UInt16Vector

class Int8Factorizer(Factorizer):
    table: Int8HashTable
    uniques: Int8Vector

class UInt8Factorizer(Factorizer):
    table: UInt8HashTable
    uniques: UInt8Vector

class Float64Factorizer(Factorizer):
    table: Float64HashTable
    uniques: Float64Vector

class Float32Factorizer(Factorizer):
    table: Float32HashTable
    uniques: Float32Vector

class Complex64Factorizer(Factorizer):
    table: Complex64HashTable
    uniques: Complex64Vector

class Complex128Factorizer(Factorizer):
    table: Complex128HashTable
    uniques: Complex128Vector

class Int64Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.int64]: ...

class Int32Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.int32]: ...

class Int16Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.int16]: ...

class Int8Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.int8]: ...

class UInt64Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.uint64]: ...

class UInt32Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.uint32]: ...

class UInt16Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.uint16]: ...

class UInt8Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.uint8]: ...

class Float64Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.float64]: ...

class Float32Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.float32]: ...

class Complex128Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.complex128]: ...

class Complex64Vector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.complex64]: ...

class StringVector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.object_]: ...

class ObjectVector:
    def __init__(self, *args) -> None: ...
    def __len__(self) -> int: ...
    def to_array(self) -> npt.NDArray[np.object_]: ...

class HashTable:
    # NB: The base HashTable class does _not_ actually have these methods;
    #  we are putting them here for the sake of mypy to avoid
    #  reproducing them in each subclass below.
    def __init__(self, size_hint: int = ..., uses_mask: bool = ...) -> None: ...
    def __len__(self) -> int: ...
    def __contains__(self, key: Hashable) -> bool: ...
    def sizeof(self, deep: bool = ...) -> int: ...
    def get_state(self) -> dict[str, int]: ...
    # TODO: `item` type is subclass-specific
    def get_item(self, item): ...  # TODO: return type?
    def set_item(self, item, val) -> None: ...
    def get_na(self): ...  # TODO: return type?
    def set_na(self, val) -> None: ...
    def map_locations(
        self,
        values: np.ndarray,  # np.ndarray[subclass-specific]
        mask: npt.NDArray[np.bool_] | None = ...,
    ) -> None: ...
    def lookup(
        self,
        values: np.ndarray,  # np.ndarray[subclass-specific]
        mask: npt.NDArray[np.bool_] | None = ...,
    ) -> npt.NDArray[np.intp]: ...
    def get_labels(
        self,
        values: np.ndarray,  # np.ndarray[subclass-specific]
        uniques,  # SubclassTypeVector
        count_prior: int = ...,
        na_sentinel: int = ...,
        na_value: object = ...,
        mask=...,
    ) -> npt.NDArray[np.intp]: ...
    def unique(
        self,
        values: np.ndarray,  # np.ndarray[subclass-specific]
        return_inverse: bool = ...,
    ) -> (
        tuple[
            np.ndarray,  # np.ndarray[subclass-specific]
            npt.NDArray[np.intp],
        ]
        | np.ndarray
    ): ...  # np.ndarray[subclass-specific]
    def factorize(
        self,
        values: np.ndarray,  # np.ndarray[subclass-specific]
        na_sentinel: int = ...,
        na_value: object = ...,
        mask=...,
    ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ...  # np.ndarray[subclass-specific]

class Complex128HashTable(HashTable): ...
class Complex64HashTable(HashTable): ...
class Float64HashTable(HashTable): ...
class Float32HashTable(HashTable): ...

class Int64HashTable(HashTable):
    # Only Int64HashTable has get_labels_groupby, map_keys_to_values
    def get_labels_groupby(
        self,
        values: npt.NDArray[np.int64],  # const int64_t[:]
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ...
    def map_keys_to_values(
        self,
        keys: npt.NDArray[np.int64],
        values: npt.NDArray[np.int64],  # const int64_t[:]
    ) -> None: ...

class Int32HashTable(HashTable): ...
class Int16HashTable(HashTable): ...
class Int8HashTable(HashTable): ...
class UInt64HashTable(HashTable): ...
class UInt32HashTable(HashTable): ...
class UInt16HashTable(HashTable): ...
class UInt8HashTable(HashTable): ...
class StringHashTable(HashTable): ...
class PyObjectHashTable(HashTable): ...
class IntpHashTable(HashTable): ...

def duplicated(
    values: np.ndarray,
    keep: Literal["last", "first", False] = ...,
    mask: npt.NDArray[np.bool_] | None = ...,
) -> npt.NDArray[np.bool_]: ...
def mode(
    values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ...
) -> np.ndarray: ...
def value_count(
    values: np.ndarray,
    dropna: bool,
    mask: npt.NDArray[np.bool_] | None = ...,
) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ...  # np.ndarray[same-as-values]

# arr and values should have same dtype
def ismember(
    arr: np.ndarray,
    values: np.ndarray,
) -> npt.NDArray[np.bool_]: ...
def object_hash(obj) -> int: ...
def objects_are_equal(a, b) -> bool: ...
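Usage sketch for two of the module-level helpers stubbed above:

import numpy as np
from pandas._libs.hashtable import duplicated, ismember

values = np.array([1.0, 2.0, 1.0, 3.0])
# keep="first" flags every occurrence after the first as a duplicate
assert duplicated(values, keep="first").tolist() == [False, False, True, False]
# ismember tests each element of `values` for membership in the second array
assert ismember(values, np.array([1.0, 3.0])).tolist() == [True, False, True, True]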
							
								
								
									
125  teil20/lib/python3.11/site-packages/pandas/_libs/hashtable.pyx  Normal file
@@ -0,0 +1,125 @@
 | 
			
		||||
cimport cython
 | 
			
		||||
from cpython.mem cimport (
 | 
			
		||||
    PyMem_Free,
 | 
			
		||||
    PyMem_Malloc,
 | 
			
		||||
)
 | 
			
		||||
from cpython.ref cimport (
 | 
			
		||||
    Py_INCREF,
 | 
			
		||||
    PyObject,
 | 
			
		||||
)
 | 
			
		||||
from libc.stdlib cimport (
 | 
			
		||||
    free,
 | 
			
		||||
    malloc,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
cimport numpy as cnp
 | 
			
		||||
from numpy cimport ndarray
 | 
			
		||||
 | 
			
		||||
cnp.import_array()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
from pandas._libs cimport util
 | 
			
		||||
from pandas._libs.dtypes cimport numeric_object_t
 | 
			
		||||
from pandas._libs.khash cimport (
 | 
			
		||||
    KHASH_TRACE_DOMAIN,
 | 
			
		||||
    are_equivalent_float32_t,
 | 
			
		||||
    are_equivalent_float64_t,
 | 
			
		||||
    are_equivalent_khcomplex64_t,
 | 
			
		||||
    are_equivalent_khcomplex128_t,
 | 
			
		||||
    kh_needed_n_buckets,
 | 
			
		||||
    kh_python_hash_equal,
 | 
			
		||||
    kh_python_hash_func,
 | 
			
		||||
    khiter_t,
 | 
			
		||||
)
 | 
			
		||||
from pandas._libs.missing cimport checknull


def get_hashtable_trace_domain():
    return KHASH_TRACE_DOMAIN


def object_hash(obj):
    return kh_python_hash_func(obj)


def objects_are_equal(a, b):
    return kh_python_hash_equal(a, b)


cdef int64_t NPY_NAT = util.get_nat()
SIZE_HINT_LIMIT = (1 << 20) + 7


cdef Py_ssize_t _INIT_VEC_CAP = 128

include "hashtable_class_helper.pxi"
include "hashtable_func_helper.pxi"


# map derived hash-map types onto basic hash-map types:
if np.dtype(np.intp) == np.dtype(np.int64):
    IntpHashTable = Int64HashTable
    unique_label_indices = _unique_label_indices_int64
elif np.dtype(np.intp) == np.dtype(np.int32):
    IntpHashTable = Int32HashTable
    unique_label_indices = _unique_label_indices_int32
else:
    raise ValueError(np.dtype(np.intp))


cdef class Factorizer:
    cdef readonly:
        Py_ssize_t count

    def __cinit__(self, size_hint: int):
        self.count = 0

    def get_count(self) -> int:
        return self.count

    def factorize(self, values, na_sentinel=-1, na_value=None, mask=None) -> np.ndarray:
        raise NotImplementedError


cdef class ObjectFactorizer(Factorizer):
    cdef public:
        PyObjectHashTable table
        ObjectVector uniques

    def __cinit__(self, size_hint: int):
        self.table = PyObjectHashTable(size_hint)
        self.uniques = ObjectVector()

    def factorize(
        self, ndarray[object] values, na_sentinel=-1, na_value=None, mask=None
    ) -> np.ndarray:
        """

        Returns
        -------
        np.ndarray[np.intp]

        Examples
        --------
        Factorize values with nans replaced by na_sentinel

        >>> fac = ObjectFactorizer(3)
        >>> fac.factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20)
        array([ 0,  1, 20])
        """
        cdef:
            ndarray[intp_t] labels

        if mask is not None:
            raise NotImplementedError("mask not supported for ObjectFactorizer.")

        if self.uniques.external_view_exists:
            uniques = ObjectVector()
            uniques.extend(self.uniques.to_array())
            self.uniques = uniques
        labels = self.table.get_labels(values, self.uniques,
                                       self.count, na_sentinel, na_value)
        self.count = len(self.uniques)
        return labels
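ObjectFactorizer.factorize assigns each distinct value an integer code in
order of first appearance, keeps missing values out of the uniques, and codes
them as na_sentinel. A minimal pure-Python sketch of that contract
(factorize_sketch is an illustrative stand-in, not pandas API; the real
implementation goes through the khash-backed PyObjectHashTable):

import numpy as np

def factorize_sketch(values, na_sentinel=-1):
    # codes[i] is the position of values[i] in uniques,
    # or na_sentinel for missing entries.
    table, uniques, codes = {}, [], []
    for val in values:
        if val is None or (isinstance(val, float) and np.isnan(val)):
            codes.append(na_sentinel)
            continue
        if val not in table:
            table[val] = len(uniques)
            uniques.append(val)
        codes.append(table[val])
    return np.asarray(codes, dtype=np.intp), uniques

codes, uniques = factorize_sketch([1, 2, np.nan, 1], na_sentinel=20)
# codes -> array([ 0,  1, 20,  0]), uniques -> [1, 2]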
(File diff suppressed because it is too large.)

@@ -0,0 +1,484 @@
"""
Template for each `dtype` helper function for hashtable

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

{{py:

# name, dtype, ttype, c_type, to_c_type
dtypes = [('Complex128', 'complex128', 'complex128',
                         'khcomplex128_t', 'to_khcomplex128_t'),
          ('Complex64', 'complex64', 'complex64',
                        'khcomplex64_t', 'to_khcomplex64_t'),
          ('Float64', 'float64', 'float64', 'float64_t', ''),
          ('Float32', 'float32', 'float32', 'float32_t', ''),
          ('UInt64', 'uint64', 'uint64', 'uint64_t', ''),
          ('UInt32', 'uint32', 'uint32', 'uint32_t', ''),
          ('UInt16', 'uint16', 'uint16', 'uint16_t', ''),
          ('UInt8', 'uint8', 'uint8', 'uint8_t', ''),
          ('Object', 'object', 'pymap', 'object', '<PyObject*>'),
          ('Int64', 'int64', 'int64', 'int64_t', ''),
          ('Int32', 'int32', 'int32', 'int32_t', ''),
          ('Int16', 'int16', 'int16', 'int16_t', ''),
          ('Int8', 'int8', 'int8', 'int8_t', '')]

}}

{{for name, dtype, ttype, c_type, to_c_type in dtypes}}


@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, const uint8_t[:] mask=None):
{{else}}
cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8_t[:] mask=None):
{{endif}}
    cdef:
        Py_ssize_t i = 0
        Py_ssize_t n = len(values)
        kh_{{ttype}}_t *table

        # Don't use Py_ssize_t, since table.n_buckets is unsigned
        khiter_t k

        {{c_type}} val

        int ret = 0
        bint uses_mask = mask is not None
        bint isna_entry = False

    if uses_mask and not dropna:
        raise NotImplementedError("uses_mask not implemented with dropna=False")

    # we track the order in which keys are first seen (GH39009),
    # khash-map isn't insertion-ordered, thus:
    #    table maps keys to counts
    #    result_keys remembers the original order of keys

    result_keys = {{name}}Vector()
    table = kh_init_{{ttype}}()

    {{if dtype == 'object'}}
    if uses_mask:
        raise NotImplementedError("uses_mask not implemented with object dtype")

    kh_resize_{{ttype}}(table, n // 10)

    for i in range(n):
        val = values[i]
        if not dropna or not checknull(val):
            k = kh_get_{{ttype}}(table, {{to_c_type}}val)
            if k != table.n_buckets:
                table.vals[k] += 1
            else:
                k = kh_put_{{ttype}}(table, {{to_c_type}}val, &ret)
                table.vals[k] = 1
                result_keys.append(val)
    {{else}}
    kh_resize_{{ttype}}(table, n)

    for i in range(n):
        val = {{to_c_type}}(values[i])

        if dropna:
            if uses_mask:
                isna_entry = mask[i]
            else:
                isna_entry = is_nan_{{c_type}}(val)

        if not dropna or not isna_entry:
            k = kh_get_{{ttype}}(table, val)
            if k != table.n_buckets:
                table.vals[k] += 1
            else:
                k = kh_put_{{ttype}}(table, val, &ret)
                table.vals[k] = 1
                result_keys.append(val)
    {{endif}}

    # collect counts in the order corresponding to result_keys:
    cdef:
        int64_t[::1] result_counts = np.empty(table.size, dtype=np.int64)

    for i in range(table.size):
        {{if dtype == 'object'}}
        k = kh_get_{{ttype}}(table, result_keys.data[i])
        {{else}}
        k = kh_get_{{ttype}}(table, result_keys.data.data[i])
        {{endif}}
        result_counts[i] = table.vals[k]

    kh_destroy_{{ttype}}(table)

    return result_keys.to_array(), result_counts.base
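The function above returns keys in first-seen order precisely because khash
itself is not insertion-ordered (GH39009): the hash table holds the counts,
and result_keys remembers the order. The same contract in plain Python
(value_count_sketch is illustrative only):

import numpy as np

def value_count_sketch(values, dropna=True):
    # counts keyed by value; keys preserves first-seen order,
    # mirroring the table/result_keys split above.
    counts, keys = {}, []
    for val in values:
        if dropna and isinstance(val, float) and np.isnan(val):
            continue
        if val not in counts:
            counts[val] = 0
            keys.append(val)
        counts[val] += 1
    return keys, np.array([counts[k] for k in keys], dtype=np.int64)

keys, counts = value_count_sketch([3, 1, 3, np.nan, 3])
# keys -> [3, 1], counts -> array([3, 1])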
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
cdef duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first', const uint8_t[:] mask=None):
{{else}}
cdef duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first', const uint8_t[:] mask=None):
{{endif}}
    cdef:
        int ret = 0
        {{if dtype != 'object'}}
        {{c_type}} value
        {{else}}
        PyObject* value
        {{endif}}
        Py_ssize_t i, n = len(values), first_na = -1
        khiter_t k
        kh_{{ttype}}_t *table = kh_init_{{ttype}}()
        ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')
        bint seen_na = False, uses_mask = mask is not None
        bint seen_multiple_na = False

    kh_resize_{{ttype}}(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT))

    if keep not in ('last', 'first', False):
        raise ValueError('keep must be either "first", "last" or False')

    {{for cond, keep in [('if', '"last"'), ('elif', '"first"')]}}
    {{cond}} keep == {{keep}}:
        {{if dtype == 'object'}}
        if True:
        {{else}}
        with nogil:
        {{endif}}
            {{if keep == '"last"'}}
            for i in range(n - 1, -1, -1):
            {{else}}
            for i in range(n):
            {{endif}}
                if uses_mask and mask[i]:
                    if seen_na:
                        out[i] = True
                    else:
                        out[i] = False
                        seen_na = True
                else:
                    value = {{to_c_type}}(values[i])
                    kh_put_{{ttype}}(table, value, &ret)
                    out[i] = ret == 0
    {{endfor}}

    else:
        {{if dtype == 'object'}}
        if True:
        {{else}}
        with nogil:
        {{endif}}
            for i in range(n):
                if uses_mask and mask[i]:
                    if not seen_na:
                        first_na = i
                        seen_na = True
                        out[i] = 0
                    elif not seen_multiple_na:
                        out[i] = 1
                        out[first_na] = 1
                        seen_multiple_na = True
                    else:
                        out[i] = 1

                else:
                    value = {{to_c_type}}(values[i])
                    k = kh_get_{{ttype}}(table, value)
                    if k != table.n_buckets:
                        out[table.vals[k]] = 1
                        out[i] = 1
                    else:
                        k = kh_put_{{ttype}}(table, value, &ret)
                        table.vals[k] = i
                        out[i] = 0

    kh_destroy_{{ttype}}(table)
    return out
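keep decides which occurrence in a group of duplicates is *not* flagged:
"first" keeps the first, "last" runs the same scan from the end, and
keep=False marks every member of a duplicated group (the first_na bookkeeping
above extends this to masked NAs). A rough sketch of the keep="first" branch
for hashable values (duplicated_first_sketch is illustrative, not pandas API):

def duplicated_first_sketch(values):
    # out[i] is True iff values[i] already appeared at a smaller index.
    seen = set()
    out = []
    for val in values:
        out.append(val in seen)
        seen.add(val)
    return out

duplicated_first_sketch([1, 2, 1, 3, 2])  # [False, False, True, False, True]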
# ----------------------------------------------------------------------
# Membership
# ----------------------------------------------------------------------


@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
cdef ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values):
{{else}}
cdef ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values):
{{endif}}
    """
    Return a boolean array indicating, element by element,
    whether each value of arr is contained in values.

    Parameters
    ----------
    arr : {{dtype}} ndarray
    values : {{dtype}} ndarray

    Returns
    -------
    boolean ndarray of len(arr)
    """
    cdef:
        Py_ssize_t i, n
        khiter_t k
        int ret = 0
        ndarray[uint8_t] result

        {{if dtype == "object"}}
        PyObject* val
        {{else}}
        {{c_type}} val
        {{endif}}

        kh_{{ttype}}_t *table = kh_init_{{ttype}}()

    # construct the table
    n = len(values)
    kh_resize_{{ttype}}(table, n)

    {{if dtype == 'object'}}
    if True:
    {{else}}
    with nogil:
    {{endif}}
        for i in range(n):
            val = {{to_c_type}}(values[i])
            kh_put_{{ttype}}(table, val, &ret)

    # test membership
    n = len(arr)
    result = np.empty(n, dtype=np.uint8)

    {{if dtype == 'object'}}
    if True:
    {{else}}
    with nogil:
    {{endif}}
        for i in range(n):
            val = {{to_c_type}}(arr[i])
            k = kh_get_{{ttype}}(table, val)
            result[i] = (k != table.n_buckets)

    kh_destroy_{{ttype}}(table)
    return result.view(np.bool_)
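ismember builds a hash table from values once, then probes it for each element
of arr, which for exact matches is the same result NumPy's isin gives:

import numpy as np

arr = np.array([1, 4, 2, 9], dtype=np.int64)
values = np.array([2, 4], dtype=np.int64)
np.isin(arr, values)  # array([False,  True,  True, False])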
# ----------------------------------------------------------------------
# Mode Computations
# ----------------------------------------------------------------------

{{endfor}}


ctypedef fused htfunc_t:
    numeric_object_t
    complex128_t
    complex64_t


cpdef value_count(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
    if htfunc_t is object:
        return value_count_object(values, dropna, mask=mask)

    elif htfunc_t is int8_t:
        return value_count_int8(values, dropna, mask=mask)
    elif htfunc_t is int16_t:
        return value_count_int16(values, dropna, mask=mask)
    elif htfunc_t is int32_t:
        return value_count_int32(values, dropna, mask=mask)
    elif htfunc_t is int64_t:
        return value_count_int64(values, dropna, mask=mask)

    elif htfunc_t is uint8_t:
        return value_count_uint8(values, dropna, mask=mask)
    elif htfunc_t is uint16_t:
        return value_count_uint16(values, dropna, mask=mask)
    elif htfunc_t is uint32_t:
        return value_count_uint32(values, dropna, mask=mask)
    elif htfunc_t is uint64_t:
        return value_count_uint64(values, dropna, mask=mask)

    elif htfunc_t is float64_t:
        return value_count_float64(values, dropna, mask=mask)
    elif htfunc_t is float32_t:
        return value_count_float32(values, dropna, mask=mask)

    elif htfunc_t is complex128_t:
        return value_count_complex128(values, dropna, mask=mask)
    elif htfunc_t is complex64_t:
        return value_count_complex64(values, dropna, mask=mask)

    else:
        raise TypeError(values.dtype)


cpdef duplicated(ndarray[htfunc_t] values, object keep="first", const uint8_t[:] mask=None):
    if htfunc_t is object:
        return duplicated_object(values, keep, mask=mask)

    elif htfunc_t is int8_t:
        return duplicated_int8(values, keep, mask=mask)
    elif htfunc_t is int16_t:
        return duplicated_int16(values, keep, mask=mask)
    elif htfunc_t is int32_t:
        return duplicated_int32(values, keep, mask=mask)
    elif htfunc_t is int64_t:
        return duplicated_int64(values, keep, mask=mask)

    elif htfunc_t is uint8_t:
        return duplicated_uint8(values, keep, mask=mask)
    elif htfunc_t is uint16_t:
        return duplicated_uint16(values, keep, mask=mask)
    elif htfunc_t is uint32_t:
        return duplicated_uint32(values, keep, mask=mask)
    elif htfunc_t is uint64_t:
        return duplicated_uint64(values, keep, mask=mask)

    elif htfunc_t is float64_t:
        return duplicated_float64(values, keep, mask=mask)
    elif htfunc_t is float32_t:
        return duplicated_float32(values, keep, mask=mask)

    elif htfunc_t is complex128_t:
        return duplicated_complex128(values, keep, mask=mask)
    elif htfunc_t is complex64_t:
        return duplicated_complex64(values, keep, mask=mask)

    else:
        raise TypeError(values.dtype)


cpdef ismember(ndarray[htfunc_t] arr, ndarray[htfunc_t] values):
    if htfunc_t is object:
        return ismember_object(arr, values)

    elif htfunc_t is int8_t:
        return ismember_int8(arr, values)
    elif htfunc_t is int16_t:
        return ismember_int16(arr, values)
    elif htfunc_t is int32_t:
        return ismember_int32(arr, values)
    elif htfunc_t is int64_t:
        return ismember_int64(arr, values)

    elif htfunc_t is uint8_t:
        return ismember_uint8(arr, values)
    elif htfunc_t is uint16_t:
        return ismember_uint16(arr, values)
    elif htfunc_t is uint32_t:
        return ismember_uint32(arr, values)
    elif htfunc_t is uint64_t:
        return ismember_uint64(arr, values)

    elif htfunc_t is float64_t:
        return ismember_float64(arr, values)
    elif htfunc_t is float32_t:
        return ismember_float32(arr, values)

    elif htfunc_t is complex128_t:
        return ismember_complex128(arr, values)
    elif htfunc_t is complex64_t:
        return ismember_complex64(arr, values)

    else:
        raise TypeError(values.dtype)


@cython.wraparound(False)
@cython.boundscheck(False)
def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
    # TODO(cython3): use const htfunc_t[:]

    cdef:
        ndarray[htfunc_t] keys
        ndarray[htfunc_t] modes

        int64_t[::1] counts
        int64_t count, max_count = -1
        Py_ssize_t nkeys, k, j = 0

    keys, counts = value_count(values, dropna, mask=mask)
    nkeys = len(keys)

    modes = np.empty(nkeys, dtype=values.dtype)

    if htfunc_t is not object:
        with nogil:
            for k in range(nkeys):
                count = counts[k]
                if count == max_count:
                    j += 1
                elif count > max_count:
                    max_count = count
                    j = 0
                else:
                    continue

                modes[j] = keys[k]
    else:
        for k in range(nkeys):
            count = counts[k]
            if count == max_count:
                j += 1
            elif count > max_count:
                max_count = count
                j = 0
            else:
                continue

            modes[j] = keys[k]

    return modes[:j + 1]
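mode() leans on value_count returning keys in first-seen order: one pass keeps
every key whose count ties the running maximum, overwriting from position 0
whenever a strictly larger count appears. The same scan in plain Python
(mode_sketch is illustrative only):

def mode_sketch(keys, counts):
    # keys/counts as produced by value_count, aligned by position.
    max_count, modes = -1, []
    for key, count in zip(keys, counts):
        if count > max_count:
            max_count, modes = count, [key]
        elif count == max_count:
            modes.append(key)
    return modes

mode_sketch([3, 1, 2], [2, 2, 1])  # [3, 1]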
{{py:

# name, dtype, ttype, c_type
dtypes = [('Int64', 'int64', 'int64', 'int64_t'),
          ('Int32', 'int32', 'int32', 'int32_t'), ]

}}

{{for name, dtype, ttype, c_type in dtypes}}


@cython.wraparound(False)
@cython.boundscheck(False)
def _unique_label_indices_{{dtype}}(const {{c_type}}[:] labels) -> ndarray:
    """
    Indices of the first occurrences of the unique labels
    *excluding* -1. Equivalent to:
        np.unique(labels, return_index=True)[1]
    """
    cdef:
        int ret = 0
        Py_ssize_t i, n = len(labels)
        kh_{{ttype}}_t *table = kh_init_{{ttype}}()
        {{name}}Vector idx = {{name}}Vector()
        ndarray[{{c_type}}, ndim=1] arr
        {{name}}VectorData *ud = idx.data

    kh_resize_{{ttype}}(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT))

    with nogil:
        for i in range(n):
            kh_put_{{ttype}}(table, labels[i], &ret)
            if ret != 0:
                if needs_resize(ud):
                    with gil:
                        idx.resize()
                append_data_{{ttype}}(ud, i)

    kh_destroy_{{ttype}}(table)

    arr = idx.to_array()
    arr = arr[np.asarray(labels)[arr].argsort()]

    return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr

{{endfor}}
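The docstring's NumPy equivalence is easy to check directly; the only twist is
that a -1 label (the NA sentinel) is dropped from the result. Assuming int64
labels:

import numpy as np

labels = np.array([-1, 0, 0, 2, -1, 2], dtype=np.int64)
idx = np.unique(labels, return_index=True)[1]  # first occurrence of each label
idx = idx[labels[idx] != -1]                   # drop the -1 sentinel
# idx -> array([1, 3]): first 0 is at index 1, first 2 at index 3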
@@ -0,0 +1,30 @@
/*
Copyright (c) 2020, PyData Development Team
All rights reserved.
Distributed under the terms of the BSD Simplified License.
The full license is in the LICENSE file, distributed with this software.
*/

#pragma once

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <numpy/ndarraytypes.h>

// Scales value inplace from nanosecond resolution to unit resolution
int scaleNanosecToUnit(npy_int64 *value, NPY_DATETIMEUNIT unit);

// Converts an int64 object representing a date to ISO format
// up to precision `base` e.g. base="s" yields 2020-01-03T00:00:00Z
// while base="ns" yields "2020-01-01T00:00:00.000000000Z"
// len is mutated to save the length of the returned string
char *int64ToIso(int64_t value,
                 NPY_DATETIMEUNIT valueUnit,
                 NPY_DATETIMEUNIT base,
                 size_t *len);

// TODO(username): this function doesn't do a lot; should augment or
// replace with scaleNanosecToUnit
npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base);

char *int64ToIsoDuration(int64_t value, size_t *len);
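Scaling from nanosecond resolution to a coarser unit is, at heart, an integer
division by the number of nanoseconds per target unit. A back-of-the-envelope
check in Python (illustrative only, not the C implementation):

value_ns = 1_577_836_800_000_000_000  # 2020-01-01T00:00:00Z in nanoseconds
ns_per_second = 1_000_000_000
value_s = value_ns // ns_per_second   # 1_577_836_800, i.e. base="s"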
@@ -0,0 +1,113 @@
/*

Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.
Written by Mark Wiebe (mwwiebe@gmail.com)

Copyright (c) 2011 by Enthought, Inc.
Copyright (c) 2005-2011, NumPy Developers

All rights reserved.
See NUMPY_LICENSE.txt for the license.
*/

#pragma once

#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif  // NPY_NO_DEPRECATED_API

#include <numpy/ndarraytypes.h>
#include "pandas/vendored/numpy/datetime/np_datetime.h"
#include "pandas/vendored/numpy/datetime/np_datetime_strings.h"
#include "pandas/datetime/date_conversions.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
  npy_datetime (*npy_datetimestruct_to_datetime)(NPY_DATETIMEUNIT,
                                                 const npy_datetimestruct *);
  int (*scaleNanosecToUnit)(npy_int64 *, NPY_DATETIMEUNIT);
  char *(*int64ToIso)(int64_t, NPY_DATETIMEUNIT, NPY_DATETIMEUNIT, size_t *);
  npy_datetime (*NpyDateTimeToEpoch)(npy_datetime, NPY_DATETIMEUNIT);
  char *(*PyDateTimeToIso)(PyObject *, NPY_DATETIMEUNIT, size_t *);
  npy_datetime (*PyDateTimeToEpoch)(PyObject *, NPY_DATETIMEUNIT);
  char *(*int64ToIsoDuration)(int64_t, size_t *);
  void (*pandas_datetime_to_datetimestruct)(npy_datetime, NPY_DATETIMEUNIT,
                                            npy_datetimestruct *);
  void (*pandas_timedelta_to_timedeltastruct)(npy_datetime, NPY_DATETIMEUNIT,
                                              pandas_timedeltastruct *);
  int (*convert_pydatetime_to_datetimestruct)(PyObject *, npy_datetimestruct *);
  int (*cmp_npy_datetimestruct)(const npy_datetimestruct *,
                                const npy_datetimestruct *);
  PyArray_DatetimeMetaData (*get_datetime_metadata_from_dtype)(PyArray_Descr *);
  int (*parse_iso_8601_datetime)(const char *, int, int, npy_datetimestruct *,
                                 NPY_DATETIMEUNIT *, int *, int *, const char *,
                                 int, FormatRequirement);
  int (*get_datetime_iso_8601_strlen)(int, NPY_DATETIMEUNIT);
  int (*make_iso_8601_datetime)(npy_datetimestruct *, char *, int, int,
                                NPY_DATETIMEUNIT);
  int (*make_iso_8601_timedelta)(pandas_timedeltastruct *, char *, size_t *);
} PandasDateTime_CAPI;

// The capsule name appears limited to module.attributename; see bpo-32414
// cpython has an open PR gh-6898 to fix, but hasn't had traction for years
#define PandasDateTime_CAPSULE_NAME "pandas._pandas_datetime_CAPI"

/* block used as part of public API */
#ifndef _PANDAS_DATETIME_IMPL
static PandasDateTime_CAPI *PandasDateTimeAPI = NULL;

#define PandasDateTime_IMPORT                                                  \
  PandasDateTimeAPI =                                                          \
      (PandasDateTime_CAPI *)PyCapsule_Import(PandasDateTime_CAPSULE_NAME, 0)

#define npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT, npy_datetimestruct)   \
  PandasDateTimeAPI->npy_datetimestruct_to_datetime((NPY_DATETIMEUNIT),        \
                                                    (npy_datetimestruct))
#define scaleNanosecToUnit(value, unit)                                        \
  PandasDateTimeAPI->scaleNanosecToUnit((value), (unit))
#define int64ToIso(value, valueUnit, base, len)                                \
  PandasDateTimeAPI->int64ToIso((value), (valueUnit), (base), (len))
#define NpyDateTimeToEpoch(dt, base)                                           \
  PandasDateTimeAPI->NpyDateTimeToEpoch((dt), (base))
#define PyDateTimeToIso(obj, base, len)                                        \
  PandasDateTimeAPI->PyDateTimeToIso((obj), (base), (len))
#define PyDateTimeToEpoch(dt, base)                                            \
  PandasDateTimeAPI->PyDateTimeToEpoch((dt), (base))
#define int64ToIsoDuration(value, len)                                         \
  PandasDateTimeAPI->int64ToIsoDuration((value), (len))
#define pandas_datetime_to_datetimestruct(dt, base, out)                       \
  PandasDateTimeAPI->pandas_datetime_to_datetimestruct((dt), (base), (out))
#define pandas_timedelta_to_timedeltastruct(td, base, out)                     \
  PandasDateTimeAPI->pandas_timedelta_to_timedeltastruct((td), (base), (out))
#define convert_pydatetime_to_datetimestruct(dtobj, out)                       \
  PandasDateTimeAPI->convert_pydatetime_to_datetimestruct((dtobj), (out))
#define cmp_npy_datetimestruct(a, b)                                           \
  PandasDateTimeAPI->cmp_npy_datetimestruct((a), (b))
#define get_datetime_metadata_from_dtype(dtype)                                \
  PandasDateTimeAPI->get_datetime_metadata_from_dtype((dtype))
#define parse_iso_8601_datetime(str, len, want_exc, out, out_bestunit,         \
                                out_local, out_tzoffset, format, format_len,   \
                                format_requirement)                            \
  PandasDateTimeAPI->parse_iso_8601_datetime(                                  \
      (str), (len), (want_exc), (out), (out_bestunit), (out_local),            \
      (out_tzoffset), (format), (format_len), (format_requirement))
#define get_datetime_iso_8601_strlen(local, base)                              \
  PandasDateTimeAPI->get_datetime_iso_8601_strlen((local), (base))
#define make_iso_8601_datetime(dts, outstr, outlen, utc, base)                 \
  PandasDateTimeAPI->make_iso_8601_datetime((dts), (outstr), (outlen), (utc),  \
                                            (base))
#define make_iso_8601_timedelta(tds, outstr, outlen)                           \
  PandasDateTimeAPI->make_iso_8601_timedelta((tds), (outstr), (outlen))
#endif /* !defined(_PANDAS_DATETIME_IMPL) */

#ifdef __cplusplus
}
#endif
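PandasDateTime_IMPORT is a thin wrapper around PyCapsule_Import with the
capsule name defined above. The same capsule can be located from Python via
ctypes; a sketch that assumes a pandas build which registers the capsule, with
error handling elided:

import ctypes

cap_import = ctypes.pythonapi.PyCapsule_Import
cap_import.restype = ctypes.c_void_p
cap_import.argtypes = [ctypes.c_char_p, ctypes.c_int]

# Non-NULL pointer to the PandasDateTime_CAPI struct on success;
# PyCapsule_Import imports the owning module from the dotted name itself.
ptr = cap_import(b"pandas._pandas_datetime_CAPI", 0)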
@@ -0,0 +1,24 @@
/*
Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.
*/

#pragma once

#ifndef PANDAS_INLINE
  #if defined(__clang__)
    #define PANDAS_INLINE static __inline__ __attribute__ ((__unused__))
  #elif defined(__GNUC__)
    #define PANDAS_INLINE static __inline__
  #elif defined(_MSC_VER)
    #define PANDAS_INLINE static __inline
  #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define PANDAS_INLINE static inline
  #else
    #define PANDAS_INLINE
  #endif  // __GNUC__
#endif  // PANDAS_INLINE
@@ -0,0 +1,31 @@
/*
Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.
*/

#pragma once

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "tokenizer.h"

#define FS(source) ((file_source *)source)

typedef struct _rd_source {
    PyObject *obj;
    PyObject *buffer;
    size_t position;
} rd_source;

#define RDS(source) ((rd_source *)source)

void *new_rd_source(PyObject *obj);

int del_rd_source(void *src);

void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
                      int *status, const char *encoding_errors);
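rd_source is the bridge that lets the C tokenizer pull bytes from an arbitrary
Python file-like object; it is what makes calls like the following work with
the C engine:

import io
import pandas as pd

buf = io.StringIO("a,b\n1,2\n3,4\n")
df = pd.read_csv(buf, engine="c")  # C tokenizer reading from a Python object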
@@ -0,0 +1,111 @@
/*

Copyright (c) 2023, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

*/
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "pandas/parser/tokenizer.h"

typedef struct {
  int (*to_double)(char *, double *, char, char, int *);
  int (*floatify)(PyObject *, double *, int *);
  void *(*new_rd_source)(PyObject *);
  int (*del_rd_source)(void *);
  void *(*buffer_rd_bytes)(void *, size_t, size_t *, int *, const char *);
  void (*uint_state_init)(uint_state *);
  int (*uint64_conflict)(uint_state *);
  void (*coliter_setup)(coliter_t *, parser_t *, int64_t, int64_t);
  parser_t *(*parser_new)(void);
  int (*parser_init)(parser_t *);
  void (*parser_free)(parser_t *);
  void (*parser_del)(parser_t *);
  int (*parser_add_skiprow)(parser_t *, int64_t);
  int (*parser_set_skipfirstnrows)(parser_t *, int64_t);
  void (*parser_set_default_options)(parser_t *);
  int (*parser_consume_rows)(parser_t *, size_t);
  int (*parser_trim_buffers)(parser_t *);
  int (*tokenize_all_rows)(parser_t *, const char *);
  int (*tokenize_nrows)(parser_t *, size_t, const char *);
  int64_t (*str_to_int64)(const char *, int64_t, int64_t, int *, char);
  uint64_t (*str_to_uint64)(uint_state *, const char *, int64_t, uint64_t,
                            int *, char);
  double (*xstrtod)(const char *, char **, char, char, char, int, int *, int *);
  double (*precise_xstrtod)(const char *, char **, char, char, char, int, int *,
                            int *);
  double (*round_trip)(const char *, char **, char, char, char, int, int *,
                       int *);
  int (*to_boolean)(const char *, uint8_t *);
} PandasParser_CAPI;

#define PandasParser_CAPSULE_NAME "pandas._pandas_parser_CAPI"

#ifndef _PANDAS_PARSER_IMPL
static PandasParser_CAPI *PandasParserAPI = NULL;

#define PandasParser_IMPORT                                                    \
  PandasParserAPI =                                                            \
      (PandasParser_CAPI *)PyCapsule_Import(PandasParser_CAPSULE_NAME, 0)

#define to_double(item, p_value, sci, decimal, maybe_int)                      \
  PandasParserAPI->to_double((item), (p_value), (sci), (decimal), (maybe_int))
#define floatify(str, result, maybe_int)                                       \
  PandasParserAPI->floatify((str), (result), (maybe_int))
#define new_rd_source(obj) PandasParserAPI->new_rd_source((obj))
#define del_rd_source(src) PandasParserAPI->del_rd_source((src))
#define buffer_rd_bytes(source, nbytes, bytes_read, status, encoding_errors)   \
  PandasParserAPI->buffer_rd_bytes((source), (nbytes), (bytes_read), (status), \
                                   (encoding_errors))
#define uint_state_init(self) PandasParserAPI->uint_state_init((self))
#define uint64_conflict(self) PandasParserAPI->uint64_conflict((self))
#define coliter_setup(self, parser, i, start)                                  \
  PandasParserAPI->coliter_setup((self), (parser), (i), (start))
#define parser_new PandasParserAPI->parser_new
#define parser_init(self) PandasParserAPI->parser_init((self))
#define parser_free(self) PandasParserAPI->parser_free((self))
#define parser_del(self) PandasParserAPI->parser_del((self))
#define parser_add_skiprow(self, row)                                          \
  PandasParserAPI->parser_add_skiprow((self), (row))
#define parser_set_skipfirstnrows(self, nrows)                                 \
  PandasParserAPI->parser_set_skipfirstnrows((self), (nrows))
#define parser_set_default_options(self)                                       \
  PandasParserAPI->parser_set_default_options((self))
#define parser_consume_rows(self, nrows)                                       \
  PandasParserAPI->parser_consume_rows((self), (nrows))
#define parser_trim_buffers(self)                                              \
  PandasParserAPI->parser_trim_buffers((self))
#define tokenize_all_rows(self, encoding_errors)                        \
  PandasParserAPI->tokenize_all_rows((self), (encoding_errors))
#define tokenize_nrows(self, nrows, encoding_errors)                    \
  PandasParserAPI->tokenize_nrows((self), (nrows), (encoding_errors))
#define str_to_int64(p_item, int_min, int_max, error, t_sep)                   \
  PandasParserAPI->str_to_int64((p_item), (int_min), (int_max), (error),       \
                                (t_sep))
#define str_to_uint64(state, p_item, int_max, uint_max, error, t_sep)          \
  PandasParserAPI->str_to_uint64((state), (p_item), (int_max), (uint_max),     \
                                 (error), (t_sep))
#define xstrtod(p, q, decimal, sci, tsep, skip_trailing, error, maybe_int)     \
  PandasParserAPI->xstrtod((p), (q), (decimal), (sci), (tsep),                 \
                           (skip_trailing), (error), (maybe_int))
#define precise_xstrtod(p, q, decimal, sci, tsep, skip_trailing, error,        \
                        maybe_int)                                             \
  PandasParserAPI->precise_xstrtod((p), (q), (decimal), (sci), (tsep),         \
                                   (skip_trailing), (error), (maybe_int))
#define round_trip(p, q, decimal, sci, tsep, skip_trailing, error, maybe_int)  \
  PandasParserAPI->round_trip((p), (q), (decimal), (sci), (tsep),              \
                              (skip_trailing), (error), (maybe_int))
#define to_boolean(item, val) PandasParserAPI->to_boolean((item), (val))
#endif  /* !defined(_PANDAS_PARSER_IMPL) */

#ifdef __cplusplus
}
#endif
@@ -0,0 +1,233 @@
/*

Copyright (c) 2012, Lambda Foundry, Inc., except where noted

Incorporates components of WarrenWeckesser/textreader, licensed under 3-clause
BSD

See LICENSE for the license

*/

#pragma once

#define PY_SSIZE_T_CLEAN
#include <Python.h>

#define ERROR_NO_DIGITS 1
#define ERROR_OVERFLOW 2
#define ERROR_INVALID_CHARS 3

#include <stdint.h>
#include "pandas/inline_helper.h"
#include "pandas/portable.h"

#include "pandas/vendored/klib/khash.h"

#define STREAM_INIT_SIZE 32

#define REACHED_EOF 1
#define CALLING_READ_FAILED 2


/*

  C flat file parsing low level code for pandas / NumPy

 */

/*
 *  Common set of error types for the read_rows() and tokenize()
 *  functions.
 */

// #define VERBOSE
#if defined(VERBOSE)
#define TRACE(X) printf X;
#else
#define TRACE(X)
#endif  // VERBOSE

#define PARSER_OUT_OF_MEMORY -1

/*
 *  TODO: Might want to couple count_rows() with read_rows() to avoid
 *        duplication of some file I/O.
 */

typedef enum {
    START_RECORD,
    START_FIELD,
    ESCAPED_CHAR,
    IN_FIELD,
    IN_QUOTED_FIELD,
    ESCAPE_IN_QUOTED_FIELD,
    QUOTE_IN_QUOTED_FIELD,
    EAT_CRNL,
    EAT_CRNL_NOP,
    EAT_WHITESPACE,
    EAT_COMMENT,
    EAT_LINE_COMMENT,
    WHITESPACE_LINE,
    START_FIELD_IN_SKIP_LINE,
    IN_FIELD_IN_SKIP_LINE,
    IN_QUOTED_FIELD_IN_SKIP_LINE,
    QUOTE_IN_QUOTED_FIELD_IN_SKIP_LINE,
    FINISHED
} ParserState;

typedef enum {
    QUOTE_MINIMAL,
    QUOTE_ALL,
    QUOTE_NONNUMERIC,
    QUOTE_NONE
} QuoteStyle;

typedef enum {
    ERROR,
    WARN,
    SKIP
} BadLineHandleMethod;

typedef void *(*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
                             int *status, const char *encoding_errors);
typedef int (*io_cleanup)(void *src);

typedef struct parser_t {
    void *source;
    io_callback cb_io;
    io_cleanup cb_cleanup;

    int64_t chunksize;      // Number of bytes to prepare for each chunk
    char *data;             // pointer to data to be processed
    int64_t datalen;        // amount of data available
    int64_t datapos;

    // where to write out tokenized data
    char *stream;
    uint64_t stream_len;
    uint64_t stream_cap;

    // Store words in (potentially ragged) matrix for now, hmm
    char **words;
    int64_t *word_starts;   // where we are in the stream
    uint64_t words_len;
    uint64_t words_cap;
    uint64_t max_words_cap;  // maximum word cap encountered

    char *pword_start;      // pointer to stream start of current field
    int64_t word_start;     // position start of current field

    int64_t *line_start;    // position in words for start of line
    int64_t *line_fields;   // Number of fields in each line
    uint64_t lines;         // Number of (good) lines observed
    uint64_t file_lines;    // Number of lines (including bad or skipped)
    uint64_t lines_cap;     // Vector capacity

    // Tokenizing stuff
    ParserState state;
    int doublequote;      /* is " represented by ""? */
    char delimiter;       /* field separator */
    int delim_whitespace; /* delimit by consuming space/tabs instead */
    char quotechar;       /* quote character */
    char escapechar;      /* escape character */
    char lineterminator;
    int skipinitialspace; /* ignore spaces following delimiter? */
    int quoting;          /* style of quoting to write */

    char commentchar;
    int allow_embedded_newline;

    int usecols;  // Boolean: 1: usecols provided, 0: none provided

    Py_ssize_t expected_fields;
    BadLineHandleMethod on_bad_lines;

    // floating point options
    char decimal;
    char sci;

    // thousands separator (comma, period)
    char thousands;

    int header;            // Boolean: 1: has header, 0: no header
    int64_t header_start;  // header row start
    uint64_t header_end;   // header row end

    void *skipset;
    PyObject *skipfunc;
    int64_t skip_first_N_rows;
    int64_t skip_footer;
    double (*double_converter)(const char *, char **,
                               char, char, char, int, int *, int *);

    // error handling
    char *warn_msg;
    char *error_msg;

    int skip_empty_lines;
} parser_t;

typedef struct coliter_t {
    char **words;
    int64_t *line_start;
    int64_t col;
} coliter_t;

void coliter_setup(coliter_t *self, parser_t *parser, int64_t i, int64_t start);

#define COLITER_NEXT(iter, word)                           \
    do {                                                   \
        const int64_t i = *iter.line_start++ + iter.col;   \
        word = i >= *iter.line_start ? "" : iter.words[i]; \
    } while (0)

parser_t *parser_new(void);

int parser_init(parser_t *self);

int parser_consume_rows(parser_t *self, size_t nrows);

int parser_trim_buffers(parser_t *self);

int parser_add_skiprow(parser_t *self, int64_t row);

int parser_set_skipfirstnrows(parser_t *self, int64_t nrows);

void parser_free(parser_t *self);

void parser_del(parser_t *self);

void parser_set_default_options(parser_t *self);

int tokenize_nrows(parser_t *self, size_t nrows, const char *encoding_errors);

int tokenize_all_rows(parser_t *self, const char *encoding_errors);

// Have parsed / type-converted a chunk of data
// and want to free memory from the token stream

typedef struct uint_state {
    int seen_sint;
    int seen_uint;
    int seen_null;
} uint_state;

void uint_state_init(uint_state *self);

int uint64_conflict(uint_state *self);

uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max,
                       uint64_t uint_max, int *error, char tsep);
int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max,
                     int *error, char tsep);
double xstrtod(const char *p, char **q, char decimal, char sci, char tsep,
               int skip_trailing, int *error, int *maybe_int);
double precise_xstrtod(const char *p, char **q, char decimal,
                       char sci, char tsep, int skip_trailing,
                       int *error, int *maybe_int);

// GH-15140 - round_trip requires and acquires the GIL on its own
double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
                  int skip_trailing, int *error, int *maybe_int);
int to_boolean(const char *item, uint8_t *val);
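The configurable decimal, sci, and thousands characters in parser_t (fed into
xstrtod and precise_xstrtod) are what surface at the Python level as read_csv
options, e.g. for European-formatted numbers:

import io
import pandas as pd

csv = io.StringIO("price\n1.234,56\n")
df = pd.read_csv(csv, decimal=",", thousands=".")
df["price"][0]  # 1234.56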
@@ -0,0 +1,24 @@
/*
Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.
*/

#pragma once

#include <string.h>

#if defined(_MSC_VER)
#define strcasecmp(s1, s2) _stricmp(s1, s2)
#endif

// GH-23516 - works around locale perf issues
// from MUSL libc, MIT Licensed - see LICENSES
#define isdigit_ascii(c) (((unsigned)(c) - '0') < 10u)
#define getdigit_ascii(c, default) (isdigit_ascii(c) ? ((int)((c) - '0')) : default)
#define isspace_ascii(c) (((c) == ' ') || (((unsigned)(c) - '\t') < 5))
#define toupper_ascii(c) ((((unsigned)(c) - 'a') < 26) ? ((c) & 0x5f) : (c))
#define tolower_ascii(c) ((((unsigned)(c) - 'A') < 26) ? ((c) | 0x20) : (c))
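The MUSL-derived macros avoid locale lookups by exploiting unsigned
wraparound: subtracting '0' from any character below '0' wraps around to a
huge unsigned value, so one comparison covers both ends of the range. The same
trick spelled out in Python with explicit 32-bit wraparound:

def isdigit_ascii(c):
    # (unsigned)(c - '0') < 10 in C; emulate the wraparound with % 2**32
    return ((ord(c) - ord("0")) % 2**32) < 10

isdigit_ascii("7"), isdigit_ascii("/"), isdigit_ascii("a")  # True, False, False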
@@ -0,0 +1,297 @@
/*
Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.

Flexibly-sized, index-able skiplist data structure for maintaining a sorted
list of values

Port of Wes McKinney's Cython version of Raymond Hettinger's original pure
Python recipe (https://rhettinger.wordpress.com/2010/02/06/lost-knowledge/)
*/

#pragma once

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pandas/inline_helper.h"

PANDAS_INLINE float __skiplist_nanf(void) {
    const union {
        int __i;
        float __f;
    } __bint = {0x7fc00000UL};
    return __bint.__f;
}
#define PANDAS_NAN ((double)__skiplist_nanf())

PANDAS_INLINE double Log2(double val) { return log(val) / log(2.); }

typedef struct node_t node_t;

struct node_t {
    node_t **next;
    int *width;
    double value;
    int is_nil;
    int levels;
    int ref_count;
};

typedef struct {
    node_t *head;
    node_t **tmp_chain;
    int *tmp_steps;
    int size;
    int maxlevels;
} skiplist_t;

PANDAS_INLINE double urand(void) {
    return ((double)rand() + 1) / ((double)RAND_MAX + 2);
}

PANDAS_INLINE int int_min(int a, int b) { return a < b ? a : b; }

PANDAS_INLINE node_t *node_init(double value, int levels) {
    node_t *result;
    result = (node_t *)malloc(sizeof(node_t));
    if (result) {
        result->value = value;
        result->levels = levels;
        result->is_nil = 0;
        result->ref_count = 0;
        result->next = (node_t **)malloc(levels * sizeof(node_t *));
        result->width = (int *)malloc(levels * sizeof(int));
        if (!(result->next && result->width) && (levels != 0)) {
            free(result->next);
            free(result->width);
            free(result);
            return NULL;
        }
    }
    return result;
}

// do this ourselves
PANDAS_INLINE void node_incref(node_t *node) { ++(node->ref_count); }

PANDAS_INLINE void node_decref(node_t *node) { --(node->ref_count); }

static void node_destroy(node_t *node) {
    int i;
    if (node) {
        if (node->ref_count <= 1) {
            for (i = 0; i < node->levels; ++i) {
                node_destroy(node->next[i]);
            }
            free(node->next);
            free(node->width);
            // printf("Reference count was 1, freeing\n");
            free(node);
        } else {
            node_decref(node);
        }
        // pretty sure that freeing the struct above will be enough
    }
}

PANDAS_INLINE void skiplist_destroy(skiplist_t *skp) {
    if (skp) {
        node_destroy(skp->head);
        free(skp->tmp_steps);
        free(skp->tmp_chain);
        free(skp);
    }
}

PANDAS_INLINE skiplist_t *skiplist_init(int expected_size) {
    skiplist_t *result;
    node_t *NIL, *head;
    int maxlevels, i;

    maxlevels = 1 + Log2((double)expected_size);
    result = (skiplist_t *)malloc(sizeof(skiplist_t));
    if (!result) {
        return NULL;
    }
    result->tmp_chain = (node_t **)malloc(maxlevels * sizeof(node_t *));
    result->tmp_steps = (int *)malloc(maxlevels * sizeof(int));
    result->maxlevels = maxlevels;
    result->size = 0;

    head = result->head = node_init(PANDAS_NAN, maxlevels);
    NIL = node_init(0.0, 0);

    if (!(result->tmp_chain && result->tmp_steps && result->head && NIL)) {
        skiplist_destroy(result);
        node_destroy(NIL);
        return NULL;
    }

    node_incref(head);

    NIL->is_nil = 1;

    for (i = 0; i < maxlevels; ++i) {
        head->next[i] = NIL;
        head->width[i] = 1;
        node_incref(NIL);
    }

    return result;
}

// 1 if left < right, 0 if left == right, -1 if left > right
PANDAS_INLINE int _node_cmp(node_t *node, double value) {
    if (node->is_nil || node->value > value) {
        return -1;
    } else if (node->value < value) {
        return 1;
    } else {
        return 0;
    }
}

PANDAS_INLINE double skiplist_get(skiplist_t *skp, int i, int *ret) {
    node_t *node;
    int level;

    if (i < 0 || i >= skp->size) {
        *ret = 0;
        return 0;
    }

    node = skp->head;
    ++i;
    for (level = skp->maxlevels - 1; level >= 0; --level) {
        while (node->width[level] <= i) {
            i -= node->width[level];
            node = node->next[level];
        }
    }

    *ret = 1;
    return node->value;
}

// Returns the lowest rank of all elements with value `value`, as opposed to the
// highest rank returned by `skiplist_insert`.
PANDAS_INLINE int skiplist_min_rank(skiplist_t *skp, double value) {
    node_t *node;
    int level, rank = 0;

    node = skp->head;
    for (level = skp->maxlevels - 1; level >= 0; --level) {
        while (_node_cmp(node->next[level], value) > 0) {
            rank += node->width[level];
            node = node->next[level];
        }
    }

    return rank + 1;
}
// Returns the rank of the inserted element. When there are duplicates,
 | 
			
		||||
// `rank` is the highest of the group, i.e. the 'max' method of
 | 
			
		||||
// https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rank.html
 | 
			
		||||
PANDAS_INLINE int skiplist_insert(skiplist_t *skp, double value) {
 | 
			
		||||
    node_t *node, *prevnode, *newnode, *next_at_level;
 | 
			
		||||
    int *steps_at_level;
 | 
			
		||||
    int size, steps, level, rank = 0;
 | 
			
		||||
    node_t **chain;
 | 
			
		||||
 | 
			
		||||
    chain = skp->tmp_chain;
 | 
			
		||||
 | 
			
		||||
    steps_at_level = skp->tmp_steps;
 | 
			
		||||
    memset(steps_at_level, 0, skp->maxlevels * sizeof(int));
 | 
			
		||||
 | 
			
		||||
    node = skp->head;
 | 
			
		||||
 | 
			
		||||
    for (level = skp->maxlevels - 1; level >= 0; --level) {
 | 
			
		||||
        next_at_level = node->next[level];
 | 
			
		||||
        while (_node_cmp(next_at_level, value) >= 0) {
 | 
			
		||||
            steps_at_level[level] += node->width[level];
 | 
			
		||||
            rank += node->width[level];
 | 
			
		||||
            node = next_at_level;
 | 
			
		||||
            next_at_level = node->next[level];
 | 
			
		||||
        }
 | 
			
		||||
        chain[level] = node;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    size = int_min(skp->maxlevels, 1 - ((int)Log2(urand())));
 | 
			
		||||
 | 
			
		||||
    newnode = node_init(value, size);
 | 
			
		||||
    if (!newnode) {
 | 
			
		||||
        return -1;
 | 
			
		||||
    }
 | 
			
		||||
    steps = 0;
 | 
			
		||||
 | 
			
		||||
    for (level = 0; level < size; ++level) {
 | 
			
		||||
        prevnode = chain[level];
 | 
			
		||||
        newnode->next[level] = prevnode->next[level];
 | 
			
		||||
 | 
			
		||||
        prevnode->next[level] = newnode;
 | 
			
		||||
        node_incref(newnode);  // increment the reference count
 | 
			
		||||
 | 
			
		||||
        newnode->width[level] = prevnode->width[level] - steps;
 | 
			
		||||
        prevnode->width[level] = steps + 1;
 | 
			
		||||
 | 
			
		||||
        steps += steps_at_level[level];
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for (level = size; level < skp->maxlevels; ++level) {
 | 
			
		||||
        chain[level]->width[level] += 1;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    ++(skp->size);
 | 
			
		||||
 | 
			
		||||
    return rank + 1;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
PANDAS_INLINE int skiplist_remove(skiplist_t *skp, double value) {
 | 
			
		||||
    int level, size;
 | 
			
		||||
    node_t *node, *prevnode, *tmpnode, *next_at_level;
 | 
			
		||||
    node_t **chain;
 | 
			
		||||
 | 
			
		||||
    chain = skp->tmp_chain;
 | 
			
		||||
    node = skp->head;
 | 
			
		||||
 | 
			
		||||
    for (level = skp->maxlevels - 1; level >= 0; --level) {
 | 
			
		||||
        next_at_level = node->next[level];
 | 
			
		||||
        while (_node_cmp(next_at_level, value) > 0) {
 | 
			
		||||
            node = next_at_level;
 | 
			
		||||
            next_at_level = node->next[level];
 | 
			
		||||
        }
 | 
			
		||||
        chain[level] = node;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (value != chain[0]->next[0]->value) {
 | 
			
		||||
        return 0;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    size = chain[0]->next[0]->levels;
 | 
			
		||||
 | 
			
		||||
    for (level = 0; level < size; ++level) {
 | 
			
		||||
        prevnode = chain[level];
 | 
			
		||||
 | 
			
		||||
        tmpnode = prevnode->next[level];
 | 
			
		||||
 | 
			
		||||
        prevnode->width[level] += tmpnode->width[level] - 1;
 | 
			
		||||
        prevnode->next[level] = tmpnode->next[level];
 | 
			
		||||
 | 
			
		||||
        tmpnode->next[level] = NULL;
 | 
			
		||||
        node_destroy(tmpnode);  // decrement refcount or free
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for (level = size; level < skp->maxlevels; ++level) {
 | 
			
		||||
        --(chain[level]->width[level]);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    --(skp->size);
 | 
			
		||||
    return 1;
 | 
			
		||||
}
 | 
			
		||||
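
// The functions above form an indexable skip list keyed by double:
// skiplist_insert/skiplist_remove are O(log n), and skiplist_get returns the
// i-th smallest element, which is what rolling median/quantile code can build
// on. A minimal usage sketch, assuming the earlier definitions of this header
// (node_t, Log2, PANDAS_NAN) are in scope; the helper name is hypothetical
// and the sketch is illustrative only:

#include <stdio.h>

int skiplist_demo(void) {
    int ok;
    skiplist_t *sl = skiplist_init(16);  /* expected size is only a hint */
    if (!sl) return 1;
    skiplist_insert(sl, 3.0);
    skiplist_insert(sl, 1.0);
    skiplist_insert(sl, 2.0);
    /* middle of the sorted order {1.0, 2.0, 3.0} */
    double med = skiplist_get(sl, sl->size / 2, &ok);
    if (ok) printf("median = %f\n", med);  /* prints median = 2.000000 */
    skiplist_remove(sl, 1.0);
    skiplist_destroy(sl);
    return 0;
}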
@@ -0,0 +1,719 @@
/* The MIT License

   Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   "Software"), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be
   included in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE.
*/

/*
  An example:

#include "khash.h"
KHASH_MAP_INIT_INT(32, char)
int main() {
	int ret, is_missing;
	khiter_t k;
	khash_t(32) *h = kh_init(32);
	k = kh_put(32, h, 5, &ret);
	if (!ret) kh_del(32, h, k);
	kh_value(h, k) = 10;
	k = kh_get(32, h, 10);
	is_missing = (k == kh_end(h));
	k = kh_get(32, h, 5);
	kh_del(32, h, k);
	for (k = kh_begin(h); k != kh_end(h); ++k)
		if (kh_exist(h, k)) kh_value(h, k) = 1;
	kh_destroy(32, h);
	return 0;
}
*/

/*
  2011-09-16 (0.2.6):

	* The capacity is a power of 2. This seems to dramatically improve the
	  speed for simple keys. Thanks to Zilong Tan for the suggestion. Reference:

	   - https://github.com/stefanocasazza/ULib
	   - https://nothings.org/computer/judy/

	* Allow optional use of linear probing, which usually has better
	  performance for random input. Double hashing is still the default as it
	  is more robust to certain non-random input.

	* Added Wang's integer hash function (not used by default). This hash
	  function is more robust to certain non-random input.

  2011-02-14 (0.2.5):

    * Allow declaration of global functions.

  2009-09-26 (0.2.4):

    * Improved portability

  2008-09-19 (0.2.3):

	* Corrected the example
	* Improved interfaces

  2008-09-11 (0.2.2):

	* Improved speed a little in kh_put()

  2008-09-10 (0.2.1):

	* Added kh_clear()
	* Fixed a compiling error

  2008-09-02 (0.2.0):

	* Changed to token concatenation, which increases flexibility.

  2008-08-31 (0.1.2):

	* Fixed a bug in kh_get(), which had not been tested previously.

  2008-08-31 (0.1.1):

	* Added destructor
*/


#ifndef __AC_KHASH_H
#define __AC_KHASH_H

/*!
  @header

  Generic hash table library.
 */

#define AC_VERSION_KHASH_H "0.2.6"

#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "pandas/inline_helper.h"


// hooks for memory allocator, C-runtime allocator used per default
#ifndef KHASH_MALLOC
#define KHASH_MALLOC malloc
#endif

#ifndef KHASH_REALLOC
#define KHASH_REALLOC realloc
#endif

#ifndef KHASH_CALLOC
#define KHASH_CALLOC calloc
#endif

#ifndef KHASH_FREE
#define KHASH_FREE free
#endif


#if UINT_MAX == 0xffffffffu
typedef unsigned int khuint32_t;
typedef signed int khint32_t;
#elif ULONG_MAX == 0xffffffffu
typedef unsigned long khuint32_t;
typedef signed long khint32_t;
#endif

#if ULONG_MAX == ULLONG_MAX
typedef unsigned long khuint64_t;
typedef signed long khint64_t;
#else
typedef unsigned long long khuint64_t;
typedef signed long long khint64_t;
#endif

#if UINT_MAX == 0xffffu
typedef unsigned int khuint16_t;
typedef signed int khint16_t;
#elif USHRT_MAX == 0xffffu
typedef unsigned short khuint16_t;
typedef signed short khint16_t;
#endif

#if UCHAR_MAX == 0xffu
typedef unsigned char khuint8_t;
typedef signed char khint8_t;
#endif

typedef double khfloat64_t;
typedef float khfloat32_t;

typedef khuint32_t khuint_t;
typedef khuint_t khiter_t;

#define __ac_isempty(flag, i) ((flag[i>>5]>>(i&0x1fU))&1)
#define __ac_isdel(flag, i) (0)
#define __ac_iseither(flag, i) __ac_isempty(flag, i)
#define __ac_set_isdel_false(flag, i) (0)
#define __ac_set_isempty_false(flag, i) (flag[i>>5]&=~(1ul<<(i&0x1fU)))
#define __ac_set_isempty_true(flag, i) (flag[i>>5]|=(1ul<<(i&0x1fU)))
#define __ac_set_isboth_false(flag, i) __ac_set_isempty_false(flag, i)
#define __ac_set_isdel_true(flag, i) ((void)0)


// specializations of https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp
khuint32_t PANDAS_INLINE murmur2_32to32(khuint32_t k){
    const khuint32_t SEED = 0xc70f6907UL;
    // 'm' and 'r' are mixing constants generated offline.
    // They're not really 'magic', they just happen to work well.
    const khuint32_t M_32 = 0x5bd1e995;
    const int R_32 = 24;

    // Initialize the hash to a 'random' value
    khuint32_t h = SEED ^ 4;

    // handle 4 bytes:
    k *= M_32;
    k ^= k >> R_32;
    k *= M_32;

    h *= M_32;
    h ^= k;

    // Do a few final mixes of the hash to ensure the "last few
    // bytes" are well-incorporated. (Really needed here?)
    h ^= h >> 13;
    h *= M_32;
    h ^= h >> 15;
    return h;
}

// It would be possible to have a special x64 version, which would need fewer
// operations, but using the 32-bit version everywhere also has some benefits:
//    - a single code path for 32-bit and 64-bit builds
//    - the same hash values on 32-bit and 64-bit builds
//    - no performance difference could be measured compared to a possible x64 version

khuint32_t PANDAS_INLINE murmur2_32_32to32(khuint32_t k1, khuint32_t k2){
    const khuint32_t SEED = 0xc70f6907UL;
    // 'm' and 'r' are mixing constants generated offline.
    // They're not really 'magic', they just happen to work well.
    const khuint32_t M_32 = 0x5bd1e995;
    const int R_32 = 24;

    // Initialize the hash to a 'random' value
    khuint32_t h = SEED ^ 4;

    // handle first 4 bytes:
    k1 *= M_32;
    k1 ^= k1 >> R_32;
    k1 *= M_32;

    h *= M_32;
    h ^= k1;

    // handle second 4 bytes:
    k2 *= M_32;
    k2 ^= k2 >> R_32;
    k2 *= M_32;

    h *= M_32;
    h ^= k2;

    // Do a few final mixes of the hash to ensure the "last few
    // bytes" are well-incorporated.
    h ^= h >> 13;
    h *= M_32;
    h ^= h >> 15;
    return h;
}

khuint32_t PANDAS_INLINE murmur2_64to32(khuint64_t k){
    khuint32_t k1 = (khuint32_t)k;
    khuint32_t k2 = (khuint32_t)(k >> 32);

    return murmur2_32_32to32(k1, k2);
}


#ifdef KHASH_LINEAR
#define __ac_inc(k, m) 1
#else
#define __ac_inc(k, m) (murmur2_32to32(k) | 1) & (m)
#endif

#define __ac_fsize(m) ((m) < 32? 1 : (m)>>5)

#ifndef kroundup32
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
#endif

static const double __ac_HASH_UPPER = 0.77;

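// kroundup32 rounds a 32-bit count up to the next power of two: decrement,
// smear the highest set bit into every lower bit, then increment. A worked
// trace (input chosen only for illustration):
//
//     x = 37;                           // 0b100101
//     --x;                              // 36 = 0b100100
//     x |= x>>1; x |= x>>2;             // smear the top bit downward
//     x |= x>>4; x |= x>>8; x |= x>>16; // 63 = 0b111111
//     ++x;                              // 64 = next power of two
//
// With __ac_HASH_UPPER = 0.77, the resulting 64-slot table is rehashed once
// it holds (khuint_t)(64 * 0.77 + 0.5) = 49 elements.
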
#define KHASH_DECLARE(name, khkey_t, khval_t)		 					\
	typedef struct {													\
		khuint_t n_buckets, size, n_occupied, upper_bound;				\
		khuint32_t *flags;												\
		khkey_t *keys;													\
		khval_t *vals;													\
	} kh_##name##_t;													\
	extern kh_##name##_t *kh_init_##name(void);							\
	extern void kh_destroy_##name(kh_##name##_t *h);					\
	extern void kh_clear_##name(kh_##name##_t *h);						\
	extern khuint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); 	\
	extern void kh_resize_##name(kh_##name##_t *h, khuint_t new_n_buckets); \
	extern khuint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
	extern void kh_del_##name(kh_##name##_t *h, khuint_t x);

#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	typedef struct {													\
		khuint_t n_buckets, size, n_occupied, upper_bound;				\
		khuint32_t *flags;												\
		khkey_t *keys;													\
		khval_t *vals;													\
	} kh_##name##_t;													\
	SCOPE kh_##name##_t *kh_init_##name(void) {							\
		return (kh_##name##_t*)KHASH_CALLOC(1, sizeof(kh_##name##_t));	\
	}																	\
	SCOPE void kh_destroy_##name(kh_##name##_t *h)						\
	{																	\
		if (h) {														\
			KHASH_FREE(h->keys); KHASH_FREE(h->flags);					\
			KHASH_FREE(h->vals);										\
			KHASH_FREE(h);												\
		}																\
	}																	\
	SCOPE void kh_clear_##name(kh_##name##_t *h)						\
	{																	\
		if (h && h->flags) {											\
			memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khuint32_t)); \
			h->size = h->n_occupied = 0;								\
		}																\
	}																	\
	SCOPE khuint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) 	\
	{																	\
		if (h->n_buckets) {												\
			khuint_t inc, k, i, last, mask;								\
			mask = h->n_buckets - 1;									\
			k = __hash_func(key); i = k & mask;							\
			inc = __ac_inc(k, mask); last = i; /* inc==1 for linear probing */ \
			while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
				i = (i + inc) & mask; 									\
				if (i == last) return h->n_buckets;						\
			}															\
			return __ac_iseither(h->flags, i)? h->n_buckets : i;		\
		} else return 0;												\
	}																	\
	SCOPE void kh_resize_##name(kh_##name##_t *h, khuint_t new_n_buckets) \
	{ /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
		khuint32_t *new_flags = 0;										\
		khuint_t j = 1;													\
		{																\
			kroundup32(new_n_buckets); 									\
			if (new_n_buckets < 4) new_n_buckets = 4;					\
			if (h->size >= (khuint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0;	/* requested size is too small */ \
			else { /* hash table size to be changed (shrink or expand); rehash */ \
				new_flags = (khuint32_t*)KHASH_MALLOC(__ac_fsize(new_n_buckets) * sizeof(khuint32_t));	\
				memset(new_flags, 0xff, __ac_fsize(new_n_buckets) * sizeof(khuint32_t)); \
				if (h->n_buckets < new_n_buckets) {	/* expand */		\
					h->keys = (khkey_t*)KHASH_REALLOC(h->keys, new_n_buckets * sizeof(khkey_t)); \
					if (kh_is_map) h->vals = (khval_t*)KHASH_REALLOC(h->vals, new_n_buckets * sizeof(khval_t)); \
				} /* otherwise shrink */								\
			}															\
		}																\
		if (j) { /* rehashing is needed */								\
			for (j = 0; j != h->n_buckets; ++j) {						\
				if (__ac_iseither(h->flags, j) == 0) {					\
					khkey_t key = h->keys[j];							\
					khval_t val;										\
					khuint_t new_mask;									\
					new_mask = new_n_buckets - 1; 						\
					if (kh_is_map) val = h->vals[j];					\
					__ac_set_isempty_true(h->flags, j);					\
					while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
						khuint_t inc, k, i;								\
						k = __hash_func(key);							\
						i = k & new_mask;								\
						inc = __ac_inc(k, new_mask);					\
						while (!__ac_isempty(new_flags, i)) i = (i + inc) & new_mask; \
						__ac_set_isempty_false(new_flags, i);			\
						if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
							{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
							if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
							__ac_set_isempty_true(h->flags, i); /* mark it as deleted in the old hash table */ \
						} else { /* write the element and jump out of the loop */ \
							h->keys[i] = key;							\
							if (kh_is_map) h->vals[i] = val;			\
							break;										\
						}												\
					}													\
				}														\
			}															\
			if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
				h->keys = (khkey_t*)KHASH_REALLOC(h->keys, new_n_buckets * sizeof(khkey_t)); \
				if (kh_is_map) h->vals = (khval_t*)KHASH_REALLOC(h->vals, new_n_buckets * sizeof(khval_t)); \
			}															\
			KHASH_FREE(h->flags); /* free the working space */			\
			h->flags = new_flags;										\
			h->n_buckets = new_n_buckets;								\
			h->n_occupied = h->size;									\
			h->upper_bound = (khuint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
		}																\
	}																	\
	SCOPE khuint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
	{																	\
		khuint_t x;														\
		if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
			if (h->n_buckets > (h->size<<1)) kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \
			else kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \
		} /* TODO: implement automatic shrinking; resize() already supports shrinking */ \
		{																\
			khuint_t inc, k, i, site, last, mask = h->n_buckets - 1;	\
			x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
			if (__ac_isempty(h->flags, i)) x = i; /* for speed up */	\
			else {														\
				inc = __ac_inc(k, mask); last = i;						\
				while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
					if (__ac_isdel(h->flags, i)) site = i;				\
					i = (i + inc) & mask; 								\
					if (i == last) { x = site; break; }					\
				}														\
				if (x == h->n_buckets) {								\
					if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
					else x = i;											\
				}														\
			}															\
		}																\
		if (__ac_isempty(h->flags, x)) { /* not present at all */		\
			h->keys[x] = key;											\
			__ac_set_isboth_false(h->flags, x);							\
			++h->size; ++h->n_occupied;									\
			*ret = 1;													\
		} else if (__ac_isdel(h->flags, x)) { /* deleted */				\
			h->keys[x] = key;											\
			__ac_set_isboth_false(h->flags, x);							\
			++h->size;													\
			*ret = 2;													\
		} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
		return x;														\
	}																	\
	SCOPE void kh_del_##name(kh_##name##_t *h, khuint_t x)				\
	{																	\
		if (x != h->n_buckets && !__ac_iseither(h->flags, x)) {			\
			__ac_set_isdel_true(h->flags, x);							\
			--h->size;													\
		}																\
	}

#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	KHASH_INIT2(name, PANDAS_INLINE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)

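// kh_get/kh_put above implement open addressing: the first probe slot is
// hash & mask and, on a collision, the index advances by a second
// hash-derived odd increment (__ac_inc), or by 1 when KHASH_LINEAR is
// defined. Because n_buckets is a power of two and the increment is odd, the
// probe sequence visits every slot before wrapping back to `last`. An
// illustrative trace with made-up numbers:
//
//     n_buckets = 8, mask = 7
//     i   = k & mask                          // first probe, say slot 2
//     inc = (murmur2_32to32(k) | 1) & mask    // odd step, say 5
//     probes: 2 -> (2+5)&7 = 7 -> (7+5)&7 = 4 -> ... until an empty slot
//             or an equal key is found
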
/* --- BEGIN OF HASH FUNCTIONS --- */

/*! @function
  @abstract     Integer hash function
  @param  key   The integer [khuint32_t]
  @return       The hash value [khuint_t]
 */
#define kh_int_hash_func(key) (khuint32_t)(key)
/*! @function
  @abstract     Integer comparison function
 */
#define kh_int_hash_equal(a, b) ((a) == (b))
/*! @function
  @abstract     64-bit integer hash function
  @param  key   The integer [khuint64_t]
  @return       The hash value [khuint_t]
 */
PANDAS_INLINE khuint_t kh_int64_hash_func(khuint64_t key)
{
    return (khuint_t)((key)>>33^(key)^(key)<<11);
}
/*! @function
  @abstract     64-bit integer comparison function
 */
#define kh_int64_hash_equal(a, b) ((a) == (b))

/*! @function
  @abstract     const char* hash function
  @param  s     Pointer to a null terminated string
  @return       The hash value
 */
PANDAS_INLINE khuint_t __ac_X31_hash_string(const char *s)
{
	khuint_t h = *s;
	if (h) for (++s ; *s; ++s) h = (h << 5) - h + *s;
	return h;
}
/*! @function
  @abstract     Another interface to const char* hash function
  @param  key   Pointer to a null terminated string [const char*]
  @return       The hash value [khuint_t]
 */
#define kh_str_hash_func(key) __ac_X31_hash_string(key)
/*! @function
  @abstract     const char* comparison function
 */
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)

PANDAS_INLINE khuint_t __ac_Wang_hash(khuint_t key)
{
    key += ~(key << 15);
    key ^=  (key >> 10);
    key +=  (key << 3);
    key ^=  (key >> 6);
    key += ~(key << 11);
    key ^=  (key >> 16);
    return key;
}
#define kh_int_hash_func2(key) __ac_Wang_hash((khuint_t)key)

/* --- END OF HASH FUNCTIONS --- */

/* Other convenient macros... */

/*!
  @abstract Type of the hash table.
  @param  name  Name of the hash table [symbol]
 */
#define khash_t(name) kh_##name##_t

/*! @function
  @abstract     Initiate a hash table.
  @param  name  Name of the hash table [symbol]
  @return       Pointer to the hash table [khash_t(name)*]
 */
#define kh_init(name) kh_init_##name()

/*! @function
  @abstract     Destroy a hash table.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
 */
#define kh_destroy(name, h) kh_destroy_##name(h)

/*! @function
  @abstract     Reset a hash table without deallocating memory.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
 */
#define kh_clear(name, h) kh_clear_##name(h)

/*! @function
  @abstract     Resize a hash table.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  s     New size [khuint_t]
 */
#define kh_resize(name, h, s) kh_resize_##name(h, s)

/*! @function
  @abstract     Insert a key to the hash table.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  k     Key [type of keys]
  @param  r     Extra return code: 0 if the key is present in the hash table;
                1 if the bucket is empty (never used); 2 if the element in
                the bucket has been deleted [int*]
  @return       Iterator to the inserted element [khuint_t]
 */
#define kh_put(name, h, k, r) kh_put_##name(h, k, r)

/*! @function
  @abstract     Retrieve a key from the hash table.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  k     Key [type of keys]
  @return       Iterator to the found element, or kh_end(h) if the element is absent [khuint_t]
 */
#define kh_get(name, h, k) kh_get_##name(h, k)

/*! @function
  @abstract     Remove a key from the hash table.
  @param  name  Name of the hash table [symbol]
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  k     Iterator to the element to be deleted [khuint_t]
 */
#define kh_del(name, h, k) kh_del_##name(h, k)

/*! @function
  @abstract     Test whether a bucket contains data.
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khuint_t]
  @return       1 if containing data; 0 otherwise [int]
 */
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))

/*! @function
  @abstract     Get key given an iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khuint_t]
  @return       Key [type of keys]
 */
#define kh_key(h, x) ((h)->keys[x])

/*! @function
  @abstract     Get value given an iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khuint_t]
  @return       Value [type of values]
  @discussion   For hash sets, calling this results in a segfault.
 */
#define kh_val(h, x) ((h)->vals[x])

/*! @function
  @abstract     Alias of kh_val()
 */
#define kh_value(h, x) ((h)->vals[x])

/*! @function
  @abstract     Get the start iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       The start iterator [khuint_t]
 */
#define kh_begin(h) (khuint_t)(0)

/*! @function
  @abstract     Get the end iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       The end iterator [khuint_t]
 */
#define kh_end(h) ((h)->n_buckets)

/*! @function
  @abstract     Get the number of elements in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       Number of elements in the hash table [khuint_t]
 */
#define kh_size(h) ((h)->size)

/*! @function
  @abstract     Get the number of buckets in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       Number of buckets in the hash table [khuint_t]
 */
#define kh_n_buckets(h) ((h)->n_buckets)

/* More convenient interfaces */

/*! @function
  @abstract     Instantiate a hash set containing integer keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_INT(name)										\
	KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)

/*! @function
  @abstract     Instantiate a hash map containing integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_INT(name, khval_t)								\
	KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)

#define KHASH_MAP_INIT_UINT(name, khval_t)								\
	KHASH_INIT(name, khuint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)

/*! @function
  @abstract     Instantiate a hash set containing 64-bit integer keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_UINT64(name)										\
	KHASH_INIT(name, khuint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)

#define KHASH_SET_INIT_INT64(name)										\
	KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)

/*! @function
  @abstract     Instantiate a hash map containing 64-bit integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_UINT64(name, khval_t)								\
	KHASH_INIT(name, khuint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)

#define KHASH_MAP_INIT_INT64(name, khval_t)								\
	KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)

/*! @function
  @abstract     Instantiate a hash map containing 16-bit integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_INT16(name, khval_t)								\
	KHASH_INIT(name, khint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)

#define KHASH_MAP_INIT_UINT16(name, khval_t)								\
	KHASH_INIT(name, khuint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)

/*! @function
  @abstract     Instantiate a hash map containing 8-bit integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_INT8(name, khval_t)								\
	KHASH_INIT(name, khint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)

#define KHASH_MAP_INIT_UINT8(name, khval_t)								\
	KHASH_INIT(name, khuint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)


typedef const char *kh_cstr_t;
/*! @function
  @abstract     Instantiate a hash set containing const char* keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_STR(name)										\
	KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)

/*! @function
  @abstract     Instantiate a hash map containing const char* keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_STR(name, khval_t)								\
	KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)


#define kh_exist_str(h, k) (kh_exist(h, k))
#define kh_exist_float64(h, k) (kh_exist(h, k))
#define kh_exist_uint64(h, k) (kh_exist(h, k))
#define kh_exist_int64(h, k) (kh_exist(h, k))
#define kh_exist_float32(h, k) (kh_exist(h, k))
#define kh_exist_int32(h, k) (kh_exist(h, k))
#define kh_exist_uint32(h, k) (kh_exist(h, k))
#define kh_exist_int16(h, k) (kh_exist(h, k))
#define kh_exist_uint16(h, k) (kh_exist(h, k))
#define kh_exist_int8(h, k) (kh_exist(h, k))
#define kh_exist_uint8(h, k) (kh_exist(h, k))

KHASH_MAP_INIT_STR(str, size_t)
KHASH_MAP_INIT_INT(int32, size_t)
KHASH_MAP_INIT_UINT(uint32, size_t)
KHASH_MAP_INIT_INT64(int64, size_t)
KHASH_MAP_INIT_UINT64(uint64, size_t)
KHASH_MAP_INIT_INT16(int16, size_t)
KHASH_MAP_INIT_UINT16(uint16, size_t)
KHASH_MAP_INIT_INT8(int8, size_t)
KHASH_MAP_INIT_UINT8(uint8, size_t)


#endif /* __AC_KHASH_H */
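
// The KHASH_MAP_INIT_* lines above stamp out the concrete table types used by
// pandas, e.g. khash_t(int64) mapping khint64_t keys to size_t values. A
// minimal sketch of driving such a table (hypothetical helper, illustrative
// only):

size_t count_distinct_int64(const khint64_t *vals, size_t n) {
    int ret;
    size_t i, distinct;
    khash_t(int64) *h = kh_init(int64);
    for (i = 0; i < n; ++i) {
        khiter_t k = kh_put(int64, h, vals[i], &ret);
        if (ret) kh_value(h, k) = 0;  /* ret != 0: key was not present yet */
        kh_value(h, k) += 1;          /* per-key occurrence count */
    }
    distinct = kh_size(h);
    kh_destroy(int64, h);
    return distinct;
}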
@@ -0,0 +1,450 @@
#include <string.h>
#include <Python.h>


typedef struct {
    float real;
    float imag;
} khcomplex64_t;
typedef struct {
    double real;
    double imag;
} khcomplex128_t;


// khash should report usage to tracemalloc
#if PY_VERSION_HEX >= 0x03060000
#include <pymem.h>
#if PY_VERSION_HEX < 0x03070000
#define PyTraceMalloc_Track _PyTraceMalloc_Track
#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
#endif
#else
#define PyTraceMalloc_Track(...)
#define PyTraceMalloc_Untrack(...)
#endif


static const int KHASH_TRACE_DOMAIN = 424242;
void *traced_malloc(size_t size){
    void *ptr = malloc(size);
    if (ptr != NULL) {
        PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, size);
    }
    return ptr;
}

void *traced_calloc(size_t num, size_t size){
    void *ptr = calloc(num, size);
    if (ptr != NULL) {
        PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, num*size);
    }
    return ptr;
}

void *traced_realloc(void* old_ptr, size_t size){
    void *ptr = realloc(old_ptr, size);
    if (ptr != NULL) {
        if (old_ptr != ptr) {
            PyTraceMalloc_Untrack(KHASH_TRACE_DOMAIN, (uintptr_t)old_ptr);
        }
        PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, size);
    }
    return ptr;
}

void traced_free(void* ptr){
    if (ptr != NULL) {
        PyTraceMalloc_Untrack(KHASH_TRACE_DOMAIN, (uintptr_t)ptr);
    }
    free(ptr);
}


#define KHASH_MALLOC traced_malloc
#define KHASH_REALLOC traced_realloc
#define KHASH_CALLOC traced_calloc
#define KHASH_FREE traced_free
#include "khash.h"

// Previously we were using the built-in CPython hash function for doubles
// python 2.7 https://github.com/python/cpython/blob/2.7/Objects/object.c#L1021
// python 3.5 https://github.com/python/cpython/blob/3.5/Python/pyhash.c#L85

// The python 3 hash function has the invariant hash(x) == hash(int(x)) == hash(decimal(x))
// and the size of hash may differ by platform / version (long in py2, Py_ssize_t in py3).
// We don't need those invariants because types will be cast before hashing, and if Py_ssize_t
// is 64 bits the truncation causes collision issues.  Given all that, we use our own
// simple hash, viewing the double bytes as an int64 and using khash's default
// hash for 64 bit integers.
// GH 13436 showed that _Py_HashDouble doesn't work well with khash
// GH 28303 showed that the simple xoring-version isn't good enough
// See GH 36729 for evaluation of the currently used murmur2-hash version
// An interesting alternative to the expensive murmur2-hash would be to change
// the probing strategy and use e.g. the probing strategy from CPython's
// implementation of dicts, which shines for smaller sizes but is more
// predisposed to superlinear running times (see GH 36729 for comparison)


khuint64_t PANDAS_INLINE asuint64(double key) {
    khuint64_t val;
    memcpy(&val, &key, sizeof(double));
    return val;
}

khuint32_t PANDAS_INLINE asuint32(float key) {
    khuint32_t val;
    memcpy(&val, &key, sizeof(float));
    return val;
}

#define ZERO_HASH 0
#define NAN_HASH  0

khuint32_t PANDAS_INLINE kh_float64_hash_func(double val){
    // 0.0 and -0.0 should have the same hash:
    if (val == 0.0){
        return ZERO_HASH;
    }
    // all nans should have the same hash:
    if ( val!=val ){
        return NAN_HASH;
    }
    khuint64_t as_int = asuint64(val);
    return murmur2_64to32(as_int);
}

khuint32_t PANDAS_INLINE kh_float32_hash_func(float val){
    // 0.0 and -0.0 should have the same hash:
    if (val == 0.0f){
        return ZERO_HASH;
    }
    // all nans should have the same hash:
    if ( val!=val ){
        return NAN_HASH;
    }
    khuint32_t as_int = asuint32(val);
    return murmur2_32to32(as_int);
}

#define kh_floats_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a)))

#define KHASH_MAP_INIT_FLOAT64(name, khval_t)								\
	KHASH_INIT(name, khfloat64_t, khval_t, 1, kh_float64_hash_func, kh_floats_hash_equal)

KHASH_MAP_INIT_FLOAT64(float64, size_t)

#define KHASH_MAP_INIT_FLOAT32(name, khval_t)								\
	KHASH_INIT(name, khfloat32_t, khval_t, 1, kh_float32_hash_func, kh_floats_hash_equal)

KHASH_MAP_INIT_FLOAT32(float32, size_t)

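// Because kh_float64_hash_func collapses 0.0/-0.0 and every NaN payload to a
// single hash, and kh_floats_hash_equal treats any two NaNs as equal, a
// float64 table sees all NaNs as one key. A small sketch, assuming <math.h>
// for NAN/nan(); hypothetical helper, illustrative only:

void float_key_semantics(void) {
    int ret;
    khash_t(float64) *h = kh_init(float64);
    kh_put(float64, h, NAN, &ret);       /* ret == 1: new key               */
    kh_put(float64, h, nan("7"), &ret);  /* ret == 0: same key as NAN       */
    kh_put(float64, h, 0.0, &ret);       /* ret == 1: new key               */
    kh_put(float64, h, -0.0, &ret);      /* ret == 0: -0.0 compares == 0.0  */
    /* kh_size(h) == 2 */
    kh_destroy(float64, h);
}
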
khint32_t PANDAS_INLINE kh_complex128_hash_func(khcomplex128_t val){
    return kh_float64_hash_func(val.real)^kh_float64_hash_func(val.imag);
}
khint32_t PANDAS_INLINE kh_complex64_hash_func(khcomplex64_t val){
    return kh_float32_hash_func(val.real)^kh_float32_hash_func(val.imag);
}

#define kh_complex_hash_equal(a, b) \
  (kh_floats_hash_equal(a.real, b.real) && kh_floats_hash_equal(a.imag, b.imag))


#define KHASH_MAP_INIT_COMPLEX64(name, khval_t)								\
	KHASH_INIT(name, khcomplex64_t, khval_t, 1, kh_complex64_hash_func, kh_complex_hash_equal)

KHASH_MAP_INIT_COMPLEX64(complex64, size_t)


#define KHASH_MAP_INIT_COMPLEX128(name, khval_t)								\
	KHASH_INIT(name, khcomplex128_t, khval_t, 1, kh_complex128_hash_func, kh_complex_hash_equal)

KHASH_MAP_INIT_COMPLEX128(complex128, size_t)


#define kh_exist_complex64(h, k) (kh_exist(h, k))
#define kh_exist_complex128(h, k) (kh_exist(h, k))


// NaN-floats should be in the same equivalency class, see GH 22119
int PANDAS_INLINE floatobject_cmp(PyFloatObject* a, PyFloatObject* b){
    return (
             Py_IS_NAN(PyFloat_AS_DOUBLE(a)) &&
             Py_IS_NAN(PyFloat_AS_DOUBLE(b))
           )
           ||
           ( PyFloat_AS_DOUBLE(a) == PyFloat_AS_DOUBLE(b) );
}

// NaNs should be in the same equivalency class, see GH 41836
// PyObject_RichCompareBool for complex objects behaves differently
// and needs to be replaced
int PANDAS_INLINE complexobject_cmp(PyComplexObject* a, PyComplexObject* b){
    return (
                Py_IS_NAN(a->cval.real) &&
                Py_IS_NAN(b->cval.real) &&
                Py_IS_NAN(a->cval.imag) &&
                Py_IS_NAN(b->cval.imag)
           )
           ||
           (
                Py_IS_NAN(a->cval.real) &&
                Py_IS_NAN(b->cval.real) &&
                a->cval.imag == b->cval.imag
           )
           ||
           (
                a->cval.real == b->cval.real &&
                Py_IS_NAN(a->cval.imag) &&
                Py_IS_NAN(b->cval.imag)
           )
           ||
           (
                a->cval.real == b->cval.real &&
                a->cval.imag == b->cval.imag
           );
}

int PANDAS_INLINE pyobject_cmp(PyObject* a, PyObject* b);


// replacing PyObject_RichCompareBool (NaN!=NaN) with pyobject_cmp (NaN==NaN),
// which treats NaNs as equivalent
// see GH 41836
int PANDAS_INLINE tupleobject_cmp(PyTupleObject* a, PyTupleObject* b){
    Py_ssize_t i;

    if (Py_SIZE(a) != Py_SIZE(b)) {
        return 0;
    }

    for (i = 0; i < Py_SIZE(a); ++i) {
        if (!pyobject_cmp(PyTuple_GET_ITEM(a, i), PyTuple_GET_ITEM(b, i))) {
            return 0;
        }
    }
    return 1;
}


int PANDAS_INLINE pyobject_cmp(PyObject* a, PyObject* b) {
    if (a == b) {
        return 1;
    }
    if (Py_TYPE(a) == Py_TYPE(b)) {
        // special handling for some built-in types which could have NaNs
        // as we would like to have them equivalent, but the usual
        // PyObject_RichCompareBool would return False
        if (PyFloat_CheckExact(a)) {
            return floatobject_cmp((PyFloatObject*)a, (PyFloatObject*)b);
        }
        if (PyComplex_CheckExact(a)) {
            return complexobject_cmp((PyComplexObject*)a, (PyComplexObject*)b);
        }
        if (PyTuple_CheckExact(a)) {
            return tupleobject_cmp((PyTupleObject*)a, (PyTupleObject*)b);
        }
        // frozenset isn't yet supported
    }

    int result = PyObject_RichCompareBool(a, b, Py_EQ);
    if (result < 0) {
        PyErr_Clear();
        return 0;
    }
    return result;
}

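// pyobject_cmp deliberately diverges from PyObject_RichCompareBool for float,
// complex and tuple objects so NaN-containing keys collapse into one
// equivalence class. A sketch of the difference, assuming an initialized
// CPython interpreter and Py_NAN from the Python headers; hypothetical
// helper, illustrative only:

void nan_equivalence_demo(void) {
    PyObject *a = PyFloat_FromDouble(Py_NAN);
    PyObject *b = PyFloat_FromDouble(Py_NAN);
    int rich = PyObject_RichCompareBool(a, b, Py_EQ);  /* 0: NaN != NaN    */
    int ours = pyobject_cmp(a, b);                     /* 1: treated equal */
    (void)rich;
    (void)ours;
    Py_DECREF(a);
    Py_DECREF(b);
}
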
Py_hash_t PANDAS_INLINE _Pandas_HashDouble(double val) {
 | 
			
		||||
    //Since Python3.10, nan is no longer has hash 0
 | 
			
		||||
    if (Py_IS_NAN(val)) {
 | 
			
		||||
        return 0;
 | 
			
		||||
    }
 | 
			
		||||
#if PY_VERSION_HEX < 0x030A0000
 | 
			
		||||
    return _Py_HashDouble(val);
 | 
			
		||||
#else
 | 
			
		||||
    return _Py_HashDouble(NULL, val);
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Py_hash_t PANDAS_INLINE floatobject_hash(PyFloatObject* key) {
 | 
			
		||||
    return _Pandas_HashDouble(PyFloat_AS_DOUBLE(key));
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define _PandasHASH_IMAG 1000003UL
 | 
			
		||||
 | 
			
		||||
// replaces _Py_HashDouble with _Pandas_HashDouble
 | 
			
		||||
Py_hash_t PANDAS_INLINE complexobject_hash(PyComplexObject* key) {
 | 
			
		||||
    Py_uhash_t realhash = (Py_uhash_t)_Pandas_HashDouble(key->cval.real);
 | 
			
		||||
    Py_uhash_t imaghash = (Py_uhash_t)_Pandas_HashDouble(key->cval.imag);
 | 
			
		||||
    if (realhash == (Py_uhash_t)-1 || imaghash == (Py_uhash_t)-1) {
 | 
			
		||||
        return -1;
 | 
			
		||||
    }
 | 
			
		||||
    Py_uhash_t combined = realhash + _PandasHASH_IMAG * imaghash;
 | 
			
		||||
    if (combined == (Py_uhash_t)-1) {
 | 
			
		||||
        return -2;
 | 
			
		||||
    }
 | 
			
		||||
    return (Py_hash_t)combined;
 | 
			
		||||
}

khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject* key);

// we could use any hashing algorithm; this is the original CPython one for tuples

#if SIZEOF_PY_UHASH_T > 4
#define _PandasHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL)
#define _PandasHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL)
#define _PandasHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL)
#define _PandasHASH_XXROTATE(x) ((x << 31) | (x >> 33))  /* Rotate left 31 bits */
#else
#define _PandasHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL)
#define _PandasHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL)
#define _PandasHASH_XXPRIME_5 ((Py_uhash_t)374761393UL)
#define _PandasHASH_XXROTATE(x) ((x << 13) | (x >> 19))  /* Rotate left 13 bits */
#endif

Py_hash_t PANDAS_INLINE tupleobject_hash(PyTupleObject* key) {
    Py_ssize_t i, len = Py_SIZE(key);
    PyObject **item = key->ob_item;

    Py_uhash_t acc = _PandasHASH_XXPRIME_5;
    for (i = 0; i < len; i++) {
        Py_uhash_t lane = kh_python_hash_func(item[i]);
        if (lane == (Py_uhash_t)-1) {
            return -1;
        }
        acc += lane * _PandasHASH_XXPRIME_2;
        acc = _PandasHASH_XXROTATE(acc);
        acc *= _PandasHASH_XXPRIME_1;
    }

    /* Add input length, mangled to keep the historical value of hash(()). */
    acc += len ^ (_PandasHASH_XXPRIME_5 ^ 3527539UL);

    if (acc == (Py_uhash_t)-1) {
        return 1546275796;
    }
    return acc;
}


khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject* key) {
    Py_hash_t hash;
    // For PyObject_Hash the following holds:
    //    hash(0.0) == 0 == hash(-0.0)
    //    yet for different nan-objects different hash-values
    //    are possible
    if (PyFloat_CheckExact(key)) {
        // we cannot use kh_float64_hash_func
        // because float(k) == k holds for any int-object k
        // and kh_float64_hash_func doesn't respect it
        hash = floatobject_hash((PyFloatObject*)key);
    }
    else if (PyComplex_CheckExact(key)) {
        // we cannot use kh_complex128_hash_func
        // because complex(k,0) == k holds for any int-object k
        // and kh_complex128_hash_func doesn't respect it
        hash = complexobject_hash((PyComplexObject*)key);
    }
    else if (PyTuple_CheckExact(key)) {
        hash = tupleobject_hash((PyTupleObject*)key);
    }
    else {
        hash = PyObject_Hash(key);
    }

    if (hash == -1) {
        PyErr_Clear();
        return 0;
    }
    #if SIZEOF_PY_HASH_T == 4
        // it is already a 32-bit value
        return hash;
    #else
        // for 64-bit builds,
        // we need information of the upper 32 bits as well
        // see GH 37615
        khuint64_t as_uint = (khuint64_t) hash;
        // uints avoid undefined behavior of signed ints
        return (as_uint >> 32) ^ as_uint;
    #endif
}
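// Illustrative sketch (not part of the original header): why the fold above
// matters. On 64-bit CPython, hash(2**32) == 2**32, whose low 32 bits are all
// zero; plain truncation would collide it with hash(0), while XOR-folding the
// upper half in keeps the two distinct.
#if 0  /* example only, not compiled */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold64to32(int64_t hash) {
    uint64_t as_uint = (uint64_t)hash;  /* uints avoid signed-shift UB */
    return (uint32_t)((as_uint >> 32) ^ as_uint);
}

int main(void) {
    printf("%u\n", (uint32_t)0x100000000LL);    /* truncation: 0, collides */
    printf("%u\n", fold64to32(0x100000000LL));  /* fold: 1, distinct from 0 */
    return 0;
}
#endif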

#define kh_python_hash_equal(a, b) (pyobject_cmp(a, b))


// Python object
typedef PyObject* kh_pyobject_t;

#define KHASH_MAP_INIT_PYOBJECT(name, khval_t)          \
    KHASH_INIT(name, kh_pyobject_t, khval_t, 1,         \
               kh_python_hash_func, kh_python_hash_equal)

KHASH_MAP_INIT_PYOBJECT(pymap, Py_ssize_t)

#define KHASH_SET_INIT_PYOBJECT(name)                   \
    KHASH_INIT(name, kh_pyobject_t, char, 0,            \
               kh_python_hash_func, kh_python_hash_equal)

KHASH_SET_INIT_PYOBJECT(pyset)

#define kh_exist_pymap(h, k) (kh_exist(h, k))
#define kh_exist_pyset(h, k) (kh_exist(h, k))

KHASH_MAP_INIT_STR(strbox, kh_pyobject_t)

typedef struct {
    kh_str_t *table;
    int starts[256];
} kh_str_starts_t;

typedef kh_str_starts_t* p_kh_str_starts_t;

p_kh_str_starts_t PANDAS_INLINE kh_init_str_starts(void) {
    kh_str_starts_t *result = (kh_str_starts_t*)KHASH_CALLOC(1, sizeof(kh_str_starts_t));
    result->table = kh_init_str();
    return result;
}

khuint_t PANDAS_INLINE kh_put_str_starts_item(kh_str_starts_t* table, char* key, int* ret) {
    khuint_t result = kh_put_str(table->table, key, ret);
    if (*ret != 0) {
        table->starts[(unsigned char)key[0]] = 1;
    }
    return result;
}

khuint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t* table, const char* key) {
    unsigned char ch = *key;
    if (table->starts[ch]) {
        if (ch == '\0' || kh_get_str(table->table, key) != table->table->n_buckets) return 1;
    }
    return 0;
}
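// Illustrative usage sketch (not part of the original header): kh_str_starts
// pairs a string hash table with a 256-entry first-byte filter, so a lookup
// whose first byte never occurred among the inserted keys is rejected without
// hashing the string at all.
#if 0  /* example only, not compiled */
static void demo_str_starts(void) {
    int ret;
    kh_str_starts_t *table = kh_init_str_starts();
    kh_put_str_starts_item(table, "True", &ret);
    kh_put_str_starts_item(table, "False", &ret);

    kh_get_str_starts_item(table, "yes");   /* 0: starts['y'] unset, no hashing */
    kh_get_str_starts_item(table, "True");  /* 1: starts['T'] set, full lookup hits */
    kh_get_str_starts_item(table, "Talk");  /* 0: starts['T'] set, full lookup misses */
    kh_destroy_str_starts(table);
}
#endif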

void PANDAS_INLINE kh_destroy_str_starts(kh_str_starts_t* table) {
    kh_destroy_str(table->table);
    KHASH_FREE(table);
}

void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t* table, khuint_t val) {
    kh_resize_str(table->table, val);
}

// utility function: given the number of elements
// returns number of necessary buckets
khuint_t PANDAS_INLINE kh_needed_n_buckets(khuint_t n_elements){
    khuint_t candidate = n_elements;
    kroundup32(candidate);
    khuint_t upper_bound = (khuint_t)(candidate * __ac_HASH_UPPER + 0.5);
    return (upper_bound < n_elements) ? 2*candidate : candidate;
}
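// Worked example (illustrative, not part of the original header): khash's
// maximum load factor __ac_HASH_UPPER is 0.77. For n_elements = 100,
// kroundup32 yields candidate = 128 and upper_bound = (khuint_t)(128 * 0.77
// + 0.5) = 99 < 100, so 2 * 128 = 256 buckets are returned; for
// n_elements = 90, upper_bound 99 >= 90 and 128 buckets suffice.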
@@ -0,0 +1,112 @@
/*

Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.

Copyright (c) 2005-2011, NumPy Developers
All rights reserved.

This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt

*/

#pragma once

#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif  // NPY_NO_DEPRECATED_API

#include <numpy/ndarraytypes.h>

typedef struct {
        npy_int64 days;
        npy_int32 hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds;
} pandas_timedeltastruct;

static const npy_datetimestruct _AS_MIN_DTS = {
    1969, 12, 31, 23, 59, 50, 776627, 963145, 224193};
static const npy_datetimestruct _FS_MIN_DTS = {
    1969, 12, 31, 21, 26, 16, 627963, 145224, 193000};
static const npy_datetimestruct _PS_MIN_DTS = {
    1969, 9, 16, 5, 57, 7, 963145, 224193, 0};
static const npy_datetimestruct _NS_MIN_DTS = {
    1677, 9, 21, 0, 12, 43, 145224, 193000, 0};
static const npy_datetimestruct _US_MIN_DTS = {
    -290308, 12, 21, 19, 59, 05, 224193, 0, 0};
static const npy_datetimestruct _MS_MIN_DTS = {
    -292275055, 5, 16, 16, 47, 4, 193000, 0, 0};
static const npy_datetimestruct _S_MIN_DTS = {
    -292277022657, 1, 27, 8, 29, 53, 0, 0, 0};
static const npy_datetimestruct _M_MIN_DTS = {
    -17536621475646, 5, 4, 5, 53, 0, 0, 0, 0};

static const npy_datetimestruct _AS_MAX_DTS = {
    1970, 1, 1, 0, 0, 9, 223372, 36854, 775807};
static const npy_datetimestruct _FS_MAX_DTS = {
    1970, 1, 1, 2, 33, 43, 372036, 854775, 807000};
static const npy_datetimestruct _PS_MAX_DTS = {
    1970, 4, 17, 18, 2, 52, 36854, 775807, 0};
static const npy_datetimestruct _NS_MAX_DTS = {
    2262, 4, 11, 23, 47, 16, 854775, 807000, 0};
static const npy_datetimestruct _US_MAX_DTS = {
    294247, 1, 10, 4, 0, 54, 775807, 0, 0};
static const npy_datetimestruct _MS_MAX_DTS = {
    292278994, 8, 17, 7, 12, 55, 807000, 0, 0};
static const npy_datetimestruct _S_MAX_DTS = {
    292277026596, 12, 4, 15, 30, 7, 0, 0, 0};
static const npy_datetimestruct _M_MAX_DTS = {
    17536621479585, 8, 30, 18, 7, 0, 0, 0, 0};

// stuff pandas needs
// ----------------------------------------------------------------------------

PyObject *extract_utc_offset(PyObject *obj);

npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base,
                                            const npy_datetimestruct *dts);

void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr,
                                       npy_datetimestruct *result);

void pandas_timedelta_to_timedeltastruct(npy_timedelta val,
                                         NPY_DATETIMEUNIT fr,
                                         pandas_timedeltastruct *result);

extern const int days_per_month_table[2][12];

// stuff numpy-derived code needs in header
// ----------------------------------------------------------------------------

int is_leapyear(npy_int64 year);

/*
 * Calculates the days offset from the 1970 epoch.
 */
npy_int64
get_datetimestruct_days(const npy_datetimestruct *dts);


/*
 * Compares two npy_datetimestruct objects chronologically
 */
int cmp_npy_datetimestruct(const npy_datetimestruct *a,
                           const npy_datetimestruct *b);


/*
 * Adjusts a datetimestruct based on a minutes offset. Assumes
 * the current values are valid.
 */
void
add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes);

/*
 * This function returns the DateTimeMetaData
 * contained within the provided datetime dtype.
 */
PyArray_DatetimeMetaData get_datetime_metadata_from_dtype(
        PyArray_Descr *dtype);
@@ -0,0 +1,109 @@
/*

Copyright (c) 2016, PyData Development Team
All rights reserved.

Distributed under the terms of the BSD Simplified License.

The full license is in the LICENSE file, distributed with this software.

Written by Mark Wiebe (mwwiebe@gmail.com)
Copyright (c) 2011 by Enthought, Inc.

Copyright (c) 2005-2011, NumPy Developers
All rights reserved.

See NUMPY_LICENSE.txt for the license.

This file implements string parsing and creation for NumPy datetime.

*/

#pragma once

#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif  // NPY_NO_DEPRECATED_API

/* 'format_requirement' can be one of three values:
 *      * PARTIAL_MATCH : Only require a partial match with 'format'.
 *           For example, if the string is '2020-01-01 05:00:00' and
 *           'format' is '%Y-%m-%d', then parse '2020-01-01';
 *      * EXACT_MATCH : require an exact match with 'format'. If the
 *           string is '2020-01-01', then the only format which will
 *           be able to parse it without error is '%Y-%m-%d';
 *      * INFER_FORMAT: parse without comparing 'format' (i.e. infer it).
 */
typedef enum {
    PARTIAL_MATCH,
    EXACT_MATCH,
    INFER_FORMAT
} FormatRequirement;

/*
 * Parses (almost) standard ISO 8601 date strings. The differences are:
 *
 * + The date "20100312" is parsed as the year 20100312, not as
 *   equivalent to "2010-03-12". The '-' in the dates are not optional.
 * + Only seconds may have a decimal point, with up to 18 digits after it
 *   (maximum attoseconds precision).
 * + Either a 'T' as in ISO 8601 or a ' ' may be used to separate
 *   the date and the time. Both are treated equivalently.
 * + Doesn't (yet) handle the "YYYY-DDD" or "YYYY-Www" formats.
 * + Doesn't handle leap seconds (seconds value has 60 in these cases).
 * + Doesn't handle 24:00:00 as a synonym for midnight (00:00:00) tomorrow.
 * + Accepts special values "NaT" (not a time), "Today" (current
 *   day according to local time) and "Now" (current time in UTC).
 *
 * 'str' must be a NULL-terminated string, and 'len' must be its length.
 *
 * 'out' gets filled with the parsed date-time.
 * 'out_local' gets whether the returned value contains a timezone: 0 for UTC, 1 for local time.
 * 'out_tzoffset' gets set to the timezone offset in minutes
 *      if the parsed time was in local time,
 *      to 0 otherwise. The values 'now' and 'today' don't get counted
 *      as local, and neither do UTC +/-#### timezone offsets, because
 *      they aren't using the computer's local timezone offset.
 *
 * Returns 0 on success, -1 on failure.
 */
int
parse_iso_8601_datetime(const char *str, int len, int want_exc,
                        npy_datetimestruct *out,
                        NPY_DATETIMEUNIT *out_bestunit,
                        int *out_local,
                        int *out_tzoffset,
                        const char* format,
                        int format_len,
                        FormatRequirement format_requirement);
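
/*
 * Illustrative usage sketch (not part of the original header); assumes the
 * caller links against pandas' np_datetime_strings sources and that passing
 * format=NULL with INFER_FORMAT is acceptable, since format checks are
 * skipped in that mode.
 */
#if 0  /* example only, not compiled */
static void demo_parse_iso(void) {
    const char *s = "2020-01-01 05:00:00";  /* length 19 */
    npy_datetimestruct out;
    NPY_DATETIMEUNIT bestunit;
    int local, tzoffset;

    /* want_exc=0: return -1 on failure instead of raising a Python error */
    if (parse_iso_8601_datetime(s, 19, 0, &out, &bestunit,
                                &local, &tzoffset,
                                NULL, 0, INFER_FORMAT) == 0) {
        /* out.year == 2020, out.month == 1, out.hour == 5; local == 0 */
    }
}
#endif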

/*
 * Provides a string length to use for converting datetime
 * objects with the given local and unit settings.
 */
int
get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base);

/*
 * Converts an npy_datetimestruct to an (almost) ISO 8601
 * NULL-terminated string using timezone Z (UTC).
 *
 * 'base' restricts the output to that unit. Set 'base' to
 * -1 to auto-detect a base after which all the values are zero.
 *
 *  Returns 0 on success, -1 on failure (for example if the output
 *  string was too short).
 */
int
make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen,
                       int utc, NPY_DATETIMEUNIT base);
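
/*
 * Illustrative usage sketch (not part of the original header): size the
 * output with the helper above before writing; utc=1 is assumed here to
 * produce the 'Z' designator described in the comment.
 */
#if 0  /* example only, not compiled */
static void demo_make_iso(npy_datetimestruct *dts) {
    char buf[64];
    int len = get_datetime_iso_8601_strlen(0, NPY_FR_s);  /* seconds unit */

    if (len <= (int)sizeof(buf) &&
        make_iso_8601_datetime(dts, buf, len, 1, NPY_FR_s) == 0) {
        /* buf holds e.g. "1970-01-01T00:00:00Z" */
    }
}
#endif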

/*
 * Converts a pandas_timedeltastruct to an ISO 8601 string.
 *
 * Mutates outlen to provide the size of the (non-NULL-terminated) string.
 *
 * Currently has no error handling.
 */
int make_iso_8601_timedelta(pandas_timedeltastruct *tds, char *outstr,
                            size_t *outlen);
@@ -0,0 +1,314 @@
/*
Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the ESN Social Software AB nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007  Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.

Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
 * Copyright (c) 1988-1993 The Regents of the University of California.
 * Copyright (c) 1994 Sun Microsystems, Inc.
*/

/*
Ultra fast JSON encoder and decoder
Developed by Jonas Tarnstrom (jonas@esn.me).

Encoder notes:
------------------

:: Cyclic references ::
Cyclically referenced objects are not detected.
Set JSONObjectEncoder.recursionMax to a suitable value or make sure the input
object tree doesn't have cyclic references.

*/

#pragma once

#include <stdio.h>
#include <wchar.h>
#include "pandas/portable.h"

// Don't output any extra whitespace when encoding
#define JSON_NO_EXTRA_WHITESPACE

// Max decimals to encode double floating point numbers with
#ifndef JSON_DOUBLE_MAX_DECIMALS
#define JSON_DOUBLE_MAX_DECIMALS 15
#endif

// Max recursion depth, default for encoder
#ifndef JSON_MAX_RECURSION_DEPTH
#define JSON_MAX_RECURSION_DEPTH 1024
#endif

// Max recursion depth, default for decoder
#ifndef JSON_MAX_OBJECT_DEPTH
#define JSON_MAX_OBJECT_DEPTH 1024
#endif

/*
Dictates and limits how much stack space for buffers UltraJSON will use before
resorting to the provided heap functions */
#ifndef JSON_MAX_STACK_BUFFER_SIZE
#define JSON_MAX_STACK_BUFFER_SIZE 131072
#endif

#ifdef _WIN32

typedef __int64 JSINT64;
typedef unsigned __int64 JSUINT64;

typedef __int32 JSINT32;
typedef unsigned __int32 JSUINT32;
typedef unsigned __int8 JSUINT8;
typedef unsigned __int16 JSUTF16;
typedef unsigned __int32 JSUTF32;
typedef __int64 JSLONG;

#define EXPORTFUNCTION __declspec(dllexport)

#define FASTCALL_MSVC __fastcall

#define INLINE_PREFIX static __inline

#else

#include <stdint.h>
typedef int64_t JSINT64;
typedef uint64_t JSUINT64;

typedef int32_t JSINT32;
typedef uint32_t JSUINT32;

#define FASTCALL_MSVC

#define INLINE_PREFIX static inline

typedef uint8_t JSUINT8;
typedef uint16_t JSUTF16;
typedef uint32_t JSUTF32;

typedef int64_t JSLONG;

#define EXPORTFUNCTION
#endif

#if !(defined(__LITTLE_ENDIAN__) || defined(__BIG_ENDIAN__))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __LITTLE_ENDIAN__
#else

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define __BIG_ENDIAN__
#endif

#endif

#endif

#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#error "Endianness not supported"
#endif

enum JSTYPES {
  JT_NULL,     // NULL
  JT_TRUE,     // boolean true
  JT_FALSE,    // boolean false
  JT_INT,      // (JSINT32 (signed 32-bit))
  JT_LONG,     // (JSINT64 (signed 64-bit))
  JT_DOUBLE,   // (double)
  JT_BIGNUM,   // integer larger than sys.maxsize
  JT_UTF8,     // (char 8-bit)
  JT_ARRAY,    // Array structure
  JT_OBJECT,   // Key/Value structure
  JT_INVALID,  // Internal, do not return nor expect
  JT_POS_INF,  // Positive infinity
  JT_NEG_INF,  // Negative infinity
};

typedef void * JSOBJ;
typedef void * JSITER;

typedef struct __JSONTypeContext {
  int type;
  void *encoder;
  void *prv;
} JSONTypeContext;

/*
Function pointer declarations, suitable for implementing UltraJSON */
typedef void (*JSPFN_ITERBEGIN)(JSOBJ obj, JSONTypeContext *tc);
typedef int (*JSPFN_ITERNEXT)(JSOBJ obj, JSONTypeContext *tc);
typedef void (*JSPFN_ITEREND)(JSOBJ obj, JSONTypeContext *tc);
typedef JSOBJ (*JSPFN_ITERGETVALUE)(JSOBJ obj, JSONTypeContext *tc);
typedef char *(*JSPFN_ITERGETNAME)(JSOBJ obj, JSONTypeContext *tc,
                                   size_t *outLen);
typedef void *(*JSPFN_MALLOC)(size_t size);
typedef void (*JSPFN_FREE)(void *pptr);
typedef void *(*JSPFN_REALLOC)(void *base, size_t size);

typedef struct __JSONObjectEncoder {
  void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
  void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
  const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc,
                                size_t *_outLen);
  JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
  JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc);
  double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
  const char *(*getBigNumStringValue)(JSOBJ obj, JSONTypeContext *tc,
                                size_t *_outLen);

  /*
  Begin iteration of an iterable object (JS_ARRAY or JS_OBJECT).
  The implementor should set up iteration state in ti->prv.
  */
  JSPFN_ITERBEGIN iterBegin;

  /*
  Retrieve the next object in an iteration. Should return 0 to indicate that
  the iteration has reached its end, or 1 if there are more items.
  The implementor is responsible for keeping the state of the iteration. Use
  the ti->prv fields for this.
  */
  JSPFN_ITERNEXT iterNext;

  /*
  Ends the iteration of an iterable object.
  Any iteration state stored in ti->prv can be freed here.
  */
  JSPFN_ITEREND iterEnd;

  /*
  Returns a reference to the value object of an iterator.
  The implementor is responsible for the life-cycle of the returned value. Use
  iterNext/iterEnd and ti->prv to keep track of the current object.
  */
  JSPFN_ITERGETVALUE iterGetValue;

  /*
  Return the name of the iterator's current item.
  The implementor is responsible for the life-cycle of the returned string.
  Use iterNext/iterEnd and ti->prv to keep track of the current object.
  */
  JSPFN_ITERGETNAME iterGetName;

  /*
  Release a value as indicated by setting ti->release = 1 in the previous
  getValue call. The ti->prv array should contain the necessary context to
  release the value.
  */
  void (*releaseObject)(JSOBJ obj);

  /* Library functions
  Set to NULL to use STDLIB malloc, realloc, free */
  JSPFN_MALLOC malloc;
  JSPFN_REALLOC realloc;
  JSPFN_FREE free;

  /*
  Configuration for max recursion; set to 0 to use the default
  (see JSON_MAX_RECURSION_DEPTH) */
  int recursionMax;

  /*
  Configuration for the max number of decimals of double floating point
  numbers to encode (0-9) */
  int doublePrecision;

  /*
  If true, output will be ASCII with all characters above 127 encoded as
  \uXXXX. If false, output will be UTF-8 or whatever charset the strings
  were brought in as */
  int forceASCII;

  /*
  If true, '<', '>', and '&' characters will be encoded as \u003c, \u003e,
  and \u0026, respectively. If false, no special encoding will be used. */
  int encodeHTMLChars;

  /*
  Configuration for the number of spaces of indent */
  int indent;

  /*
  Set to an error message if an error occurred */
  const char *errorMsg;
  JSOBJ errorObj;

  /* Buffer stuff */
  char *start;
  char *offset;
  char *end;
  int heap;
  int level;
} JSONObjectEncoder;

/*
Encode an object structure into JSON.

Arguments:
obj - An anonymous type representing the object
enc - Function definitions for querying JSOBJ type
buffer - Preallocated buffer to store the result in. If NULL, the function allocates its own buffer.
cbBuffer - Length of buffer (ignored if buffer is NULL)

Returns:
Encoded JSON object as a null-terminated char string.

NOTE:
If the supplied buffer wasn't big enough to hold the result, the function will allocate a new buffer.
The life cycle of the provided buffer must still be handled by the caller.

If the return value doesn't equal the specified buffer, the caller must release the memory using
JSONObjectEncoder.free or free() as specified when calling this function.
*/
EXPORTFUNCTION char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc,
                                       char *buffer, size_t cbBuffer);
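
/*
Illustrative sketch (not part of the original header): the stack-buffer
pattern the NOTE above implies. If the result outgrew the supplied buffer,
the returned pointer differs from it and must be released by the caller.
*/
#if 0  /* example only, not compiled */
static void demo_encode(JSOBJ obj, JSONObjectEncoder *enc) {
    char stack_buf[JSON_MAX_STACK_BUFFER_SIZE];
    char *out = JSON_EncodeObject(obj, enc, stack_buf, sizeof(stack_buf));

    if (out == NULL) {
        /* enc->errorMsg describes the failure */
        return;
    }
    /* ... use the null-terminated JSON in out ... */
    if (out != stack_buf) {
        /* heap-allocated by the encoder; release with enc->free if the
           encoder was configured with one, otherwise stdlib free() */
        (enc->free ? enc->free : free)(out);
    }
}
#endif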

typedef struct __JSONObjectDecoder {
  JSOBJ (*newString)(void *prv, wchar_t *start, wchar_t *end);
  int (*objectAddKey)(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value);
  int (*arrayAddItem)(void *prv, JSOBJ obj, JSOBJ value);
  JSOBJ (*newTrue)(void *prv);
  JSOBJ (*newFalse)(void *prv);
  JSOBJ (*newNull)(void *prv);
  JSOBJ (*newPosInf)(void *prv);
  JSOBJ (*newNegInf)(void *prv);
  JSOBJ (*newObject)(void *prv, void *decoder);
  JSOBJ (*endObject)(void *prv, JSOBJ obj);
  JSOBJ (*newArray)(void *prv, void *decoder);
  JSOBJ (*endArray)(void *prv, JSOBJ obj);
  JSOBJ (*newInt)(void *prv, JSINT32 value);
  JSOBJ (*newLong)(void *prv, JSINT64 value);
  JSOBJ (*newUnsignedLong)(void *prv, JSUINT64 value);
  JSOBJ (*newDouble)(void *prv, double value);
  void (*releaseObject)(void *prv, JSOBJ obj, void *decoder);
  JSPFN_MALLOC malloc;
  JSPFN_FREE free;
  JSPFN_REALLOC realloc;
  char *errorStr;
  char *errorOffset;
  int preciseFloat;
  void *prv;
} JSONObjectDecoder;

EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec,
                                       const char *buffer, size_t cbBuffer);
EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t);
@@ -0,0 +1,40 @@
/*
Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the ESN Social Software AB nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
Copyright (c) 2007  Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.

Numeric decoder derived from TCL library
https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
 * Copyright (c) 1988-1993 The Regents of the University of California.
 * Copyright (c) 1994 Sun Microsystems, Inc.
*/

#pragma once

#define UJSON_VERSION "1.33"
Binary file not shown.

107 teil20/lib/python3.11/site-packages/pandas/_libs/index.pyi Normal file
@@ -0,0 +1,107 @@
import numpy as np

from pandas._typing import npt

from pandas import MultiIndex
from pandas.core.arrays import ExtensionArray

multiindex_nulls_shift: int

class IndexEngine:
    over_size_threshold: bool
    def __init__(self, values: np.ndarray) -> None: ...
    def __contains__(self, val: object) -> bool: ...

    # -> int | slice | np.ndarray[bool]
    def get_loc(self, val: object) -> int | slice | np.ndarray: ...
    def sizeof(self, deep: bool = ...) -> int: ...
    def __sizeof__(self) -> int: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    @property
    def is_mapping_populated(self) -> bool: ...
    def clear_mapping(self): ...
    def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
    def get_indexer_non_unique(
        self,
        targets: np.ndarray,
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...

class MaskedIndexEngine(IndexEngine):
    def __init__(self, values: object) -> None: ...
    def get_indexer_non_unique(
        self, targets: object
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...

class Float64Engine(IndexEngine): ...
class Float32Engine(IndexEngine): ...
class Complex128Engine(IndexEngine): ...
class Complex64Engine(IndexEngine): ...
class Int64Engine(IndexEngine): ...
class Int32Engine(IndexEngine): ...
class Int16Engine(IndexEngine): ...
class Int8Engine(IndexEngine): ...
class UInt64Engine(IndexEngine): ...
class UInt32Engine(IndexEngine): ...
class UInt16Engine(IndexEngine): ...
class UInt8Engine(IndexEngine): ...
class ObjectEngine(IndexEngine): ...
class DatetimeEngine(Int64Engine): ...
class TimedeltaEngine(DatetimeEngine): ...
class PeriodEngine(Int64Engine): ...
class BoolEngine(UInt8Engine): ...
class MaskedFloat64Engine(MaskedIndexEngine): ...
class MaskedFloat32Engine(MaskedIndexEngine): ...
class MaskedComplex128Engine(MaskedIndexEngine): ...
class MaskedComplex64Engine(MaskedIndexEngine): ...
class MaskedInt64Engine(MaskedIndexEngine): ...
class MaskedInt32Engine(MaskedIndexEngine): ...
class MaskedInt16Engine(MaskedIndexEngine): ...
class MaskedInt8Engine(MaskedIndexEngine): ...
class MaskedUInt64Engine(MaskedIndexEngine): ...
class MaskedUInt32Engine(MaskedIndexEngine): ...
class MaskedUInt16Engine(MaskedIndexEngine): ...
class MaskedUInt8Engine(MaskedIndexEngine): ...
class MaskedBoolEngine(MaskedUInt8Engine): ...

class BaseMultiIndexCodesEngine:
    levels: list[np.ndarray]
    offsets: np.ndarray  # ndarray[uint64_t, ndim=1]

    def __init__(
        self,
        levels: list[np.ndarray],  # all entries hashable
        labels: list[np.ndarray],  # all entries integer-dtyped
        offsets: np.ndarray,  # np.ndarray[np.uint64, ndim=1]
    ) -> None: ...
    def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ...
    def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ...
    def get_indexer_with_fill(
        self,
        target: np.ndarray,  # np.ndarray[object] of tuples
        values: np.ndarray,  # np.ndarray[object] of tuples
        method: str,
        limit: int | None,
    ) -> npt.NDArray[np.intp]: ...

class ExtensionEngine:
    def __init__(self, values: ExtensionArray) -> None: ...
    def __contains__(self, val: object) -> bool: ...
    def get_loc(self, val: object) -> int | slice | np.ndarray: ...
    def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
    def get_indexer_non_unique(
        self,
        targets: np.ndarray,
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    def sizeof(self, deep: bool = ...) -> int: ...
    def clear_mapping(self): ...
1308 teil20/lib/python3.11/site-packages/pandas/_libs/index.pyx Normal file
File diff suppressed because it is too large. Load Diff
@@ -0,0 +1,78 @@
"""
Template for functions of IndexEngine subclasses.

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

# ----------------------------------------------------------------------
# IndexEngine Subclass Methods
# ----------------------------------------------------------------------

{{py:

# name, dtype
dtypes = [('Float64', 'float64'),
          ('Float32', 'float32'),
          ('Int64', 'int64'),
          ('Int32', 'int32'),
          ('Int16', 'int16'),
          ('Int8', 'int8'),
          ('UInt64', 'uint64'),
          ('UInt32', 'uint32'),
          ('UInt16', 'uint16'),
          ('UInt8', 'uint8'),
          ('Complex64', 'complex64'),
          ('Complex128', 'complex128'),
          ]

engines = [('', 'IndexEngine'), ('Masked', 'MaskedIndexEngine')]

}}

{{for name, dtype in dtypes}}

{{for prefix, engine in engines}}

cdef class {{prefix}}{{name}}Engine({{engine}}):

    cdef _make_hash_table(self, Py_ssize_t n):
    {{if engine == 'MaskedIndexEngine'}}
        return _hash.{{name}}HashTable(n, uses_mask=True)
    {{else}}
        return _hash.{{name}}HashTable(n)
    {{endif}}

    cdef _check_type(self, object val):
    {{if engine == 'MaskedIndexEngine'}}
        if val is C_NA:
            return val
    {{endif}}
    {{if name not in {'Float64', 'Float32', 'Complex64', 'Complex128'} }}
        if not util.is_integer_object(val):
            if util.is_float_object(val):
                # Make sure Int64Index.get_loc(2.0) works
                if val.is_integer():
                    return int(val)
            raise KeyError(val)
        {{if name.startswith("U")}}
        if val < 0:
            # cannot have negative values with unsigned int dtype
            raise KeyError(val)
        {{endif}}
    {{elif name not in {'Complex64', 'Complex128'} }}
        if not util.is_integer_object(val) and not util.is_float_object(val):
            # in particular catch bool and avoid casting True -> 1.0
            raise KeyError(val)
    {{else}}
        if (not util.is_integer_object(val)
            and not util.is_float_object(val)
            and not util.is_complex_object(val)
        ):
            # in particular catch bool and avoid casting True -> 1.0
            raise KeyError(val)
    {{endif}}
        return val

{{endfor}}

{{endfor}}
Binary file not shown.
@@ -0,0 +1,17 @@
from typing import (
    Generic,
    TypeVar,
)

from pandas.core.indexing import IndexingMixin

_IndexingMixinT = TypeVar("_IndexingMixinT", bound=IndexingMixin)

class NDFrameIndexerBase(Generic[_IndexingMixinT]):
    name: str
    # in practice obj is either a DataFrame or a Series
    obj: _IndexingMixinT

    def __init__(self, name: str, obj: _IndexingMixinT) -> None: ...
    @property
    def ndim(self) -> int: ...
@@ -0,0 +1,28 @@
cdef class NDFrameIndexerBase:
    """
    A base class for _NDFrameIndexer for fast instantiation and attribute access.
    """
    cdef:
        Py_ssize_t _ndim

    cdef public:
        str name
        object obj

    def __init__(self, name: str, obj):
        self.obj = obj
        self.name = name
        self._ndim = -1

    @property
    def ndim(self) -> int:
        # Delay `ndim` instantiation until required as reading it
        # from `obj` isn't entirely cheap.
        ndim = self._ndim
        if ndim == -1:
            ndim = self._ndim = self.obj.ndim
            if ndim > 2:
                raise ValueError(  # pragma: no cover
                    "NDFrameIndexer does not support NDFrame objects with ndim > 2"
                )
        return ndim
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										106
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/internals.pyi
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										106
									
								
								teil20/lib/python3.11/site-packages/pandas/_libs/internals.pyi
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,106 @@
from typing import (
    Iterator,
    Sequence,
    final,
    overload,
)
import weakref

import numpy as np

from pandas._typing import (
    ArrayLike,
    Self,
    npt,
)

from pandas import Index
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.internals.blocks import Block as B

def slice_len(slc: slice, objlen: int = ...) -> int: ...
def get_concat_blkno_indexers(
    blknos_list: list[npt.NDArray[np.intp]],
) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ...
def get_blkno_indexers(
    blknos: np.ndarray,  # int64_t[:]
    group: bool = ...,
) -> list[tuple[int, slice | np.ndarray]]: ...
def get_blkno_placements(
    blknos: np.ndarray,
    group: bool = ...,
) -> Iterator[tuple[int, BlockPlacement]]: ...
def update_blklocs_and_blknos(
    blklocs: npt.NDArray[np.intp],
    blknos: npt.NDArray[np.intp],
    loc: int,
    nblocks: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@final
class BlockPlacement:
    def __init__(self, val: int | slice | np.ndarray) -> None: ...
    @property
    def indexer(self) -> np.ndarray | slice: ...
    @property
    def as_array(self) -> np.ndarray: ...
    @property
    def as_slice(self) -> slice: ...
    @property
    def is_slice_like(self) -> bool: ...
    @overload
    def __getitem__(
        self, loc: slice | Sequence[int] | npt.NDArray[np.intp]
    ) -> BlockPlacement: ...
    @overload
    def __getitem__(self, loc: int) -> int: ...
    def __iter__(self) -> Iterator[int]: ...
    def __len__(self) -> int: ...
    def delete(self, loc) -> BlockPlacement: ...
    def add(self, other) -> BlockPlacement: ...
    def append(self, others: list[BlockPlacement]) -> BlockPlacement: ...
    def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ...

class SharedBlock:
    _mgr_locs: BlockPlacement
    ndim: int
    values: ArrayLike
    refs: BlockValuesRefs
    def __init__(
        self,
        values: ArrayLike,
        placement: BlockPlacement,
        ndim: int,
        refs: BlockValuesRefs | None = ...,
    ) -> None: ...

class NumpyBlock(SharedBlock):
    values: np.ndarray
    @final
    def slice_block_rows(self, slicer: slice) -> Self: ...

class NDArrayBackedBlock(SharedBlock):
    values: NDArrayBackedExtensionArray
    @final
    def slice_block_rows(self, slicer: slice) -> Self: ...

class Block(SharedBlock): ...

class BlockManager:
    blocks: tuple[B, ...]
    axes: list[Index]
    _known_consolidated: bool
    _is_consolidated: bool
    _blknos: np.ndarray
    _blklocs: np.ndarray
    def __init__(
        self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=...
    ) -> None: ...
    def get_slice(self, slobj: slice, axis: int = ...) -> Self: ...
    def _rebuild_blknos_and_blklocs(self) -> None: ...

class BlockValuesRefs:
    referenced_blocks: list[weakref.ref]
    def __init__(self, blk: SharedBlock | None = ...) -> None: ...
    def add_reference(self, blk: SharedBlock) -> None: ...
    def add_index_reference(self, index: Index) -> None: ...
    def has_reference(self) -> bool: ...
988 teil20/lib/python3.11/site-packages/pandas/_libs/internals.pyx Normal file
@@ -0,0 +1,988 @@
from collections import defaultdict
import weakref

cimport cython
from cpython.slice cimport PySlice_GetIndicesEx
from cython cimport Py_ssize_t


cdef extern from "Python.h":
    # TODO(cython3): from cpython.pyport cimport PY_SSIZE_T_MAX
    Py_ssize_t PY_SSIZE_T_MAX

import numpy as np

cimport numpy as cnp
from numpy cimport (
    NPY_INTP,
    int64_t,
    intp_t,
    ndarray,
)

cnp.import_array()

from pandas._libs.algos import ensure_int64

from pandas._libs.arrays cimport NDArrayBacked
from pandas._libs.util cimport (
    is_array,
    is_integer_object,
)


@cython.final
@cython.freelist(32)
cdef class BlockPlacement:
    cdef:
        slice _as_slice
        ndarray _as_array  # Note: this still allows `None`; will be intp_t
        bint _has_slice, _has_array, _is_known_slice_like

    def __cinit__(self, val):
        cdef:
            slice slc

        self._as_slice = None
        self._as_array = None
        self._has_slice = False
        self._has_array = False

        if is_integer_object(val):
            slc = slice(val, val + 1, 1)
            self._as_slice = slc
            self._has_slice = True
        elif isinstance(val, slice):
            slc = slice_canonize(val)

            if slc.start != slc.stop:
                self._as_slice = slc
                self._has_slice = True
            else:
                arr = np.empty(0, dtype=np.intp)
                self._as_array = arr
                self._has_array = True
        else:
            # Cython memoryview interface requires ndarray to be writeable.
            if (
                not is_array(val)
                or not cnp.PyArray_ISWRITEABLE(val)
                or (<ndarray>val).descr.type_num != cnp.NPY_INTP
            ):
                arr = np.require(val, dtype=np.intp, requirements="W")
            else:
                arr = val
            # Caller is responsible for ensuring arr.ndim == 1
            self._as_array = arr
            self._has_array = True

    def __str__(self) -> str:
        cdef:
            slice s = self._ensure_has_slice()

        if s is not None:
            v = self._as_slice
        else:
            v = self._as_array

        return f"{type(self).__name__}({v})"

    def __repr__(self) -> str:
        return str(self)

    def __len__(self) -> int:
        cdef:
            slice s = self._ensure_has_slice()

        if s is not None:
            return slice_len(s)
        else:
            return len(self._as_array)

    def __iter__(self):
        cdef:
            slice s = self._ensure_has_slice()
            Py_ssize_t start, stop, step, _

        if s is not None:
            start, stop, step, _ = slice_get_indices_ex(s)
            return iter(range(start, stop, step))
        else:
            return iter(self._as_array)

    @property
    def as_slice(self) -> slice:
        cdef:
            slice s = self._ensure_has_slice()

        if s is not None:
            return s
        else:
            raise TypeError("Not slice-like")

    @property
    def indexer(self):
        cdef:
            slice s = self._ensure_has_slice()

        if s is not None:
            return s
        else:
            return self._as_array

    @property
    def as_array(self) -> np.ndarray:
        cdef:
            Py_ssize_t start, stop, _

        if not self._has_array:
            start, stop, step, _ = slice_get_indices_ex(self._as_slice)
            # NOTE: this is the C-optimized equivalent of
            #  `np.arange(start, stop, step, dtype=np.intp)`
            self._as_array = cnp.PyArray_Arange(start, stop, step, NPY_INTP)
            self._has_array = True

        return self._as_array

    @property
    def is_slice_like(self) -> bool:
        cdef:
            slice s = self._ensure_has_slice()

        return s is not None

    def __getitem__(self, loc):
        cdef:
            slice s = self._ensure_has_slice()

        if s is not None:
            val = slice_getitem(s, loc)
        else:
            val = self._as_array[loc]

        if not isinstance(val, slice) and val.ndim == 0:
            return val

        return BlockPlacement(val)

    def delete(self, loc) -> BlockPlacement:
        return BlockPlacement(np.delete(self.as_array, loc, axis=0))

    def append(self, others) -> BlockPlacement:
        if not len(others):
            return self

        return BlockPlacement(
            np.concatenate([self.as_array] + [o.as_array for o in others])
        )

    cdef BlockPlacement iadd(self, other):
        cdef:
            slice s = self._ensure_has_slice()
            Py_ssize_t other_int, start, stop, step

        if is_integer_object(other) and s is not None:
            other_int = <Py_ssize_t>other

            if other_int == 0:
                # BlockPlacement is treated as immutable
                return self

            start, stop, step, _ = slice_get_indices_ex(s)
            start += other_int
            stop += other_int

            if (step > 0 and start < 0) or (step < 0 and stop < step):
                raise ValueError("iadd causes length change")

            if stop < 0:
                val = slice(start, None, step)
            else:
                val = slice(start, stop, step)

            return BlockPlacement(val)
        else:
            newarr = self.as_array + other
            if (newarr < 0).any():
                raise ValueError("iadd causes length change")

            val = newarr
            return BlockPlacement(val)

    def add(self, other) -> BlockPlacement:
        # We can get here with int or ndarray
        return self.iadd(other)

    cdef slice _ensure_has_slice(self):
        if not self._has_slice:
            self._as_slice = indexer_as_slice(self._as_array)
            self._has_slice = True

        return self._as_slice

    cpdef BlockPlacement increment_above(self, Py_ssize_t loc):
        """
        Increment any entries of 'loc' or above by one.
        """
        cdef:
            slice nv, s = self._ensure_has_slice()
 | 
			
		||||
            Py_ssize_t start, stop, step
 | 
			
		||||
            ndarray[intp_t, ndim=1] newarr
 | 
			
		||||
 | 
			
		||||
        if s is not None:
 | 
			
		||||
            # see if we are either all-above or all-below, each of which
 | 
			
		||||
            #  have fastpaths available.
 | 
			
		||||
 | 
			
		||||
            start, stop, step, _ = slice_get_indices_ex(s)
 | 
			
		||||
 | 
			
		||||
            if start < loc and stop <= loc:
 | 
			
		||||
                # We are entirely below, nothing to increment
 | 
			
		||||
                return self
 | 
			
		||||
 | 
			
		||||
            if start >= loc and stop >= loc:
 | 
			
		||||
                # We are entirely above, we can efficiently increment out slice
 | 
			
		||||
                nv = slice(start + 1, stop + 1, step)
 | 
			
		||||
                return BlockPlacement(nv)
 | 
			
		||||
 | 
			
		||||
        if loc == 0:
 | 
			
		||||
            # fastpath where we know everything is >= 0
 | 
			
		||||
            newarr = self.as_array + 1
 | 
			
		||||
            return BlockPlacement(newarr)
 | 
			
		||||
 | 
			
		||||
        newarr = self.as_array.copy()
 | 
			
		||||
        newarr[newarr >= loc] += 1
 | 
			
		||||
        return BlockPlacement(newarr)
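
    # --- Illustrative note (not from the pandas sources; a hypothetical
    # example of the semantics above): entries below `loc` are untouched,
    # everything at or above `loc` shifts up by one, e.g.
    #
    #   >>> bp = BlockPlacement(np.array([0, 2, 5]))
    #   >>> bp.increment_above(2).as_array
    #   array([0, 3, 6])
    #
    # The slice fastpaths merely avoid materializing an array when the whole
    # run moves together.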

    def tile_for_unstack(self, factor: int) -> np.ndarray:
        """
        Find the new mgr_locs for the un-stacked version of a Block.
        """
        cdef:
            slice slc = self._ensure_has_slice()
            ndarray[intp_t, ndim=1] new_placement

        if slc is not None and slc.step == 1:
            new_slc = slice(slc.start * factor, slc.stop * factor, 1)
            # equiv: np.arange(new_slc.start, new_slc.stop, dtype=np.intp)
            new_placement = cnp.PyArray_Arange(new_slc.start, new_slc.stop, 1, NPY_INTP)
        else:
            # Note: test_pivot_table_empty_aggfunc gets here with `slc is not None`
            mapped = [
                # equiv: np.arange(x * factor, (x + 1) * factor, dtype=np.intp)
                cnp.PyArray_Arange(x * factor, (x + 1) * factor, 1, NPY_INTP)
                for x in self
            ]
            new_placement = np.concatenate(mapped)
        return new_placement
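

# --- Illustrative sketch (not part of pandas): a plain-NumPy equivalent of
# BlockPlacement.tile_for_unstack; the _demo_* name is hypothetical and this
# assumes `np` (numpy) is imported at module level, as elsewhere in this file.
def _demo_tile_for_unstack(placement, factor):
    # Column x of the stacked Block expands to columns [x*factor, (x+1)*factor).
    return np.concatenate(
        [np.arange(x * factor, (x + 1) * factor, dtype=np.intp) for x in placement]
    )
# e.g. _demo_tile_for_unstack([0, 2], 3) -> array([0, 1, 2, 6, 7, 8])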


cdef slice slice_canonize(slice s):
    """
    Convert slice to canonical bounded form.
    """
    cdef:
        Py_ssize_t start = 0, stop = 0, step = 1

    if s.step is None:
        step = 1
    else:
        step = <Py_ssize_t>s.step
        if step == 0:
            raise ValueError("slice step cannot be zero")

    if step > 0:
        if s.stop is None:
            raise ValueError("unbounded slice")

        stop = <Py_ssize_t>s.stop
        if s.start is None:
            start = 0
        else:
            start = <Py_ssize_t>s.start
            if start > stop:
                start = stop
    elif step < 0:
        if s.start is None:
            raise ValueError("unbounded slice")

        start = <Py_ssize_t>s.start
        if s.stop is None:
            stop = -1
        else:
            stop = <Py_ssize_t>s.stop
            if stop > start:
                stop = start

    if start < 0 or (stop < 0 and s.stop is not None and step > 0):
        raise ValueError("unbounded slice")

    if stop < 0:
        return slice(start, None, step)
    else:
        return slice(start, stop, step)
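

# --- Illustrative sketch (not part of pandas): a hypothetical pure-Python
# mirror of slice_canonize, spelling out the canonical form it produces.
def _demo_slice_canonize(s):
    step = 1 if s.step is None else int(s.step)
    if step == 0:
        raise ValueError("slice step cannot be zero")
    if step > 0:
        if s.stop is None:
            raise ValueError("unbounded slice")
        stop = int(s.stop)
        start = 0 if s.start is None else int(s.start)
        if start > stop:
            start = stop  # clamp empty slices like slice(7, 3) to slice(3, 3)
    else:
        if s.start is None:
            raise ValueError("unbounded slice")
        start = int(s.start)
        stop = -1 if s.stop is None else int(s.stop)
        if stop > start:
            stop = start
    if start < 0 or (stop < 0 and s.stop is not None and step > 0):
        raise ValueError("unbounded slice")
    # stop < 0 can only mean "run past index 0" for a negative step
    return slice(start, None, step) if stop < 0 else slice(start, stop, step)
# e.g. _demo_slice_canonize(slice(None, 5)) == slice(0, 5, 1)
#      _demo_slice_canonize(slice(5, None, -1)) == slice(5, None, -1)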


cpdef Py_ssize_t slice_len(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -1:
    """
    Get length of a bounded slice.

    The slice must not have any "open" bounds that would create dependency on
    container size, i.e.:
    - if ``s.step is None or s.step > 0``, ``s.stop`` is not ``None``
    - if ``s.step < 0``, ``s.start`` is not ``None``

    Otherwise, the result is unreliable.
    """
    cdef:
        Py_ssize_t start, stop, step, length

    if slc is None:
        raise TypeError("slc must be slice")  # pragma: no cover

    PySlice_GetIndicesEx(slc, objlen, &start, &stop, &step, &length)

    return length
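

# --- Illustrative sketch (not part of pandas): the Python-level equivalent
# of slice_len for a bounded slice; _demo_slice_len is a hypothetical name.
def _demo_slice_len(slc, objlen=2**63 - 1):
    # slice.indices() applies the same clipping as PySlice_GetIndicesEx.
    start, stop, step = slc.indices(objlen)
    return len(range(start, stop, step))
# e.g. _demo_slice_len(slice(0, 10, 3)) == 4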


cdef (Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t) slice_get_indices_ex(
    slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX
):
    """
    Get (start, stop, step, length) tuple for a slice.

    If `objlen` is not specified, slice must be bounded, otherwise the result
    will be wrong.
    """
    cdef:
        Py_ssize_t start, stop, step, length

    if slc is None:
        raise TypeError("slc should be a slice")  # pragma: no cover

    PySlice_GetIndicesEx(slc, objlen, &start, &stop, &step, &length)

    return start, stop, step, length


cdef slice_getitem(slice slc, ind):
    cdef:
        Py_ssize_t s_start, s_stop, s_step, s_len
        Py_ssize_t ind_start, ind_stop, ind_step, ind_len

    s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc)

    if isinstance(ind, slice):
        ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, s_len)

        if ind_step > 0 and ind_len == s_len:
            # short-cut for no-op slice
            return slc

        if ind_step < 0:
            s_start = s_stop - s_step
            ind_step = -ind_step

        s_step *= ind_step
        s_stop = s_start + ind_stop * s_step
        s_start = s_start + ind_start * s_step

        if s_step < 0 and s_stop < 0:
            return slice(s_start, None, s_step)
        else:
            return slice(s_start, s_stop, s_step)

    else:
        # NOTE:
        # this is the C-optimized equivalent of
        # `np.arange(s_start, s_stop, s_step, dtype=np.intp)[ind]`
        return cnp.PyArray_Arange(s_start, s_stop, s_step, NPY_INTP)[ind]
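

# --- Illustrative check (not part of pandas): the composed slice returned by
# slice_getitem must match first materializing the slice and then indexing;
# this hypothetical helper computes that reference result with NumPy.
def _demo_slice_getitem_reference(slc, ind, objlen=100):
    base = np.arange(*slc.indices(objlen))
    return base[ind]
# e.g. _demo_slice_getitem_reference(slice(2, 20, 3), slice(1, 4))
#   -> array([ 5,  8, 11])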


@cython.boundscheck(False)
@cython.wraparound(False)
cdef slice indexer_as_slice(intp_t[:] vals):
    cdef:
        Py_ssize_t i, n, start, stop
        int64_t d

    if vals is None:
        raise TypeError("vals must be ndarray")  # pragma: no cover

    n = vals.shape[0]

    if n == 0 or vals[0] < 0:
        return None

    if n == 1:
        return slice(vals[0], vals[0] + 1, 1)

    if vals[1] < 0:
        return None

    # n >= 2
    d = vals[1] - vals[0]

    if d == 0:
        return None

    for i in range(2, n):
        if vals[i] < 0 or vals[i] - vals[i - 1] != d:
            return None

    start = vals[0]
    stop = start + n * d
    if stop < 0 and d < 0:
        return slice(start, None, d)
    else:
        return slice(start, stop, d)
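

# --- Illustrative sketch (not part of pandas): a hypothetical pure-Python
# equivalent of indexer_as_slice.  An indexer compresses to a slice only if
# it is non-negative with a constant nonzero stride.
def _demo_indexer_as_slice(vals):
    if len(vals) == 0 or vals[0] < 0:
        return None
    if len(vals) == 1:
        return slice(vals[0], vals[0] + 1, 1)
    d = vals[1] - vals[0]
    if d == 0 or any(v < 0 or v - p != d for p, v in zip(vals, vals[1:])):
        return None
    stop = vals[0] + len(vals) * d
    return slice(vals[0], None if stop < 0 else stop, d)
# e.g. _demo_indexer_as_slice([2, 4, 6]) == slice(2, 8, 2)
#      _demo_indexer_as_slice([2, 4, 5]) is None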


@cython.boundscheck(False)
@cython.wraparound(False)
def get_concat_blkno_indexers(list blknos_list not None):
    """
    Given the mgr.blknos for a list of mgrs, break range(len(mgrs[0])) into
    slices such that within each slice blknos_list[i] is constant for each i.

    This is a multi-Manager analogue to get_blkno_indexers with group=False.
    """
    # we have the blknos for each of several BlockManagers
    # list[np.ndarray[int64_t]]
    cdef:
        Py_ssize_t i, j, k, start, ncols
        cnp.npy_intp n_mgrs
        ndarray[intp_t] blknos, cur_blknos, run_blknos
        BlockPlacement bp
        list result = []

    n_mgrs = len(blknos_list)
    cur_blknos = cnp.PyArray_EMPTY(1, &n_mgrs, cnp.NPY_INTP, 0)

    blknos = blknos_list[0]
    ncols = len(blknos)
    if ncols == 0:
        return []

    start = 0
    for i in range(n_mgrs):
        blknos = blknos_list[i]
        cur_blknos[i] = blknos[0]
        assert len(blknos) == ncols

    for i in range(1, ncols):
        # For each column, we check if the Block it is part of (i.e. blknos[i])
        #  is the same as for the previous column (i.e. blknos[i-1]) *and* if
        #  this is the case for each blknos in blknos_list.  If not, we start
        #  a new "run".
        for k in range(n_mgrs):
            blknos = blknos_list[k]
            # assert cur_blknos[k] == blknos[i - 1]

            if blknos[i] != blknos[i - 1]:
                bp = BlockPlacement(slice(start, i))
                run_blknos = cnp.PyArray_Copy(cur_blknos)
                result.append((run_blknos, bp))

                start = i
                for j in range(n_mgrs):
                    blknos = blknos_list[j]
                    cur_blknos[j] = blknos[i]
                break  # break out of `for k in range(n_mgrs)` loop

    if start != ncols:
        bp = BlockPlacement(slice(start, ncols))
        run_blknos = cnp.PyArray_Copy(cur_blknos)
        result.append((run_blknos, bp))
    return result


@cython.boundscheck(False)
@cython.wraparound(False)
def get_blkno_indexers(
    int64_t[:] blknos, bint group=True
) -> list[tuple[int, slice | np.ndarray]]:
    """
    Enumerate contiguous runs of integers in ndarray.

    Iterate over elements of `blknos` yielding ``(blkno, slice(start, stop))``
    pairs for each contiguous run found.

    If `group` is True and there is more than one run for a certain blkno,
    a single ``(blkno, array)`` pair is returned instead, with the array
    containing the positions of all elements equal to that blkno.

    Returns
    -------
    list[tuple[int, slice | np.ndarray]]
    """
    # There's blkno in this function's name because it's used in block &
    # blockno handling.
    cdef:
        int64_t cur_blkno
        Py_ssize_t i, start, stop, n, diff
        cnp.npy_intp tot_len
        int64_t blkno
        object group_dict = defaultdict(list)
        ndarray[int64_t, ndim=1] arr

    n = blknos.shape[0]
    result = list()

    if n == 0:
        return result

    start = 0
    cur_blkno = blknos[start]

    if group is False:
        for i in range(1, n):
            if blknos[i] != cur_blkno:
                result.append((cur_blkno, slice(start, i)))

                start = i
                cur_blkno = blknos[i]

        result.append((cur_blkno, slice(start, n)))
    else:
        for i in range(1, n):
            if blknos[i] != cur_blkno:
                group_dict[cur_blkno].append((start, i))

                start = i
                cur_blkno = blknos[i]

        group_dict[cur_blkno].append((start, n))

        for blkno, slices in group_dict.items():
            if len(slices) == 1:
                result.append((blkno, slice(slices[0][0], slices[0][1])))
            else:
                tot_len = sum(stop - start for start, stop in slices)
                # equiv np.empty(tot_len, dtype=np.int64)
                arr = cnp.PyArray_EMPTY(1, &tot_len, cnp.NPY_INT64, 0)

                i = 0
                for start, stop in slices:
                    for diff in range(start, stop):
                        arr[i] = diff
                        i += 1

                result.append((blkno, arr))

    return result
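

# --- Illustrative example (not part of pandas; a hypothetical demo) of the
# two output shapes produced by get_blkno_indexers.
def _demo_blkno_runs():
    blknos = np.array([0, 0, 1, 1, 0], dtype=np.int64)
    ungrouped = get_blkno_indexers(blknos, group=False)
    # -> [(0, slice(0, 2)), (1, slice(2, 4)), (0, slice(4, 5))]
    grouped = get_blkno_indexers(blknos, group=True)
    # -> [(0, array([0, 1, 4])), (1, slice(2, 4))]  (runs of blkno 0 merged)
    return ungrouped, grouped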


def get_blkno_placements(blknos, group: bool = True):
    """
    Parameters
    ----------
    blknos : np.ndarray[int64]
    group : bool, default True

    Returns
    -------
    iterator
        Yields ``(blkno, BlockPlacement)`` pairs.
    """
    blknos = ensure_int64(blknos)

    for blkno, indexer in get_blkno_indexers(blknos, group):
        yield blkno, BlockPlacement(indexer)


@cython.boundscheck(False)
@cython.wraparound(False)
cpdef update_blklocs_and_blknos(
    ndarray[intp_t, ndim=1] blklocs,
    ndarray[intp_t, ndim=1] blknos,
    Py_ssize_t loc,
    intp_t nblocks,
):
    """
    Update blklocs and blknos when a new column is inserted at 'loc'.
    """
    cdef:
        Py_ssize_t i
        cnp.npy_intp length = blklocs.shape[0] + 1
        ndarray[intp_t, ndim=1] new_blklocs, new_blknos

    # equiv: new_blklocs = np.empty(length, dtype=np.intp)
    new_blklocs = cnp.PyArray_EMPTY(1, &length, cnp.NPY_INTP, 0)
    new_blknos = cnp.PyArray_EMPTY(1, &length, cnp.NPY_INTP, 0)

    for i in range(loc):
        new_blklocs[i] = blklocs[i]
        new_blknos[i] = blknos[i]

    new_blklocs[loc] = 0
    new_blknos[loc] = nblocks

    for i in range(loc, length - 1):
        new_blklocs[i + 1] = blklocs[i]
        new_blknos[i + 1] = blknos[i]

    return new_blklocs, new_blknos
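

# --- Illustrative example (not part of pandas; a hypothetical demo):
# inserting a column at loc=1 shifts the trailing entries right and points
# the new slot at a fresh block numbered `nblocks`.
def _demo_update_blklocs_and_blknos():
    blklocs = np.array([0, 1, 0], dtype=np.intp)  # position within each block
    blknos = np.array([0, 0, 1], dtype=np.intp)   # owning block per column
    new_blklocs, new_blknos = update_blklocs_and_blknos(blklocs, blknos, 1, 2)
    # new_blklocs -> array([0, 0, 1, 0]); new_blknos -> array([0, 2, 0, 1])
    return new_blklocs, new_blknos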


def _unpickle_block(values, placement, ndim):
    # We have to do some gymnastics b/c "ndim" is keyword-only

    from pandas.core.internals.blocks import (
        maybe_coerce_values,
        new_block,
    )
    values = maybe_coerce_values(values)

    if not isinstance(placement, BlockPlacement):
        placement = BlockPlacement(placement)
    return new_block(values, placement, ndim=ndim)


@cython.freelist(64)
cdef class SharedBlock:
    """
    Defining __init__ in a cython class significantly improves performance.
    """
    cdef:
        public BlockPlacement _mgr_locs
        public BlockValuesRefs refs
        readonly int ndim

    def __cinit__(
        self,
        values,
        placement: BlockPlacement,
        ndim: int,
        refs: BlockValuesRefs | None = None,
    ):
        """
        Parameters
        ----------
        values : np.ndarray or ExtensionArray
            We assume maybe_coerce_values has already been called.
        placement : BlockPlacement
        ndim : int
            1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame
        refs : BlockValuesRefs, optional
            Ref tracking object or None if block does not have any refs.
        """
        self._mgr_locs = placement
        self.ndim = ndim
        if refs is None:
            # if no refs are passed, that means we are creating a Block from
            # new values that it uniquely owns -> start a new BlockValuesRefs
            # object that only references this block
            self.refs = BlockValuesRefs(self)
        else:
            # if refs are passed, this is the BlockValuesRefs object that is shared
            # with the parent blocks which share the values, and a reference to this
            # new block is added
            refs.add_reference(self)
            self.refs = refs

    cpdef __reduce__(self):
        args = (self.values, self.mgr_locs.indexer, self.ndim)
        return _unpickle_block, args

    cpdef __setstate__(self, state):
        from pandas.core.construction import extract_array

        self.mgr_locs = BlockPlacement(state[0])
        self.values = extract_array(state[1], extract_numpy=True)
        if len(state) > 2:
            # we stored ndim
            self.ndim = state[2]
        else:
            # older pickle
            from pandas.core.internals.api import maybe_infer_ndim

            ndim = maybe_infer_ndim(self.values, self.mgr_locs)
            self.ndim = ndim


cdef class NumpyBlock(SharedBlock):
    cdef:
        public ndarray values

    def __cinit__(
        self,
        ndarray values,
        BlockPlacement placement,
        int ndim,
        refs: BlockValuesRefs | None = None,
    ):
        # set values here; the (implicit) call to SharedBlock.__cinit__ will
        #  set placement, ndim and refs
        self.values = values

    cpdef NumpyBlock slice_block_rows(self, slice slicer):
        """
        Perform a __getitem__-like operation specialized to slicing along the index.

        Assumes self.ndim == 2
        """
        new_values = self.values[..., slicer]
        return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)


cdef class NDArrayBackedBlock(SharedBlock):
    """
    Block backed by NDArrayBackedExtensionArray
    """
    cdef public:
        NDArrayBacked values

    def __cinit__(
        self,
        NDArrayBacked values,
        BlockPlacement placement,
        int ndim,
        refs: BlockValuesRefs | None = None,
    ):
        # set values here; the (implicit) call to SharedBlock.__cinit__ will
        #  set placement, ndim and refs
        self.values = values

    cpdef NDArrayBackedBlock slice_block_rows(self, slice slicer):
        """
        Perform a __getitem__-like operation specialized to slicing along the index.

        Assumes self.ndim == 2
        """
        new_values = self.values[..., slicer]
        return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)


cdef class Block(SharedBlock):
    cdef:
        public object values

    def __cinit__(
        self,
        object values,
        BlockPlacement placement,
        int ndim,
        refs: BlockValuesRefs | None = None,
    ):
        # set values here; the (implicit) call to SharedBlock.__cinit__ will
        #  set placement, ndim and refs
        self.values = values


@cython.freelist(64)
cdef class BlockManager:
    cdef:
        public tuple blocks
        public list axes
        public bint _known_consolidated, _is_consolidated
        public ndarray _blknos, _blklocs

    def __cinit__(
        self,
        blocks=None,
        axes=None,
        verify_integrity=True,
    ):
        # None as defaults for unpickling GH#42345
        if blocks is None:
            # This adds 1-2 microseconds to DataFrame(np.array([]))
            return

        if isinstance(blocks, list):
            # Backward compat for e.g. pyarrow
            blocks = tuple(blocks)

        self.blocks = blocks
        self.axes = axes.copy()  # copy to make sure we are not remotely-mutable

        # Populate known_consolidated, blknos, and blklocs lazily
        self._known_consolidated = False
        self._is_consolidated = False
        self._blknos = None
        self._blklocs = None

    # -------------------------------------------------------------------
    # Block Placement

    def _rebuild_blknos_and_blklocs(self) -> None:
        """
        Update mgr._blknos / mgr._blklocs.
        """
        cdef:
            intp_t blkno, i, j
            cnp.npy_intp length = self.shape[0]
            SharedBlock blk
            BlockPlacement bp
            ndarray[intp_t, ndim=1] new_blknos, new_blklocs

        # equiv: np.empty(length, dtype=np.intp)
        new_blknos = cnp.PyArray_EMPTY(1, &length, cnp.NPY_INTP, 0)
        new_blklocs = cnp.PyArray_EMPTY(1, &length, cnp.NPY_INTP, 0)
        # equiv: new_blknos.fill(-1)
        cnp.PyArray_FILLWBYTE(new_blknos, -1)
        cnp.PyArray_FILLWBYTE(new_blklocs, -1)

        for blkno, blk in enumerate(self.blocks):
            bp = blk._mgr_locs
            # Iterating over `bp` is a faster equivalent to
            #  new_blknos[bp.indexer] = blkno
            #  new_blklocs[bp.indexer] = np.arange(len(bp))
            for i, j in enumerate(bp):
                new_blknos[j] = blkno
                new_blklocs[j] = i

        for i in range(length):
            # faster than `for blkno in new_blknos`
            #  https://github.com/cython/cython/issues/4393
            blkno = new_blknos[i]

            # If there are any -1s remaining, this indicates that our mgr_locs
            #  are invalid.
            if blkno == -1:
                raise AssertionError("Gaps in blk ref_locs")

        self._blknos = new_blknos
        self._blklocs = new_blklocs

    # -------------------------------------------------------------------
    # Pickle

    cpdef __reduce__(self):
        if len(self.axes) == 1:
            # SingleBlockManager, __init__ expects Block, axis
            args = (self.blocks[0], self.axes[0])
        else:
            args = (self.blocks, self.axes)
        return type(self), args

    cpdef __setstate__(self, state):
        from pandas.core.construction import extract_array
        from pandas.core.internals.blocks import (
            ensure_block_shape,
            maybe_coerce_values,
            new_block,
        )
        from pandas.core.internals.managers import ensure_index

        if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
            state = state[3]["0.14.1"]
            axes = [ensure_index(ax) for ax in state["axes"]]
            ndim = len(axes)

            for blk in state["blocks"]:
                vals = blk["values"]
                # older versions may hold e.g. DatetimeIndex instead of DTA
                vals = extract_array(vals, extract_numpy=True)
                blk["values"] = maybe_coerce_values(ensure_block_shape(vals, ndim=ndim))

                if not isinstance(blk["mgr_locs"], BlockPlacement):
                    blk["mgr_locs"] = BlockPlacement(blk["mgr_locs"])

            nbs = [
                new_block(blk["values"], blk["mgr_locs"], ndim=ndim)
                for blk in state["blocks"]
            ]
            blocks = tuple(nbs)
            self.blocks = blocks
            self.axes = axes

        else:  # pragma: no cover
            raise NotImplementedError("pre-0.14.1 pickles are no longer supported")

        self._post_setstate()

    def _post_setstate(self) -> None:
        self._is_consolidated = False
        self._known_consolidated = False
        self._rebuild_blknos_and_blklocs()

    # -------------------------------------------------------------------
    # Indexing

    cdef BlockManager _slice_mgr_rows(self, slice slobj):
        cdef:
            SharedBlock blk, nb
            BlockManager mgr
            ndarray blknos, blklocs

        nbs = []
        for blk in self.blocks:
            nb = blk.slice_block_rows(slobj)
            nbs.append(nb)

        new_axes = [self.axes[0], self.axes[1]._getitem_slice(slobj)]
        mgr = type(self)(tuple(nbs), new_axes, verify_integrity=False)

        # We can avoid having to rebuild blklocs/blknos
        blklocs = self._blklocs
        blknos = self._blknos
        if blknos is not None:
            mgr._blknos = blknos.copy()
            mgr._blklocs = blklocs.copy()
        return mgr

    def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager:

        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(slobj)
        elif axis == 1:
            return self._slice_mgr_rows(slobj)
        else:
            raise IndexError("Requested axis not found in manager")

        new_axes = list(self.axes)
        new_axes[axis] = new_axes[axis]._getitem_slice(slobj)

        return type(self)(tuple(new_blocks), new_axes, verify_integrity=False)


cdef class BlockValuesRefs:
    """Tracks all references to a given array.

    Keeps track of all blocks (through weak references) that reference the same
    data.
    """
    cdef:
        public list referenced_blocks

    def __cinit__(self, blk: SharedBlock | None = None) -> None:
        if blk is not None:
            self.referenced_blocks = [weakref.ref(blk)]
        else:
            self.referenced_blocks = []

    def add_reference(self, blk: SharedBlock) -> None:
        """Adds a new reference to our reference collection.

        Parameters
        ----------
        blk : SharedBlock
            The block that the new reference should point to.
        """
        self.referenced_blocks.append(weakref.ref(blk))

    def add_index_reference(self, index: object) -> None:
        """Adds a new reference to our reference collection when creating an index.

        Parameters
        ----------
        index : Index
            The index that the new reference should point to.
        """
        self.referenced_blocks.append(weakref.ref(index))

    def has_reference(self) -> bool:
        """Checks if block has foreign references.

        A reference is only relevant if it is still alive. The reference to
        ourselves does not count.

        Returns
        -------
        bool
        """
        self.referenced_blocks = [
            ref for ref in self.referenced_blocks if ref() is not None
        ]
        # Checking for more references than block pointing to itself
        return len(self.referenced_blocks) > 1
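
    # --- Illustrative note (not from the pandas sources; a hypothetical
    # example): dead weakrefs are pruned lazily, so has_reference() only
    # counts blocks that are still alive.
    #
    #   >>> refs = BlockValuesRefs(some_block)
    #   >>> refs.has_reference()   # only the block itself is registered
    #   False
    #   >>> refs.add_reference(another_block)
    #   >>> refs.has_reference()
    #   True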

Binary file not shown.

174
teil20/lib/python3.11/site-packages/pandas/_libs/interval.pyi
Normal file
@@ -0,0 +1,174 @@
from typing import (
    Any,
    Generic,
    TypeVar,
    overload,
)

import numpy as np
import numpy.typing as npt

from pandas._typing import (
    IntervalClosedType,
    Timedelta,
    Timestamp,
)

VALID_CLOSED: frozenset[str]

_OrderableScalarT = TypeVar("_OrderableScalarT", int, float)
_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta)
_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta)

class _LengthDescriptor:
    @overload
    def __get__(
        self, instance: Interval[_OrderableScalarT], owner: Any
    ) -> _OrderableScalarT: ...
    @overload
    def __get__(
        self, instance: Interval[_OrderableTimesT], owner: Any
    ) -> Timedelta: ...

class _MidDescriptor:
    @overload
    def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ...
    @overload
    def __get__(
        self, instance: Interval[_OrderableTimesT], owner: Any
    ) -> _OrderableTimesT: ...

class IntervalMixin:
    @property
    def closed_left(self) -> bool: ...
    @property
    def closed_right(self) -> bool: ...
    @property
    def open_left(self) -> bool: ...
    @property
    def open_right(self) -> bool: ...
    @property
    def is_empty(self) -> bool: ...
    def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ...

class Interval(IntervalMixin, Generic[_OrderableT]):
    @property
    def left(self: Interval[_OrderableT]) -> _OrderableT: ...
    @property
    def right(self: Interval[_OrderableT]) -> _OrderableT: ...
    @property
    def closed(self) -> IntervalClosedType: ...
    mid: _MidDescriptor
    length: _LengthDescriptor
    def __init__(
        self,
        left: _OrderableT,
        right: _OrderableT,
        closed: IntervalClosedType = ...,
    ) -> None: ...
    def __hash__(self) -> int: ...
    @overload
    def __contains__(
        self: Interval[Timedelta], key: Timedelta | Interval[Timedelta]
    ) -> bool: ...
    @overload
    def __contains__(
        self: Interval[Timestamp], key: Timestamp | Interval[Timestamp]
    ) -> bool: ...
    @overload
    def __contains__(
        self: Interval[_OrderableScalarT],
        key: _OrderableScalarT | Interval[_OrderableScalarT],
    ) -> bool: ...
    @overload
    def __add__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __add__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __add__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __radd__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __radd__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __radd__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __sub__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __sub__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __sub__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __rsub__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __rsub__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __rsub__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __mul__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __mul__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __rmul__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __rmul__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __truediv__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __truediv__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __floordiv__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ...
    def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...

def intervals_to_interval_bounds(
    intervals: np.ndarray, validate_closed: bool = ...
) -> tuple[np.ndarray, np.ndarray, IntervalClosedType]: ...

class IntervalTree(IntervalMixin):
    def __init__(
        self,
        left: np.ndarray,
        right: np.ndarray,
        closed: IntervalClosedType = ...,
        leaf_size: int = ...,
    ) -> None: ...
    @property
    def mid(self) -> np.ndarray: ...
    @property
    def length(self) -> np.ndarray: ...
    def get_indexer(self, target) -> npt.NDArray[np.intp]: ...
    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
    _na_count: int
    @property
    def is_overlapping(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    def clear_mapping(self) -> None: ...
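
# --- Illustrative note (not part of pandas): the overloads above make
# Interval arithmetic preserve the parameter type, e.g. (hypothetical usage)
#   pd.Interval(0, 5) + 2      -> Interval[int]
#   pd.Interval(0.0, 5.0) * 2  -> Interval[float]
#   pd.Interval(Timestamp("2024-01-01"), Timestamp("2024-01-02"))
#       + Timedelta("1 day")   -> Interval[Timestamp]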

683
teil20/lib/python3.11/site-packages/pandas/_libs/interval.pyx
Normal file
@@ -0,0 +1,683 @@
import numbers
from operator import (
    le,
    lt,
)

from cpython.datetime cimport (
    PyDelta_Check,
    import_datetime,
)

import_datetime()

cimport cython
from cpython.object cimport PyObject_RichCompare
from cython cimport Py_ssize_t

import numpy as np

cimport numpy as cnp
from numpy cimport (
    NPY_QUICKSORT,
    PyArray_ArgSort,
    PyArray_Take,
    float64_t,
    int64_t,
    ndarray,
    uint64_t,
)

cnp.import_array()


from pandas._libs cimport util
from pandas._libs.hashtable cimport Int64Vector
from pandas._libs.tslibs.timedeltas cimport _Timedelta
from pandas._libs.tslibs.timestamps cimport _Timestamp
from pandas._libs.tslibs.timezones cimport tz_compare
from pandas._libs.tslibs.util cimport (
    is_float_object,
    is_integer_object,
    is_timedelta64_object,
)

VALID_CLOSED = frozenset(["left", "right", "both", "neither"])


cdef class IntervalMixin:

    @property
    def closed_left(self):
        """
        Check if the interval is closed on the left side.

        For the meaning of `closed` and `open` see :class:`~pandas.Interval`.

        Returns
        -------
        bool
            True if the Interval is closed on the left-side.

        See Also
        --------
        Interval.closed_right : Check if the interval is closed on the right side.
        Interval.open_left : Boolean inverse of closed_left.

        Examples
        --------
        >>> iv = pd.Interval(0, 5, closed='left')
        >>> iv.closed_left
        True

        >>> iv = pd.Interval(0, 5, closed='right')
        >>> iv.closed_left
        False
        """
        return self.closed in ("left", "both")

    @property
    def closed_right(self):
        """
        Check if the interval is closed on the right side.

        For the meaning of `closed` and `open` see :class:`~pandas.Interval`.

        Returns
        -------
        bool
            True if the Interval is closed on the right-side.

        See Also
        --------
        Interval.closed_left : Check if the interval is closed on the left side.
        Interval.open_right : Boolean inverse of closed_right.

        Examples
        --------
        >>> iv = pd.Interval(0, 5, closed='both')
        >>> iv.closed_right
        True

        >>> iv = pd.Interval(0, 5, closed='left')
        >>> iv.closed_right
        False
        """
        return self.closed in ("right", "both")

    @property
    def open_left(self):
        """
        Check if the interval is open on the left side.

        For the meaning of `closed` and `open` see :class:`~pandas.Interval`.

        Returns
        -------
        bool
            True if the Interval is not closed on the left-side.

        See Also
        --------
        Interval.open_right : Check if the interval is open on the right side.
        Interval.closed_left : Boolean inverse of open_left.

        Examples
        --------
        >>> iv = pd.Interval(0, 5, closed='neither')
        >>> iv.open_left
        True

        >>> iv = pd.Interval(0, 5, closed='both')
        >>> iv.open_left
        False
        """
        return not self.closed_left

    @property
    def open_right(self):
        """
        Check if the interval is open on the right side.

        For the meaning of `closed` and `open` see :class:`~pandas.Interval`.

        Returns
        -------
        bool
            True if the Interval is not closed on the right-side.

        See Also
        --------
        Interval.open_left : Check if the interval is open on the left side.
        Interval.closed_right : Boolean inverse of open_right.

        Examples
        --------
        >>> iv = pd.Interval(0, 5, closed='left')
        >>> iv.open_right
        True

        >>> iv = pd.Interval(0, 5)
        >>> iv.open_right
        False
        """
        return not self.closed_right
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def mid(self):
 | 
			
		||||
        """
 | 
			
		||||
        Return the midpoint of the Interval.
 | 
			
		||||
 | 
			
		||||
        Examples
 | 
			
		||||
        --------
 | 
			
		||||
        >>> iv = pd.Interval(0, 5)
 | 
			
		||||
        >>> iv.mid
 | 
			
		||||
        2.5
 | 
			
		||||
        """
 | 
			
		||||
        try:
 | 
			
		||||
            return 0.5 * (self.left + self.right)
 | 
			
		||||
        except TypeError:
 | 
			
		||||
            # datetime safe version
 | 
			
		||||
            return self.left + 0.5 * self.length
 | 
			
		||||

    @property
    def length(self):
        """
        Return the length of the Interval.

        See Also
        --------
        Interval.is_empty : Indicates if an interval contains no points.

        Examples
        --------
        >>> interval = pd.Interval(left=1, right=2, closed='left')
        >>> interval
        Interval(1, 2, closed='left')
        >>> interval.length
        1
        """
        return self.right - self.left

    @property
    def is_empty(self):
        """
        Indicates if an interval is empty, meaning it contains no points.

        Returns
        -------
        bool or ndarray
            A boolean indicating if a scalar :class:`Interval` is empty, or a
            boolean ``ndarray`` positionally indicating if an ``Interval`` in
            an :class:`~arrays.IntervalArray` or :class:`IntervalIndex` is
            empty.

        See Also
        --------
        Interval.length : Return the length of the Interval.

        Examples
        --------
        An :class:`Interval` that contains points is not empty:

        >>> pd.Interval(0, 1, closed='right').is_empty
        False

        An ``Interval`` that does not contain any points is empty:

        >>> pd.Interval(0, 0, closed='right').is_empty
        True
        >>> pd.Interval(0, 0, closed='left').is_empty
        True
        >>> pd.Interval(0, 0, closed='neither').is_empty
        True

        An ``Interval`` that contains a single point is not empty:

        >>> pd.Interval(0, 0, closed='both').is_empty
        False

        An :class:`~arrays.IntervalArray` or :class:`IntervalIndex` returns a
        boolean ``ndarray`` positionally indicating if an ``Interval`` is
        empty:

        >>> ivs = [pd.Interval(0, 0, closed='neither'),
        ...        pd.Interval(1, 2, closed='neither')]
        >>> pd.arrays.IntervalArray(ivs).is_empty
        array([ True, False])

        Missing values are not considered empty:

        >>> ivs = [pd.Interval(0, 0, closed='neither'), np.nan]
        >>> pd.IntervalIndex(ivs).is_empty
        array([ True, False])
        """
        return (self.right == self.left) & (self.closed != "both")

    def _check_closed_matches(self, other, name="other"):
        """
        Check if the closed attribute of `other` matches.

        Note that 'left' and 'right' are considered different from 'both'.

        Parameters
        ----------
        other : Interval, IntervalIndex, IntervalArray
        name : str
            Name to use for 'other' in the error message.

        Raises
        ------
        ValueError
            When `other` is not closed exactly the same as self.
        """
        if self.closed != other.closed:
            raise ValueError(f"'{name}.closed' is {repr(other.closed)}, "
                             f"expected {repr(self.closed)}.")


cdef bint _interval_like(other):
    return (hasattr(other, "left")
            and hasattr(other, "right")
            and hasattr(other, "closed"))


cdef class Interval(IntervalMixin):
    """
    Immutable object implementing an Interval, a bounded slice-like interval.

    Parameters
    ----------
    left : orderable scalar
        Left bound for the interval.
    right : orderable scalar
        Right bound for the interval.
    closed : {'right', 'left', 'both', 'neither'}, default 'right'
        Whether the interval is closed on the left-side, right-side, both or
        neither. See the Notes for a more detailed explanation.

    See Also
    --------
    IntervalIndex : An Index of Interval objects that are all closed on the
        same side.
    cut : Convert continuous data into discrete bins (Categorical
        of Interval objects).
    qcut : Convert continuous data into bins (Categorical of Interval objects)
        based on quantiles.
    Period : Represents a period of time.

    Notes
    -----
    The parameters `left` and `right` must be of the same type, they must be
    comparable, and they must satisfy ``left <= right``.

    A closed interval (in mathematics denoted by square brackets) contains
    its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the
    conditions ``0 <= x <= 5``. This is what ``closed='both'`` stands for.
    An open interval (in mathematics denoted by parentheses) does not contain
    its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the
    conditions ``0 < x < 5``. This is what ``closed='neither'`` stands for.
    Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is
    described by ``0 <= x < 5`` (``closed='left'``) and ``(0, 5]`` is
    described by ``0 < x <= 5`` (``closed='right'``).

    Examples
    --------
    It is possible to build Intervals of different types, like numeric ones:

    >>> iv = pd.Interval(left=0, right=5)
    >>> iv
    Interval(0, 5, closed='right')

    You can check if an element belongs to it, or if it contains another interval:

    >>> 2.5 in iv
    True
    >>> pd.Interval(left=2, right=5, closed='both') in iv
    True

    You can test the bounds (``closed='right'``, so ``0 < x <= 5``):

    >>> 0 in iv
    False
    >>> 5 in iv
    True
    >>> 0.0001 in iv
    True

    Calculate its length:

    >>> iv.length
    5

    You can operate with `+` and `*` over an Interval and the operation
    is applied to each of its bounds, so the result depends on the type
    of the bound elements:

    >>> shifted_iv = iv + 3
    >>> shifted_iv
    Interval(3, 8, closed='right')
    >>> extended_iv = iv * 10.0
    >>> extended_iv
    Interval(0.0, 50.0, closed='right')

    To create a time interval you can use Timestamps as the bounds:

    >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'),
    ...                         pd.Timestamp('2018-01-01 00:00:00'),
    ...                         closed='left')
    >>> pd.Timestamp('2017-01-01 00:00') in year_2017
    True
    >>> year_2017.length
    Timedelta('365 days 00:00:00')
    """
    _typ = "interval"
    __array_priority__ = 1000

    cdef readonly object left
    """
    Left bound for the interval.

    Examples
    --------
    >>> interval = pd.Interval(left=1, right=2, closed='left')
    >>> interval
    Interval(1, 2, closed='left')
    >>> interval.left
    1
    """

    cdef readonly object right
    """
    Right bound for the interval.

    Examples
    --------
    >>> interval = pd.Interval(left=1, right=2, closed='left')
    >>> interval
    Interval(1, 2, closed='left')
    >>> interval.right
    2
    """

    cdef readonly str closed
    """
    String describing the inclusive side of the interval.

    Either ``left``, ``right``, ``both`` or ``neither``.

    Examples
    --------
    >>> interval = pd.Interval(left=1, right=2, closed='left')
    >>> interval
    Interval(1, 2, closed='left')
    >>> interval.closed
    'left'
    """

    def __init__(self, left, right, str closed="right"):
        # note: it is faster to just do these checks than to use a special
        # constructor (__cinit__/__new__) to avoid them

        self._validate_endpoint(left)
        self._validate_endpoint(right)

        if closed not in VALID_CLOSED:
            raise ValueError(f"invalid option for 'closed': {closed}")
        if not left <= right:
            raise ValueError("left side of interval must be <= right side")
        if (isinstance(left, _Timestamp) and
                not tz_compare(left.tzinfo, right.tzinfo)):
            # GH 18538
            raise ValueError("left and right must have the same time zone, got "
                             f"{repr(left.tzinfo)} and {repr(right.tzinfo)}")
        self.left = left
        self.right = right
        self.closed = closed

    def _validate_endpoint(self, endpoint):
        # GH 23013
        if not (is_integer_object(endpoint) or is_float_object(endpoint) or
                isinstance(endpoint, (_Timestamp, _Timedelta))):
            raise ValueError("Only numeric, Timestamp and Timedelta endpoints "
                             "are allowed when constructing an Interval.")

    def __hash__(self):
        return hash((self.left, self.right, self.closed))

    def __contains__(self, key) -> bool:
        if _interval_like(key):
            key_closed_left = key.closed in ("left", "both")
            key_closed_right = key.closed in ("right", "both")
            if self.open_left and key_closed_left:
                left_contained = self.left < key.left
            else:
                left_contained = self.left <= key.left
            if self.open_right and key_closed_right:
                right_contained = key.right < self.right
            else:
                right_contained = key.right <= self.right
            return left_contained and right_contained
        return ((self.left < key if self.open_left else self.left <= key) and
                (key < self.right if self.open_right else key <= self.right))
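
    # Editor's note (illustrative sketch, not part of the original source):
    # the interval-in-interval branch above makes containment respect
    # closedness, assuming standard pandas semantics:
    #
    #   >>> pd.Interval(1, 4, closed='both') in pd.Interval(0, 5, closed='neither')
    #   True
    #   >>> # a closed endpoint is not contained by a matching open endpoint:
    #   >>> pd.Interval(0, 5, closed='both') in pd.Interval(0, 5, closed='neither')
    #   False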

    def __richcmp__(self, other, op: int):
        if isinstance(other, Interval):
            self_tuple = (self.left, self.right, self.closed)
            other_tuple = (other.left, other.right, other.closed)
            return PyObject_RichCompare(self_tuple, other_tuple, op)
        elif util.is_array(other):
            return np.array(
                [PyObject_RichCompare(self, x, op) for x in other],
                dtype=bool,
            )

        return NotImplemented

    def __reduce__(self):
        args = (self.left, self.right, self.closed)
        return (type(self), args)
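
    # Editor's note (illustrative sketch, not part of the original source):
    # __reduce__ above makes Interval picklable by rebuilding it from
    # (left, right, closed):
    #
    #   >>> import pickle
    #   >>> iv = pd.Interval(0, 5, closed='both')
    #   >>> pickle.loads(pickle.dumps(iv)) == iv
    #   True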

    def _repr_base(self):
        left = self.left
        right = self.right

        # TODO: need more general formatting methodology here
        if isinstance(left, _Timestamp) and isinstance(right, _Timestamp):
            left = left._short_repr
            right = right._short_repr

        return left, right

    def __repr__(self) -> str:

        left, right = self._repr_base()
        disp = str if isinstance(left, np.generic) else repr
        name = type(self).__name__
        repr_str = f"{name}({disp(left)}, {disp(right)}, closed={repr(self.closed)})"
        return repr_str

    def __str__(self) -> str:

        left, right = self._repr_base()
        start_symbol = "[" if self.closed_left else "("
        end_symbol = "]" if self.closed_right else ")"
        return f"{start_symbol}{left}, {right}{end_symbol}"

    def __add__(self, y):
        if (
            isinstance(y, numbers.Number)
            or PyDelta_Check(y)
            or is_timedelta64_object(y)
        ):
            return Interval(self.left + y, self.right + y, closed=self.closed)
        elif (
            # __radd__ pattern
            # TODO(cython3): remove this
            isinstance(y, Interval)
            and (
                isinstance(self, numbers.Number)
                or PyDelta_Check(self)
                or is_timedelta64_object(self)
            )
        ):
            return Interval(y.left + self, y.right + self, closed=y.closed)
        return NotImplemented

    def __radd__(self, other):
        if (
                isinstance(other, numbers.Number)
                or PyDelta_Check(other)
                or is_timedelta64_object(other)
        ):
            return Interval(self.left + other, self.right + other, closed=self.closed)
        return NotImplemented

    def __sub__(self, y):
        if (
            isinstance(y, numbers.Number)
            or PyDelta_Check(y)
            or is_timedelta64_object(y)
        ):
            return Interval(self.left - y, self.right - y, closed=self.closed)
        return NotImplemented

    def __mul__(self, y):
        if isinstance(y, numbers.Number):
            return Interval(self.left * y, self.right * y, closed=self.closed)
        elif isinstance(y, Interval) and isinstance(self, numbers.Number):
            # __rmul__ pattern (mirrors the __radd__ pattern above)
            # TODO(cython3): remove this
            return Interval(y.left * self, y.right * self, closed=y.closed)
        return NotImplemented

    def __rmul__(self, other):
        if isinstance(other, numbers.Number):
            return Interval(self.left * other, self.right * other, closed=self.closed)
        return NotImplemented

    def __truediv__(self, y):
        if isinstance(y, numbers.Number):
            return Interval(self.left / y, self.right / y, closed=self.closed)
        return NotImplemented

    def __floordiv__(self, y):
        if isinstance(y, numbers.Number):
            return Interval(
                self.left // y, self.right // y, closed=self.closed)
        return NotImplemented

    def overlaps(self, other):
        """
        Check whether two Interval objects overlap.

        Two intervals overlap if they share a common point, including closed
        endpoints. Intervals that only have an open endpoint in common do not
        overlap.

        Parameters
        ----------
        other : Interval
            Interval to check against for an overlap.

        Returns
        -------
        bool
            True if the two intervals overlap.

        See Also
        --------
        IntervalArray.overlaps : The corresponding method for IntervalArray.
        IntervalIndex.overlaps : The corresponding method for IntervalIndex.

        Examples
        --------
        >>> i1 = pd.Interval(0, 2)
        >>> i2 = pd.Interval(1, 3)
        >>> i1.overlaps(i2)
        True
        >>> i3 = pd.Interval(4, 5)
        >>> i1.overlaps(i3)
        False

        Intervals that share closed endpoints overlap:

        >>> i4 = pd.Interval(0, 1, closed='both')
        >>> i5 = pd.Interval(1, 2, closed='both')
        >>> i4.overlaps(i5)
        True

        Intervals that only have an open endpoint in common do not overlap:

        >>> i6 = pd.Interval(1, 2, closed='neither')
        >>> i4.overlaps(i6)
        False
        """
        if not isinstance(other, Interval):
            raise TypeError("`other` must be an Interval, "
                            f"got {type(other).__name__}")

        # equality is okay if both endpoints are closed (overlap at a point)
        op1 = le if (self.closed_left and other.closed_right) else lt
        op2 = le if (other.closed_left and self.closed_right) else lt

        # overlapping is equivalent to the negation of the two intervals
        # being disjoint:
        # disjoint = (A.left > B.right) or (B.left > A.right)
        # (simplifying the negation allows this to be done in fewer operations)
        return op1(self.left, other.right) and op2(other.left, self.right)
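

# Editor's note (illustrative sketch, not part of the original source): in
# Interval.overlaps above, op1/op2 fall back to ``lt`` unless both touching
# endpoints are closed, so only a shared *closed* point counts as an overlap:
#
#   >>> pd.Interval(0, 1, closed='right').overlaps(pd.Interval(1, 2, closed='left'))
#   True     # the point 1 belongs to both intervals
#   >>> pd.Interval(0, 1, closed='left').overlaps(pd.Interval(1, 2, closed='right'))
#   False    # 1 is an open endpoint of both, so there is no common point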


@cython.wraparound(False)
@cython.boundscheck(False)
def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True):
    """
    Parameters
    ----------
    intervals : ndarray
        Object array of Intervals / nulls.

    validate_closed : bool, default True
        Boolean indicating if all intervals must be closed on the same side.
        Mismatching closed will raise if True, else return None for closed.

    Returns
    -------
    tuple of
        left : ndarray
        right : ndarray
        closed : IntervalClosedType
    """
    cdef:
        object closed = None, interval
        Py_ssize_t i, n = len(intervals)
        ndarray left, right
        bint seen_closed = False

    left = np.empty(n, dtype=intervals.dtype)
    right = np.empty(n, dtype=intervals.dtype)

    for i in range(n):
        interval = intervals[i]
        if interval is None or util.is_nan(interval):
            left[i] = np.nan
            right[i] = np.nan
            continue

        if not isinstance(interval, Interval):
            raise TypeError(f"type {type(interval)} with value "
                            f"{interval} is not an interval")

        left[i] = interval.left
        right[i] = interval.right
        if not seen_closed:
            seen_closed = True
            closed = interval.closed
        elif closed != interval.closed:
            closed = None
            if validate_closed:
                raise ValueError("intervals must all be closed on the same side")

    return left, right, closed
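
# Editor's note (illustrative sketch, not part of the original source):
# expected behavior on an object array of same-sided intervals; the returned
# bound arrays keep the input's object dtype:
#
#   >>> arr = np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object)
#   >>> intervals_to_interval_bounds(arr)
#   (array([0, 1], dtype=object), array([1, 2], dtype=object), 'right')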


include "intervaltree.pxi"
@@ -0,0 +1,434 @@
"""
Template for intervaltree

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

from pandas._libs.algos import is_monotonic

ctypedef fused int_scalar_t:
    int64_t
    float64_t

ctypedef fused uint_scalar_t:
    uint64_t
    float64_t

ctypedef fused scalar_t:
    int_scalar_t
    uint_scalar_t

# ----------------------------------------------------------------------
# IntervalTree
# ----------------------------------------------------------------------

cdef class IntervalTree(IntervalMixin):
    """A centered interval tree

    Based on the algorithm described on Wikipedia:
    https://en.wikipedia.org/wiki/Interval_tree

    We are emulating the IndexEngine interface.
    """
    cdef readonly:
        ndarray left, right
        IntervalNode root
        object dtype
        str closed
        object _is_overlapping, _left_sorter, _right_sorter
        Py_ssize_t _na_count

    def __init__(self, left, right, closed='right', leaf_size=100):
        """
        Parameters
        ----------
        left, right : np.ndarray[ndim=1]
            Left and right bounds for each interval. Assumed to contain no
            NaNs.
        closed : {'left', 'right', 'both', 'neither'}, optional
            Whether the intervals are closed on the left-side, right-side, both
            or neither. Defaults to 'right'.
        leaf_size : int, optional
            Parameter that controls when the tree switches from creating nodes
            to brute-force search. Tune this parameter to optimize query
            performance.
        """
        if closed not in ['left', 'right', 'both', 'neither']:
            raise ValueError("invalid option for 'closed': %s" % closed)

        left = np.asarray(left)
        right = np.asarray(right)
        self.dtype = np.result_type(left, right)
        self.left = np.asarray(left, dtype=self.dtype)
        self.right = np.asarray(right, dtype=self.dtype)

        indices = np.arange(len(left), dtype='int64')

        self.closed = closed

        # GH 23352: ensure no nan in nodes
        mask = ~np.isnan(self.left)
        self._na_count = len(mask) - mask.sum()
        self.left = self.left[mask]
        self.right = self.right[mask]
        indices = indices[mask]

        node_cls = NODE_CLASSES[str(self.dtype), closed]
        self.root = node_cls(self.left, self.right, indices, leaf_size)
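
    # Editor's note (illustrative sketch, not part of the original source):
    # a minimal construction-and-query example for this internal engine,
    # assuming float64 bounds and the default closed='right':
    #
    #   >>> tree = IntervalTree(np.array([0.0, 2.0]), np.array([1.0, 4.0]))
    #   >>> tree.get_indexer(np.array([0.5, 3.0]))   # intervals (0, 1], (2, 4]
    #   array([0, 1])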

    @property
    def left_sorter(self) -> np.ndarray:
        """How to sort the left labels; this is used for binary search.
        """
        if self._left_sorter is None:
            values = [self.right, self.left]
            self._left_sorter = np.lexsort(values)
        return self._left_sorter

    @property
    def right_sorter(self) -> np.ndarray:
        """How to sort the right labels.
        """
        if self._right_sorter is None:
            self._right_sorter = np.argsort(self.right)
        return self._right_sorter

    @property
    def is_overlapping(self) -> bool:
        """
        Determine if the IntervalTree contains overlapping intervals.
        Cached as self._is_overlapping.
        """
        if self._is_overlapping is not None:
            return self._is_overlapping

        # <= when both sides closed since endpoints can overlap
        op = le if self.closed == 'both' else lt

        # overlap if start of current interval < end of previous interval
        # (current and previous in terms of sorted order by left/start side)
        current = self.left[self.left_sorter[1:]]
        previous = self.right[self.left_sorter[:-1]]
        self._is_overlapping = bool(op(current, previous).any())

        return self._is_overlapping

    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Return True if the IntervalTree is monotonically increasing (only
        equal or increasing values), else False.
        """
        if self._na_count > 0:
            return False

        sort_order = self.left_sorter
        return is_monotonic(sort_order, False)[0]

    def get_indexer(self, ndarray[scalar_t, ndim=1] target) -> np.ndarray:
        """Return the positions corresponding to unique intervals that overlap
        with the given array of scalar targets.
        """

        # TODO: write get_indexer_intervals
        cdef:
            Py_ssize_t old_len
            Py_ssize_t i
            Int64Vector result

        result = Int64Vector()
        old_len = 0
        for i in range(len(target)):
            try:
                self.root.query(result, target[i])
            except OverflowError:
                # overflow -> no match, which is already handled below
                pass

            if result.data.n == old_len:
                result.append(-1)
            elif result.data.n > old_len + 1:
                raise KeyError(
                    'indexer does not intersect a unique set of intervals')
            old_len = result.data.n
        return result.to_array().astype('intp')

    def get_indexer_non_unique(self, ndarray[scalar_t, ndim=1] target):
        """Return the positions corresponding to intervals that overlap with
        the given array of scalar targets. Non-unique positions are repeated.
        """
        cdef:
            Py_ssize_t old_len
            Py_ssize_t i
            Int64Vector result, missing

        result = Int64Vector()
        missing = Int64Vector()
        old_len = 0
        for i in range(len(target)):
            try:
                self.root.query(result, target[i])
            except OverflowError:
                # overflow -> no match, which is already handled below
                pass

            if result.data.n == old_len:
                result.append(-1)
                missing.append(i)
            old_len = result.data.n
        return (result.to_array().astype('intp'),
                missing.to_array().astype('intp'))
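
    # Editor's note (illustrative sketch, not part of the original source):
    # unlike get_indexer, the non-unique variant tolerates overlapping
    # matches and reports unmatched targets separately:
    #
    #   >>> tree = IntervalTree(np.array([0.0, 1.0]), np.array([2.0, 3.0]))
    #   >>> tree.get_indexer_non_unique(np.array([1.5, 10.0]))
    #   (array([ 0,  1, -1]), array([1]))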

    def __repr__(self) -> str:
        return ('<IntervalTree[{dtype},{closed}]: '
                '{n_elements} elements>'.format(
                    dtype=self.dtype, closed=self.closed,
                    n_elements=self.root.n_elements))

    # compat with IndexEngine interface
    def clear_mapping(self) -> None:
        pass


cdef take(ndarray source, ndarray indices):
    """Take the given positions from a 1D ndarray
    """
    return PyArray_Take(source, indices, 0)


cdef sort_values_and_indices(all_values, all_indices, subset):
    indices = take(all_indices, subset)
    values = take(all_values, subset)
    sorter = PyArray_ArgSort(values, 0, NPY_QUICKSORT)
    sorted_values = take(values, sorter)
    sorted_indices = take(indices, sorter)
    return sorted_values, sorted_indices


# ----------------------------------------------------------------------
# Nodes
# ----------------------------------------------------------------------

@cython.internal
cdef class IntervalNode:
    cdef readonly:
        int64_t n_elements, n_center, leaf_size
        bint is_leaf_node

    def __repr__(self) -> str:
        if self.is_leaf_node:
            return (
                f"<{type(self).__name__}: {self.n_elements} elements (terminal)>"
            )
        else:
            n_left = self.left_node.n_elements
            n_right = self.right_node.n_elements
            n_center = self.n_elements - n_left - n_right
            return (
                f"<{type(self).__name__}: "
                f"pivot {self.pivot}, {self.n_elements} elements "
                f"({n_left} left, {n_right} right, {n_center} overlapping)>"
            )

    def counts(self):
        """
        Inspect counts on this node;
        useful for debugging purposes.
        """
        if self.is_leaf_node:
            return self.n_elements
        else:
            m = len(self.center_left_values)
            l = self.left_node.counts()
            r = self.right_node.counts()
            return (m, (l, r))


# we need specialized nodes and leaves to optimize for different dtype and
# closed values

{{py:

nodes = []
for dtype in ['float64', 'int64', 'uint64']:
    for closed, cmp_left, cmp_right in [
        ('left', '<=', '<'),
        ('right', '<', '<='),
        ('both', '<=', '<='),
        ('neither', '<', '<')]:
        cmp_left_converse = '<' if cmp_left == '<=' else '<='
        cmp_right_converse = '<' if cmp_right == '<=' else '<='
        if dtype.startswith('int'):
            fused_prefix = 'int_'
        elif dtype.startswith('uint'):
            fused_prefix = 'uint_'
        elif dtype.startswith('float'):
            fused_prefix = ''
        nodes.append((dtype, dtype.title(),
                      closed, closed.title(),
                      cmp_left,
                      cmp_right,
                      cmp_left_converse,
                      cmp_right_converse,
                      fused_prefix))

}}
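
# Editor's note: the Tempita loop above enumerates every (dtype, closed)
# combination; each tuple is expanded below into one concrete node class,
# e.g. dtype='float64' with closed='right' yields
# Float64ClosedRightIntervalNode, registered in NODE_CLASSES under the
# key ('float64', 'right').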

NODE_CLASSES = {}

{{for dtype, dtype_title, closed, closed_title, cmp_left, cmp_right,
      cmp_left_converse, cmp_right_converse, fused_prefix in nodes}}


@cython.internal
cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode(IntervalNode):
    """Non-terminal node for an IntervalTree

    Categorizes intervals by those that fall to the left, those that fall to
    the right, and those that overlap with the pivot.
    """
    cdef readonly:
        {{dtype_title}}Closed{{closed_title}}IntervalNode left_node, right_node
        {{dtype}}_t[:] center_left_values, center_right_values, left, right
        int64_t[:] center_left_indices, center_right_indices, indices
        {{dtype}}_t min_left, max_right
        {{dtype}}_t pivot

    def __init__(self,
                 ndarray[{{dtype}}_t, ndim=1] left,
                 ndarray[{{dtype}}_t, ndim=1] right,
                 ndarray[int64_t, ndim=1] indices,
                 int64_t leaf_size):

        self.n_elements = len(left)
        self.leaf_size = leaf_size

        # min_left and max_right are used to speed up query by skipping
        # query on sub-nodes. If this node has size 0, query is cheap,
        # so these values don't matter.
        if left.size > 0:
            self.min_left = left.min()
            self.max_right = right.max()
        else:
            self.min_left = 0
            self.max_right = 0

        if self.n_elements <= leaf_size:
            # make this a terminal (leaf) node
            self.is_leaf_node = True
            self.left = left
            self.right = right
            self.indices = indices
            self.n_center = 0
        else:
            # calculate a pivot so we can create child nodes
            self.is_leaf_node = False
            self.pivot = np.median(left / 2 + right / 2)
            if np.isinf(self.pivot):
                self.pivot = cython.cast({{dtype}}_t, 0)
                if self.pivot > np.max(right):
                    self.pivot = np.max(left)
                if self.pivot < np.min(left):
                    self.pivot = np.min(right)

            left_set, right_set, center_set = self.classify_intervals(
                left, right)

            self.left_node = self.new_child_node(left, right,
                                                 indices, left_set)
            self.right_node = self.new_child_node(left, right,
                                                  indices, right_set)

            self.center_left_values, self.center_left_indices = \
                sort_values_and_indices(left, indices, center_set)
            self.center_right_values, self.center_right_indices = \
                sort_values_and_indices(right, indices, center_set)
            self.n_center = len(self.center_left_indices)

    @cython.wraparound(False)
    @cython.boundscheck(False)
    cdef classify_intervals(self, {{dtype}}_t[:] left, {{dtype}}_t[:] right):
        """Classify the given intervals based upon whether they fall to the
        left, right, or overlap with this node's pivot.
        """
        cdef:
            Int64Vector left_ind, right_ind, overlapping_ind
            Py_ssize_t i

        left_ind = Int64Vector()
        right_ind = Int64Vector()
        overlapping_ind = Int64Vector()

        for i in range(self.n_elements):
            if right[i] {{cmp_right_converse}} self.pivot:
                left_ind.append(i)
            elif self.pivot {{cmp_left_converse}} left[i]:
                right_ind.append(i)
            else:
                overlapping_ind.append(i)

        return (left_ind.to_array(),
                right_ind.to_array(),
                overlapping_ind.to_array())

    cdef new_child_node(self,
                        ndarray[{{dtype}}_t, ndim=1] left,
                        ndarray[{{dtype}}_t, ndim=1] right,
                        ndarray[int64_t, ndim=1] indices,
                        ndarray[int64_t, ndim=1] subset):
        """Create a new child node.
        """
        left = take(left, subset)
        right = take(right, subset)
        indices = take(indices, subset)
        return {{dtype_title}}Closed{{closed_title}}IntervalNode(
            left, right, indices, self.leaf_size)

    @cython.wraparound(False)
    @cython.boundscheck(False)
    @cython.initializedcheck(False)
    cpdef query(self, Int64Vector result, {{fused_prefix}}scalar_t point):
        """Recursively query this node and its sub-nodes for intervals that
        overlap with the query point.
        """
        cdef:
            int64_t[:] indices
            {{dtype}}_t[:] values
            Py_ssize_t i

        if self.is_leaf_node:
            # Once we get down to a certain size, it doesn't make sense to
            # continue the binary tree structure. Instead, we use linear
            # search.
            for i in range(self.n_elements):
                if self.left[i] {{cmp_left}} point {{cmp_right}} self.right[i]:
                    result.append(self.indices[i])
        else:
            # There are child nodes. Based on comparing our query to the pivot,
            # look at the center values, then go to the relevant child.
            if point < self.pivot:
                values = self.center_left_values
                indices = self.center_left_indices
                for i in range(self.n_center):
                    if not values[i] {{cmp_left}} point:
                        break
                    result.append(indices[i])
                if point {{cmp_right}} self.left_node.max_right:
                    self.left_node.query(result, point)
            elif point > self.pivot:
                values = self.center_right_values
                indices = self.center_right_indices
                for i in range(self.n_center - 1, -1, -1):
                    if not point {{cmp_right}} values[i]:
                        break
                    result.append(indices[i])
                if self.right_node.min_left {{cmp_left}} point:
                    self.right_node.query(result, point)
            else:
                result.extend(self.center_left_indices)


NODE_CLASSES['{{dtype}}',
             '{{closed}}'] = {{dtype_title}}Closed{{closed_title}}IntervalNode

{{endfor}}
Binary file not shown.

78  teil20/lib/python3.11/site-packages/pandas/_libs/join.pyi  Normal file
@@ -0,0 +1,78 @@
import numpy as np

from pandas._typing import npt

def inner_join(
    left: np.ndarray,  # const intp_t[:]
    right: np.ndarray,  # const intp_t[:]
    max_groups: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def left_outer_join(
    left: np.ndarray,  # const intp_t[:]
    right: np.ndarray,  # const intp_t[:]
    max_groups: int,
    sort: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def full_outer_join(
    left: np.ndarray,  # const intp_t[:]
    right: np.ndarray,  # const intp_t[:]
    max_groups: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def ffill_indexer(
    indexer: np.ndarray,  # const intp_t[:]
) -> npt.NDArray[np.intp]: ...
def left_join_indexer_unique(
    left: np.ndarray,  # ndarray[join_t]
    right: np.ndarray,  # ndarray[join_t]
) -> npt.NDArray[np.intp]: ...
def left_join_indexer(
    left: np.ndarray,  # ndarray[join_t]
    right: np.ndarray,  # ndarray[join_t]
) -> tuple[
    np.ndarray,  # np.ndarray[join_t]
    npt.NDArray[np.intp],
    npt.NDArray[np.intp],
]: ...
def inner_join_indexer(
    left: np.ndarray,  # ndarray[join_t]
    right: np.ndarray,  # ndarray[join_t]
) -> tuple[
    np.ndarray,  # np.ndarray[join_t]
    npt.NDArray[np.intp],
    npt.NDArray[np.intp],
]: ...
def outer_join_indexer(
    left: np.ndarray,  # ndarray[join_t]
    right: np.ndarray,  # ndarray[join_t]
) -> tuple[
    np.ndarray,  # np.ndarray[join_t]
    npt.NDArray[np.intp],
    npt.NDArray[np.intp],
]: ...
def asof_join_backward_on_X_by_Y(
    left_values: np.ndarray,  # ndarray[numeric_t]
    right_values: np.ndarray,  # ndarray[numeric_t]
    left_by_values: np.ndarray,  # ndarray[by_t]
    right_by_values: np.ndarray,  # ndarray[by_t]
    allow_exact_matches: bool = ...,
    tolerance: np.number | float | None = ...,
    use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_forward_on_X_by_Y(
    left_values: np.ndarray,  # ndarray[numeric_t]
    right_values: np.ndarray,  # ndarray[numeric_t]
    left_by_values: np.ndarray,  # ndarray[by_t]
    right_by_values: np.ndarray,  # ndarray[by_t]
    allow_exact_matches: bool = ...,
    tolerance: np.number | float | None = ...,
    use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_nearest_on_X_by_Y(
    left_values: np.ndarray,  # ndarray[numeric_t]
    right_values: np.ndarray,  # ndarray[numeric_t]
    left_by_values: np.ndarray,  # ndarray[by_t]
    right_by_values: np.ndarray,  # ndarray[by_t]
    allow_exact_matches: bool = ...,
    tolerance: np.number | float | None = ...,
    use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
887  teil20/lib/python3.11/site-packages/pandas/_libs/join.pyx  Normal file
@@ -0,0 +1,887 @@
cimport cython
from cython cimport Py_ssize_t
import numpy as np

cimport numpy as cnp
from numpy cimport (
    int64_t,
    intp_t,
    ndarray,
    uint64_t,
)

cnp.import_array()

from pandas._libs.algos import groupsort_indexer

from pandas._libs.dtypes cimport (
    numeric_object_t,
    numeric_t,
)


@cython.wraparound(False)
@cython.boundscheck(False)
def inner_join(const intp_t[:] left, const intp_t[:] right,
               Py_ssize_t max_groups):
    cdef:
        Py_ssize_t i, j, k, count = 0
        intp_t[::1] left_sorter, right_sorter
        intp_t[::1] left_count, right_count
        intp_t[::1] left_indexer, right_indexer
        intp_t lc, rc
        Py_ssize_t left_pos = 0, right_pos = 0, position = 0
        Py_ssize_t offset

    left_sorter, left_count = groupsort_indexer(left, max_groups)
    right_sorter, right_count = groupsort_indexer(right, max_groups)

    with nogil:
        # First pass, determine size of result set, do not use the NA group
        for i in range(1, max_groups + 1):
            lc = left_count[i]
            rc = right_count[i]

            if rc > 0 and lc > 0:
                count += lc * rc

    left_indexer = np.empty(count, dtype=np.intp)
    right_indexer = np.empty(count, dtype=np.intp)

    with nogil:
        # exclude the NA group
        left_pos = left_count[0]
        right_pos = right_count[0]
        for i in range(1, max_groups + 1):
            lc = left_count[i]
            rc = right_count[i]

            if rc > 0 and lc > 0:
                for j in range(lc):
                    offset = position + j * rc
                    for k in range(rc):
                        left_indexer[offset + k] = left_pos + j
                        right_indexer[offset + k] = right_pos + k
                position += lc * rc
            left_pos += lc
            right_pos += rc

        # Will overwrite left/right indexer with the result
        _get_result_indexer(left_sorter, left_indexer)
        _get_result_indexer(right_sorter, right_indexer)

    return np.asarray(left_indexer), np.asarray(right_indexer)
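
# Editor's note (illustrative sketch, not part of the original source):
# left/right hold factorized group labels (-1 marks the NA group, which is
# skipped). Each label present on both sides contributes the cross product
# of its rows:
#
#   >>> inner_join(np.array([0, 1, 2], dtype=np.intp),
#   ...            np.array([1, 1, 2], dtype=np.intp), max_groups=3)
#   (array([1, 1, 2]), array([0, 1, 2]))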


@cython.wraparound(False)
@cython.boundscheck(False)
def left_outer_join(const intp_t[:] left, const intp_t[:] right,
                    Py_ssize_t max_groups, bint sort=True):
    cdef:
        Py_ssize_t i, j, k, count = 0
        ndarray[intp_t] rev
        intp_t[::1] left_count, right_count
        intp_t[::1] left_sorter, right_sorter
        intp_t[::1] left_indexer, right_indexer
        intp_t lc, rc
        Py_ssize_t left_pos = 0, right_pos = 0, position = 0
        Py_ssize_t offset

    left_sorter, left_count = groupsort_indexer(left, max_groups)
    right_sorter, right_count = groupsort_indexer(right, max_groups)

    with nogil:
        # First pass, determine size of result set, do not use the NA group
        for i in range(1, max_groups + 1):
            lc = left_count[i]
            rc = right_count[i]

            if rc > 0:
                count += lc * rc
            else:
                count += lc

    left_indexer = np.empty(count, dtype=np.intp)
    right_indexer = np.empty(count, dtype=np.intp)

    with nogil:
        # exclude the NA group
        left_pos = left_count[0]
        right_pos = right_count[0]
        for i in range(1, max_groups + 1):
            lc = left_count[i]
            rc = right_count[i]

            if rc == 0:
                for j in range(lc):
                    left_indexer[position + j] = left_pos + j
                    right_indexer[position + j] = -1
                position += lc
            else:
                for j in range(lc):
                    offset = position + j * rc
                    for k in range(rc):
                        left_indexer[offset + k] = left_pos + j
                        right_indexer[offset + k] = right_pos + k
                position += lc * rc
            left_pos += lc
            right_pos += rc

        # Will overwrite left/right indexer with the result
        _get_result_indexer(left_sorter, left_indexer)
        _get_result_indexer(right_sorter, right_indexer)

    if not sort:  # if not asked to sort, revert to original order
        if len(left) == len(left_indexer):
            # no multiple matches for any row on the left
            # this is a short-cut to avoid groupsort_indexer
            # otherwise, the `else` path also works in this case
            rev = np.empty(len(left), dtype=np.intp)
            rev.put(np.asarray(left_sorter), np.arange(len(left)))
        else:
            rev, _ = groupsort_indexer(left_indexer, len(left))

        return np.asarray(left_indexer).take(rev), np.asarray(right_indexer).take(rev)
    else:
        return np.asarray(left_indexer), np.asarray(right_indexer)
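
# Editor's note (illustrative sketch, not part of the original source):
# left rows whose group has no match on the right get -1 in the right
# indexer:
#
#   >>> left_outer_join(np.array([0, 1], dtype=np.intp),
#   ...                 np.array([1, 1], dtype=np.intp), max_groups=2)
#   (array([0, 1, 1]), array([-1,  0,  1]))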
 | 
			
		||||
 | 
			
		||||
@cython.wraparound(False)
 | 
			
		||||
@cython.boundscheck(False)
 | 
			
		||||
def full_outer_join(const intp_t[:] left, const intp_t[:] right,
 | 
			
		||||
                    Py_ssize_t max_groups):
 | 
			
		||||
    cdef:
 | 
			
		||||
        Py_ssize_t i, j, k, count = 0
 | 
			
		||||
        intp_t[::1] left_sorter, right_sorter
 | 
			
		||||
        intp_t[::1] left_count, right_count
 | 
			
		||||
        intp_t[::1] left_indexer, right_indexer
 | 
			
		||||
        intp_t lc, rc
 | 
			
		||||
        intp_t left_pos = 0, right_pos = 0
 | 
			
		||||
        Py_ssize_t offset, position = 0
 | 
			
		||||
 | 
			
		||||
    left_sorter, left_count = groupsort_indexer(left, max_groups)
 | 
			
		||||
    right_sorter, right_count = groupsort_indexer(right, max_groups)
 | 
			
		||||
 | 
			
		||||
    with nogil:
 | 
			
		||||
        # First pass, determine size of result set, do not use the NA group
 | 
			
		||||
        for i in range(1, max_groups + 1):
 | 
			
		||||
            lc = left_count[i]
 | 
			
		||||
            rc = right_count[i]
 | 
			
		||||
 | 
			
		||||
            if rc > 0 and lc > 0:
 | 
			
		||||
                count += lc * rc
 | 
			
		||||
            else:
 | 
			
		||||
                count += lc + rc
 | 
			
		||||
 | 
			
		||||
    left_indexer = np.empty(count, dtype=np.intp)
 | 
			
		||||
    right_indexer = np.empty(count, dtype=np.intp)
 | 
			
		||||
 | 
			
		||||
    with nogil:
 | 
			
		||||
        # exclude the NA group
 | 
			
		||||
        left_pos = left_count[0]
 | 
			
		||||
        right_pos = right_count[0]
 | 
			
		||||
        for i in range(1, max_groups + 1):
 | 
			
		||||
            lc = left_count[i]
 | 
			
		||||
            rc = right_count[i]
 | 
			
		||||
 | 
			
		||||
            if rc == 0:
 | 
			
		||||
                for j in range(lc):
 | 
			
		||||
                    left_indexer[position + j] = left_pos + j
 | 
			
		||||
                    right_indexer[position + j] = -1
 | 
			
		||||
                position += lc
 | 
			
		||||
            elif lc == 0:
 | 
			
		||||
                for j in range(rc):
 | 
			
		||||
                    left_indexer[position + j] = -1
 | 
			
		||||
                    right_indexer[position + j] = right_pos + j
 | 
			
		||||
                position += rc
 | 
			
		||||
            else:
 | 
			
		||||
                for j in range(lc):
 | 
			
		||||
                    offset = position + j * rc
 | 
			
		||||
                    for k in range(rc):
 | 
			
		||||
                        left_indexer[offset + k] = left_pos + j
 | 
			
		||||
                        right_indexer[offset + k] = right_pos + k
 | 
			
		||||
                position += lc * rc
 | 
			
		||||
            left_pos += lc
 | 
			
		||||
            right_pos += rc
 | 
			
		||||
 | 
			
		||||
        # Will overwrite left/right indexer with the result
 | 
			
		||||
        _get_result_indexer(left_sorter, left_indexer)
 | 
			
		||||
        _get_result_indexer(right_sorter, right_indexer)
 | 
			
		||||
 | 
			
		||||
    return np.asarray(left_indexer), np.asarray(right_indexer)
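

# A minimal usage sketch, assuming the compiled module is importable as
# pandas._libs.join: `left` and `right` are factorized group labels in
# [0, max_groups), with -1 marking the NA group, and the returned indexers
# align matching groups, emitting -1 where one side has no match.
#
#     import numpy as np
#     from pandas._libs.join import full_outer_join
#
#     left = np.array([0, 1], dtype=np.intp)
#     right = np.array([1, 2], dtype=np.intp)
#     li, ri = full_outer_join(left, right, 3)
#     # li -> array([ 0,  1, -1]); ri -> array([-1,  0,  1])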


@cython.wraparound(False)
@cython.boundscheck(False)
cdef void _get_result_indexer(intp_t[::1] sorter, intp_t[::1] indexer) noexcept nogil:
    """NOTE: overwrites indexer with the result to avoid allocating another array"""
    cdef:
        Py_ssize_t i, n, idx

    if len(sorter) > 0:
        # cython-only equivalent to
        #  `res = algos.take_nd(sorter, indexer, fill_value=-1)`
        n = indexer.shape[0]
        for i in range(n):
            idx = indexer[i]
            if idx == -1:
                indexer[i] = -1
            else:
                indexer[i] = sorter[idx]
    else:
        # length-0 case
        indexer[:] = -1


@cython.wraparound(False)
@cython.boundscheck(False)
def ffill_indexer(const intp_t[:] indexer) -> np.ndarray:
    cdef:
        Py_ssize_t i, n = len(indexer)
        ndarray[intp_t] result
        intp_t val, last_obs

    result = np.empty(n, dtype=np.intp)
    last_obs = -1

    for i in range(n):
        val = indexer[i]
        if val == -1:
            result[i] = last_obs
        else:
            result[i] = val
            last_obs = val

    return result
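

# A minimal sketch, assuming the compiled module is importable as
# pandas._libs.join: each -1 entry is replaced by the last observed
# non-negative position (leading -1s stay -1).
#
#     import numpy as np
#     from pandas._libs.join import ffill_indexer
#
#     ffill_indexer(np.array([-1, 0, -1, 2], dtype=np.intp))
#     # -> array([-1, 0, 0, 2])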


# ----------------------------------------------------------------------
# left_join_indexer, inner_join_indexer, outer_join_indexer
# ----------------------------------------------------------------------

# Joins on ordered, unique indices

# right might contain non-unique values

@cython.wraparound(False)
@cython.boundscheck(False)
def left_join_indexer_unique(
    ndarray[numeric_object_t] left,
    ndarray[numeric_object_t] right
):
    """
    Both left and right are strictly monotonic increasing.
    """
    cdef:
        Py_ssize_t i, j, nleft, nright
        ndarray[intp_t] indexer
        numeric_object_t rval

    i = 0
    j = 0
    nleft = len(left)
    nright = len(right)

    indexer = np.empty(nleft, dtype=np.intp)
    while True:
        if i == nleft:
            break

        if j == nright:
            indexer[i] = -1
            i += 1
            continue

        rval = right[j]

        while i < nleft - 1 and left[i] == rval:
            indexer[i] = j
            i += 1

        if left[i] == rval:
            indexer[i] = j
            i += 1
            while i < nleft - 1 and left[i] == rval:
                indexer[i] = j
                i += 1
            j += 1
        elif left[i] > rval:
            indexer[i] = -1
            j += 1
        else:
            indexer[i] = -1
            i += 1
    return indexer
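

# A minimal sketch, assuming importability as pandas._libs.join: for each
# element of the sorted `left`, the result holds the matching position in
# `right`, or -1 where the value is absent.
#
#     import numpy as np
#     from pandas._libs.join import left_join_indexer_unique
#
#     left_join_indexer_unique(np.array([1.0, 2.0, 4.0]),
#                              np.array([2.0, 4.0, 5.0]))
#     # -> array([-1, 0, 1])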


@cython.wraparound(False)
@cython.boundscheck(False)
def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
    """
    Two-pass algorithm for monotonic indexes. Handles many-to-one merges.

    Both left and right are monotonic increasing, but at least one of them
    is non-unique (if both were unique we'd use left_join_indexer_unique).
    """
    cdef:
        Py_ssize_t i, j, nright, nleft, count
        numeric_object_t lval, rval
        ndarray[intp_t] lindexer, rindexer
        ndarray[numeric_object_t] result

    nleft = len(left)
    nright = len(right)

    # First pass is to find the size 'count' of our output indexers.
    i = 0
    j = 0
    count = 0
    if nleft > 0:
        while i < nleft:
            if j == nright:
                count += nleft - i
                break

            lval = left[i]
            rval = right[j]

            if lval == rval:
                # This block is identical across
                #  left_join_indexer, inner_join_indexer, outer_join_indexer
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                count += 1
                i += 1
            else:
                j += 1

    # do it again now that result size is known

    lindexer = np.empty(count, dtype=np.intp)
    rindexer = np.empty(count, dtype=np.intp)
    result = np.empty(count, dtype=left.dtype)

    i = 0
    j = 0
    count = 0
    if nleft > 0:
        while i < nleft:
            if j == nright:
                while i < nleft:
                    lindexer[count] = i
                    rindexer[count] = -1
                    result[count] = left[i]
                    i += 1
                    count += 1
                break

            lval = left[i]
            rval = right[j]

            if lval == rval:
                lindexer[count] = i
                rindexer[count] = j
                result[count] = lval
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                # i.e. lval not in right; we keep for left_join_indexer
                lindexer[count] = i
                rindexer[count] = -1
                result[count] = lval
                count += 1
                i += 1
            else:
                # i.e. rval not in left; we discard for left_join_indexer
                j += 1

    return result, lindexer, rindexer
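

# A minimal sketch, assuming importability as pandas._libs.join: the join
# keeps every left value and returns the merged values plus positional
# indexers into both inputs (-1 means "no match on the right").
#
#     import numpy as np
#     from pandas._libs.join import left_join_indexer
#
#     values, li, ri = left_join_indexer(np.array([0.0, 1.0, 2.0]),
#                                        np.array([1.0, 2.0, 3.0]))
#     # values -> array([0., 1., 2.]); li -> array([0, 1, 2]); ri -> array([-1, 0, 1])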


@cython.wraparound(False)
@cython.boundscheck(False)
def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
    """
    Two-pass algorithm for monotonic indexes. Handles many-to-one merges.

    Both left and right are monotonic increasing but not necessarily unique.
    """
    cdef:
        Py_ssize_t i, j, nright, nleft, count
        numeric_object_t lval, rval
        ndarray[intp_t] lindexer, rindexer
        ndarray[numeric_object_t] result

    nleft = len(left)
    nright = len(right)

    # First pass is to find the size 'count' of our output indexers.
    i = 0
    j = 0
    count = 0
    if nleft > 0 and nright > 0:
        while True:
            if i == nleft:
                break
            if j == nright:
                break

            lval = left[i]
            rval = right[j]
            if lval == rval:
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                # i.e. lval not in right; we discard for inner_indexer
                i += 1
            else:
                # i.e. rval not in left; we discard for inner_indexer
                j += 1

    # do it again now that result size is known

    lindexer = np.empty(count, dtype=np.intp)
    rindexer = np.empty(count, dtype=np.intp)
    result = np.empty(count, dtype=left.dtype)

    i = 0
    j = 0
    count = 0
    if nleft > 0 and nright > 0:
        while True:
            if i == nleft:
                break
            if j == nright:
                break

            lval = left[i]
            rval = right[j]
            if lval == rval:
                lindexer[count] = i
                rindexer[count] = j
                result[count] = lval
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                # i.e. lval not in right; we discard for inner_indexer
                i += 1
            else:
                # i.e. rval not in left; we discard for inner_indexer
                j += 1

    return result, lindexer, rindexer
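

# A minimal sketch, assuming importability as pandas._libs.join: only
# values present on both sides survive.
#
#     import numpy as np
#     from pandas._libs.join import inner_join_indexer
#
#     values, li, ri = inner_join_indexer(np.array([0.0, 1.0, 2.0]),
#                                         np.array([1.0, 2.0, 3.0]))
#     # values -> array([1., 2.]); li -> array([1, 2]); ri -> array([0, 1])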


@cython.wraparound(False)
@cython.boundscheck(False)
def outer_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
    """
    Both left and right are monotonic increasing but not necessarily unique.
    """
    cdef:
        Py_ssize_t i, j, nright, nleft, count
        numeric_object_t lval, rval
        ndarray[intp_t] lindexer, rindexer
        ndarray[numeric_object_t] result

    nleft = len(left)
    nright = len(right)

    # First pass is to find the size 'count' of our output indexers.
    # count will be length of left plus the number of elements of right not in
    # left (counting duplicates)
    i = 0
    j = 0
    count = 0
    if nleft == 0:
        count = nright
    elif nright == 0:
        count = nleft
    else:
        while True:
            if i == nleft:
                count += nright - j
                break
            if j == nright:
                count += nleft - i
                break

            lval = left[i]
            rval = right[j]
            if lval == rval:
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                count += 1
                i += 1
            else:
                count += 1
                j += 1

    lindexer = np.empty(count, dtype=np.intp)
    rindexer = np.empty(count, dtype=np.intp)
    result = np.empty(count, dtype=left.dtype)

    # do it again, but populate the indexers / result

    i = 0
    j = 0
    count = 0
    if nleft == 0:
        for j in range(nright):
            lindexer[j] = -1
            rindexer[j] = j
            result[j] = right[j]
    elif nright == 0:
        for i in range(nleft):
            lindexer[i] = i
            rindexer[i] = -1
            result[i] = left[i]
    else:
        while True:
            if i == nleft:
                while j < nright:
                    lindexer[count] = -1
                    rindexer[count] = j
                    result[count] = right[j]
                    count += 1
                    j += 1
                break
            if j == nright:
                while i < nleft:
                    lindexer[count] = i
                    rindexer[count] = -1
                    result[count] = left[i]
                    count += 1
                    i += 1
                break

            lval = left[i]
            rval = right[j]

            if lval == rval:
                lindexer[count] = i
                rindexer[count] = j
                result[count] = lval
                count += 1
                if i < nleft - 1:
                    if j < nright - 1 and right[j + 1] == rval:
                        j += 1
                    else:
                        i += 1
                        if left[i] != rval:
                            j += 1
                elif j < nright - 1:
                    j += 1
                    if lval != right[j]:
                        i += 1
                else:
                    # end of the road
                    break
            elif lval < rval:
                # i.e. lval not in right; we keep for outer_join_indexer
                lindexer[count] = i
                rindexer[count] = -1
                result[count] = lval
                count += 1
                i += 1
            else:
                # i.e. rval not in left; we keep for outer_join_indexer
                lindexer[count] = -1
                rindexer[count] = j
                result[count] = rval
                count += 1
                j += 1

    return result, lindexer, rindexer
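

# A minimal sketch, assuming importability as pandas._libs.join: the sorted
# union of both sides comes back, with -1 in an indexer wherever that side
# lacks the value.
#
#     import numpy as np
#     from pandas._libs.join import outer_join_indexer
#
#     values, li, ri = outer_join_indexer(np.array([0.0, 1.0, 2.0]),
#                                         np.array([1.0, 2.0, 3.0]))
#     # values -> array([0., 1., 2., 3.])
#     # li -> array([ 0,  1,  2, -1]); ri -> array([-1,  0,  1,  2])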


# ----------------------------------------------------------------------
# asof_join_by
# ----------------------------------------------------------------------

from pandas._libs.hashtable cimport (
    HashTable,
    Int64HashTable,
    PyObjectHashTable,
    UInt64HashTable,
)

ctypedef fused by_t:
    object
    int64_t
    uint64_t


def asof_join_backward_on_X_by_Y(ndarray[numeric_t] left_values,
                                 ndarray[numeric_t] right_values,
                                 ndarray[by_t] left_by_values,
                                 ndarray[by_t] right_by_values,
                                 bint allow_exact_matches=True,
                                 tolerance=None,
                                 bint use_hashtable=True):

    cdef:
        Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
        ndarray[intp_t] left_indexer, right_indexer
        bint has_tolerance = False
        numeric_t tolerance_ = 0
        numeric_t diff = 0
        HashTable hash_table
        by_t by_value

    # if we are using tolerance, set our objects
    if tolerance is not None:
        has_tolerance = True
        tolerance_ = tolerance

    left_size = len(left_values)
    right_size = len(right_values)

    left_indexer = np.empty(left_size, dtype=np.intp)
    right_indexer = np.empty(left_size, dtype=np.intp)

    if use_hashtable:
        if by_t is object:
            hash_table = PyObjectHashTable(right_size)
        elif by_t is int64_t:
            hash_table = Int64HashTable(right_size)
        elif by_t is uint64_t:
            hash_table = UInt64HashTable(right_size)

    right_pos = 0
    for left_pos in range(left_size):
        # restart right_pos if it went negative in a previous iteration
        if right_pos < 0:
            right_pos = 0

        # find last position in right whose value is less than left's
        if allow_exact_matches:
            while (right_pos < right_size and
                   right_values[right_pos] <= left_values[left_pos]):
                if use_hashtable:
                    hash_table.set_item(right_by_values[right_pos], right_pos)
                right_pos += 1
        else:
            while (right_pos < right_size and
                   right_values[right_pos] < left_values[left_pos]):
                if use_hashtable:
                    hash_table.set_item(right_by_values[right_pos], right_pos)
                right_pos += 1
        right_pos -= 1

        # save positions as the desired index
        if use_hashtable:
            by_value = left_by_values[left_pos]
            found_right_pos = (hash_table.get_item(by_value)
                               if by_value in hash_table else -1)
        else:
            found_right_pos = right_pos

        left_indexer[left_pos] = left_pos
        right_indexer[left_pos] = found_right_pos

        # if needed, verify that tolerance is met
        if has_tolerance and found_right_pos != -1:
            diff = left_values[left_pos] - right_values[found_right_pos]
            if diff > tolerance_:
                right_indexer[left_pos] = -1

    return left_indexer, right_indexer
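

# A minimal sketch, assuming importability as pandas._libs.join: with
# use_hashtable=False the by-arrays are ignored and each left value is
# matched to the last right position whose value does not exceed it
# (strictly below it when allow_exact_matches=False).
#
#     import numpy as np
#     from pandas._libs.join import asof_join_backward_on_X_by_Y
#
#     lv = np.array([1.0, 5.0, 10.0])
#     rv = np.array([2.0, 3.0, 7.0])
#     by = np.zeros(3, dtype=np.int64)
#     li, ri = asof_join_backward_on_X_by_Y(lv, rv, by, by,
#                                           use_hashtable=False)
#     # li -> array([0, 1, 2]); ri -> array([-1, 1, 2])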


def asof_join_forward_on_X_by_Y(ndarray[numeric_t] left_values,
                                ndarray[numeric_t] right_values,
                                ndarray[by_t] left_by_values,
                                ndarray[by_t] right_by_values,
                                bint allow_exact_matches=True,
                                tolerance=None,
                                bint use_hashtable=True):

    cdef:
        Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos
        ndarray[intp_t] left_indexer, right_indexer
        bint has_tolerance = False
        numeric_t tolerance_ = 0
        numeric_t diff = 0
        HashTable hash_table
        by_t by_value

    # if we are using tolerance, set our objects
    if tolerance is not None:
        has_tolerance = True
        tolerance_ = tolerance

    left_size = len(left_values)
    right_size = len(right_values)

    left_indexer = np.empty(left_size, dtype=np.intp)
    right_indexer = np.empty(left_size, dtype=np.intp)

    if use_hashtable:
        if by_t is object:
            hash_table = PyObjectHashTable(right_size)
        elif by_t is int64_t:
            hash_table = Int64HashTable(right_size)
        elif by_t is uint64_t:
            hash_table = UInt64HashTable(right_size)

    right_pos = right_size - 1
    for left_pos in range(left_size - 1, -1, -1):
        # restart right_pos if it went over in a previous iteration
        if right_pos == right_size:
            right_pos = right_size - 1

        # find first position in right whose value is greater than left's
        if allow_exact_matches:
            while (right_pos >= 0 and
                   right_values[right_pos] >= left_values[left_pos]):
                if use_hashtable:
                    hash_table.set_item(right_by_values[right_pos], right_pos)
                right_pos -= 1
        else:
            while (right_pos >= 0 and
                   right_values[right_pos] > left_values[left_pos]):
                if use_hashtable:
                    hash_table.set_item(right_by_values[right_pos], right_pos)
                right_pos -= 1
        right_pos += 1

        # save positions as the desired index
        if use_hashtable:
            by_value = left_by_values[left_pos]
            found_right_pos = (hash_table.get_item(by_value)
                               if by_value in hash_table else -1)
        else:
            found_right_pos = (right_pos
                               if right_pos != right_size else -1)

        left_indexer[left_pos] = left_pos
        right_indexer[left_pos] = found_right_pos

        # if needed, verify that tolerance is met
        if has_tolerance and found_right_pos != -1:
            diff = right_values[found_right_pos] - left_values[left_pos]
            if diff > tolerance_:
                right_indexer[left_pos] = -1

    return left_indexer, right_indexer
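

# The forward variant mirrors the backward sketch above: with
# use_hashtable=False, each left value is matched to the first right
# position whose value is not smaller, so left [1, 5, 10] against
# right [2, 3, 7] would yield right positions [0, 2, -1].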


def asof_join_nearest_on_X_by_Y(ndarray[numeric_t] left_values,
                                ndarray[numeric_t] right_values,
                                ndarray[by_t] left_by_values,
                                ndarray[by_t] right_by_values,
                                bint allow_exact_matches=True,
                                tolerance=None,
                                bint use_hashtable=True):

    cdef:
        ndarray[intp_t] bli, bri, fli, fri

        ndarray[intp_t] left_indexer, right_indexer
        Py_ssize_t left_size, i
        numeric_t bdiff, fdiff

    # search both forward and backward
    bli, bri = asof_join_backward_on_X_by_Y(
        left_values,
        right_values,
        left_by_values,
        right_by_values,
        allow_exact_matches,
        tolerance,
        use_hashtable
    )
    fli, fri = asof_join_forward_on_X_by_Y(
        left_values,
        right_values,
        left_by_values,
        right_by_values,
        allow_exact_matches,
        tolerance,
        use_hashtable
    )

    # choose the smaller timestamp
    left_size = len(left_values)
    left_indexer = np.empty(left_size, dtype=np.intp)
    right_indexer = np.empty(left_size, dtype=np.intp)

    for i in range(len(bri)):
        # choose timestamp from right with smaller difference
        if bri[i] != -1 and fri[i] != -1:
            bdiff = left_values[bli[i]] - right_values[bri[i]]
            fdiff = right_values[fri[i]] - left_values[fli[i]]
            right_indexer[i] = bri[i] if bdiff <= fdiff else fri[i]
        else:
            right_indexer[i] = bri[i] if bri[i] != -1 else fri[i]
        left_indexer[i] = bli[i]

    return left_indexer, right_indexer
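

# A minimal sketch, assuming importability as pandas._libs.join: both
# directional searches run and, per row, the right position with the
# smaller absolute distance wins (ties go to the backward match).
#
#     import numpy as np
#     from pandas._libs.join import asof_join_nearest_on_X_by_Y
#
#     lv = np.array([1.0, 5.0, 10.0])
#     rv = np.array([2.0, 3.0, 7.0])
#     by = np.zeros(3, dtype=np.int64)
#     li, ri = asof_join_nearest_on_X_by_Y(lv, rv, by, by,
#                                          use_hashtable=False)
#     # ri -> array([0, 1, 2]): 1 matches 2, 5 ties between 3 and 7
#     # (backward wins), 10 matches 7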

Binary file not shown.

23  teil20/lib/python3.11/site-packages/pandas/_libs/json.pyi  Normal file
@@ -0,0 +1,23 @@
from typing import (
    Any,
    Callable,
)

def ujson_dumps(
    obj: Any,
    ensure_ascii: bool = ...,
    double_precision: int = ...,
    indent: int = ...,
    orient: str = ...,
    date_unit: str = ...,
    iso_dates: bool = ...,
    default_handler: None
    | Callable[[Any], str | float | bool | list | dict | None] = ...,
) -> str: ...
def ujson_loads(
    s: str,
    precise_float: bool = ...,
    numpy: bool = ...,
    dtype: None = ...,
    labelled: bool = ...,
) -> Any: ...
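
# A minimal sketch, assuming the compiled extension is importable as
# pandas._libs.json (the stub above describes its two entry points):
#
#     from pandas._libs.json import ujson_dumps, ujson_loads
#
#     ujson_dumps({"a": 1})    # -> '{"a":1}'
#     ujson_loads('{"a":1}')   # -> {'a': 1}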

129  teil20/lib/python3.11/site-packages/pandas/_libs/khash.pxd  Normal file
@@ -0,0 +1,129 @@
from cpython.object cimport PyObject
from numpy cimport (
    complex64_t,
    complex128_t,
    float32_t,
    float64_t,
    int8_t,
    int16_t,
    int32_t,
    int64_t,
    uint8_t,
    uint16_t,
    uint32_t,
    uint64_t,
)


cdef extern from "pandas/vendored/klib/khash_python.h":
    const int KHASH_TRACE_DOMAIN

    ctypedef uint32_t khuint_t
    ctypedef khuint_t khiter_t

    ctypedef struct khcomplex128_t:
        double real
        double imag

    bint are_equivalent_khcomplex128_t \
        "kh_complex_hash_equal" (khcomplex128_t a, khcomplex128_t b) nogil

    ctypedef struct khcomplex64_t:
        float real
        float imag

    bint are_equivalent_khcomplex64_t \
        "kh_complex_hash_equal" (khcomplex64_t a, khcomplex64_t b) nogil

    bint are_equivalent_float64_t \
        "kh_floats_hash_equal" (float64_t a, float64_t b) nogil

    bint are_equivalent_float32_t \
        "kh_floats_hash_equal" (float32_t a, float32_t b) nogil

    uint32_t kh_python_hash_func(object key)
    bint kh_python_hash_equal(object a, object b)

    ctypedef struct kh_pymap_t:
        khuint_t n_buckets, size, n_occupied, upper_bound
        uint32_t *flags
        PyObject **keys
        size_t *vals

    kh_pymap_t* kh_init_pymap()
    void kh_destroy_pymap(kh_pymap_t*)
    void kh_clear_pymap(kh_pymap_t*)
    khuint_t kh_get_pymap(kh_pymap_t*, PyObject*)
    void kh_resize_pymap(kh_pymap_t*, khuint_t)
    khuint_t kh_put_pymap(kh_pymap_t*, PyObject*, int*)
    void kh_del_pymap(kh_pymap_t*, khuint_t)

    bint kh_exist_pymap(kh_pymap_t*, khiter_t)

    ctypedef struct kh_pyset_t:
        khuint_t n_buckets, size, n_occupied, upper_bound
        uint32_t *flags
        PyObject **keys
        size_t *vals

    kh_pyset_t* kh_init_pyset()
    void kh_destroy_pyset(kh_pyset_t*)
    void kh_clear_pyset(kh_pyset_t*)
    khuint_t kh_get_pyset(kh_pyset_t*, PyObject*)
    void kh_resize_pyset(kh_pyset_t*, khuint_t)
    khuint_t kh_put_pyset(kh_pyset_t*, PyObject*, int*)
    void kh_del_pyset(kh_pyset_t*, khuint_t)

    bint kh_exist_pyset(kh_pyset_t*, khiter_t)

    ctypedef char* kh_cstr_t

    ctypedef struct kh_str_t:
        khuint_t n_buckets, size, n_occupied, upper_bound
        uint32_t *flags
        kh_cstr_t *keys
        size_t *vals

    kh_str_t* kh_init_str() nogil
    void kh_destroy_str(kh_str_t*) nogil
    void kh_clear_str(kh_str_t*) nogil
    khuint_t kh_get_str(kh_str_t*, kh_cstr_t) nogil
    void kh_resize_str(kh_str_t*, khuint_t) nogil
    khuint_t kh_put_str(kh_str_t*, kh_cstr_t, int*) nogil
    void kh_del_str(kh_str_t*, khuint_t) nogil

    bint kh_exist_str(kh_str_t*, khiter_t) nogil

    ctypedef struct kh_str_starts_t:
        kh_str_t *table
        int starts[256]

    kh_str_starts_t* kh_init_str_starts() nogil
    khuint_t kh_put_str_starts_item(kh_str_starts_t* table, char* key,
                                    int* ret) nogil
    khuint_t kh_get_str_starts_item(kh_str_starts_t* table, char* key) nogil
    void kh_destroy_str_starts(kh_str_starts_t*) nogil
    void kh_resize_str_starts(kh_str_starts_t*, khuint_t) nogil

    # sweep factorize

    ctypedef struct kh_strbox_t:
        khuint_t n_buckets, size, n_occupied, upper_bound
        uint32_t *flags
        kh_cstr_t *keys
        PyObject **vals

    kh_strbox_t* kh_init_strbox() nogil
    void kh_destroy_strbox(kh_strbox_t*) nogil
    void kh_clear_strbox(kh_strbox_t*) nogil
    khuint_t kh_get_strbox(kh_strbox_t*, kh_cstr_t) nogil
    void kh_resize_strbox(kh_strbox_t*, khuint_t) nogil
    khuint_t kh_put_strbox(kh_strbox_t*, kh_cstr_t, int*) nogil
    void kh_del_strbox(kh_strbox_t*, khuint_t) nogil

    bint kh_exist_strbox(kh_strbox_t*, khiter_t) nogil

    khuint_t kh_needed_n_buckets(khuint_t element_n) nogil


include "khash_for_primitive_helper.pxi"
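

# A minimal Cython sketch of the khash calling pattern declared above
# (hypothetical standalone usage; pandas itself wraps these calls in the
# hashtable classes generated from hashtable_class_helper.pxi.in):
#
#     cdef kh_int64_t *table = kh_init_int64()
#     cdef int ret = 0
#     cdef khuint_t k = kh_put_int64(table, 42, &ret)  # insert key 42
#     table.vals[k] = 7                                # attach a value
#     k = kh_get_int64(table, 42)                      # look the key up
#     if k != table.n_buckets:                         # n_buckets means "not found"
#         value = table.vals[k]
#     kh_destroy_int64(table)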

@@ -0,0 +1,44 @@
"""
Template for wrapping khash-tables for each primitive `dtype`

WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""

{{py:

# name, c_type
primitive_types = [('int64', 'int64_t'),
                   ('uint64', 'uint64_t'),
                   ('float64', 'float64_t'),
                   ('int32', 'int32_t'),
                   ('uint32', 'uint32_t'),
                   ('float32', 'float32_t'),
                   ('int16', 'int16_t'),
                   ('uint16', 'uint16_t'),
                   ('int8', 'int8_t'),
                   ('uint8', 'uint8_t'),
                   ('complex64', 'khcomplex64_t'),
                   ('complex128', 'khcomplex128_t'),
                  ]
}}

{{for name, c_type in primitive_types}}

cdef extern from "pandas/vendored/klib/khash_python.h":
    ctypedef struct kh_{{name}}_t:
        khuint_t n_buckets, size, n_occupied, upper_bound
        uint32_t *flags
        {{c_type}} *keys
        size_t *vals

    kh_{{name}}_t* kh_init_{{name}}() nogil
    void kh_destroy_{{name}}(kh_{{name}}_t*) nogil
    void kh_clear_{{name}}(kh_{{name}}_t*) nogil
    khuint_t kh_get_{{name}}(kh_{{name}}_t*, {{c_type}}) nogil
    void kh_resize_{{name}}(kh_{{name}}_t*, khuint_t) nogil
    khuint_t kh_put_{{name}}(kh_{{name}}_t*, {{c_type}}, int*) nogil
    void kh_del_{{name}}(kh_{{name}}_t*, khuint_t) nogil

    bint kh_exist_{{name}}(kh_{{name}}_t*, khiter_t) nogil

{{endfor}}
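
# For reference, one iteration of the {{for}} block above expands (with
# name=int64, c_type=int64_t) to declarations like:
#
#     ctypedef struct kh_int64_t:
#         khuint_t n_buckets, size, n_occupied, upper_bound
#         uint32_t *flags
#         int64_t *keys
#         size_t *vals
#
#     kh_int64_t* kh_init_int64() nogil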

Binary file not shown.

6  teil20/lib/python3.11/site-packages/pandas/_libs/lib.pxd  Normal file
@@ -0,0 +1,6 @@
from numpy cimport ndarray


cdef bint c_is_list_like(object, bint) except -1

cpdef ndarray eq_NA_compat(ndarray[object] arr, object key)

207  teil20/lib/python3.11/site-packages/pandas/_libs/lib.pyi  Normal file
@@ -0,0 +1,207 @@
# TODO(npdtypes): Many types specified here can be made more specific/accurate;
#  the more specific versions are specified in comments
from decimal import Decimal
from typing import (
    Any,
    Callable,
    Final,
    Generator,
    Hashable,
    Literal,
    TypeAlias,
    overload,
)

import numpy as np

from pandas._libs.interval import Interval
from pandas._libs.tslibs import Period
from pandas._typing import (
    ArrayLike,
    DtypeObj,
    TypeGuard,
    npt,
)

# placeholder until we can specify np.ndarray[object, ndim=2]
ndarray_obj_2d = np.ndarray

from enum import Enum

class _NoDefault(Enum):
    no_default = ...

no_default: Final = _NoDefault.no_default
NoDefault: TypeAlias = Literal[_NoDefault.no_default]

i8max: int
u8max: int

def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ...
def item_from_zerodim(val: object) -> object: ...
def infer_dtype(value: object, skipna: bool = ...) -> str: ...
def is_iterator(obj: object) -> bool: ...
def is_scalar(val: object) -> bool: ...
def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
def is_pyarrow_array(obj: object) -> bool: ...
def is_period(val: object) -> TypeGuard[Period]: ...
def is_interval(val: object) -> TypeGuard[Interval]: ...
def is_decimal(val: object) -> TypeGuard[Decimal]: ...
def is_complex(val: object) -> TypeGuard[complex]: ...
def is_bool(val: object) -> TypeGuard[bool | np.bool_]: ...
def is_integer(val: object) -> TypeGuard[int | np.integer]: ...
def is_int_or_none(obj) -> bool: ...
def is_float(val: object) -> TypeGuard[float]: ...
def is_interval_array(values: np.ndarray) -> bool: ...
def is_datetime64_array(values: np.ndarray) -> bool: ...
def is_timedelta_or_timedelta64_array(values: np.ndarray) -> bool: ...
def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ...
def is_time_array(values: np.ndarray, skipna: bool = ...): ...
def is_date_array(values: np.ndarray, skipna: bool = ...): ...
def is_datetime_array(values: np.ndarray, skipna: bool = ...): ...
def is_string_array(values: np.ndarray, skipna: bool = ...): ...
def is_float_array(values: np.ndarray, skipna: bool = ...): ...
def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
def is_bool_array(values: np.ndarray, skipna: bool = ...): ...
def fast_multiget(mapping: dict, keys: np.ndarray, default=...) -> np.ndarray: ...
def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
def map_infer(
    arr: np.ndarray,
    f: Callable[[Any], Any],
    convert: bool = ...,
    ignore_na: bool = ...,
) -> np.ndarray: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: Literal[False] = ...,
    convert_to_nullable_dtype: Literal[False] = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> npt.NDArray[np.object_ | np.number]: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: bool = ...,
    convert_to_nullable_dtype: Literal[True] = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_objects(
    objects: npt.NDArray[np.object_],
    *,
    try_float: bool = ...,
    safe: bool = ...,
    convert_numeric: bool = ...,
    convert_non_numeric: bool = ...,
    convert_to_nullable_dtype: bool = ...,
    dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_numeric(
    values: npt.NDArray[np.object_],
    na_values: set,
    convert_empty: bool = ...,
    coerce_numeric: bool = ...,
    convert_to_masked_nullable: Literal[False] = ...,
) -> tuple[np.ndarray, None]: ...
@overload
def maybe_convert_numeric(
    values: npt.NDArray[np.object_],
    na_values: set,
    convert_empty: bool = ...,
    coerce_numeric: bool = ...,
    *,
    convert_to_masked_nullable: Literal[True],
) -> tuple[np.ndarray, np.ndarray]: ...

# TODO: restrict `arr`?
def ensure_string_array(
    arr,
    na_value: object = ...,
    convert_na_value: bool = ...,
    copy: bool = ...,
    skipna: bool = ...,
) -> npt.NDArray[np.object_]: ...
def convert_nans_to_NA(
    arr: npt.NDArray[np.object_],
) -> npt.NDArray[np.object_]: ...
def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ...

# TODO: can we be more specific about rows?
def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ...
def tuples_to_object_array(
    tuples: npt.NDArray[np.object_],
) -> ndarray_obj_2d: ...

# TODO: can we be more specific about rows?
def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ...
def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ...
def maybe_booleans_to_slice(
    mask: npt.NDArray[np.uint8],
) -> slice | npt.NDArray[np.uint8]: ...
def maybe_indices_to_slice(
    indices: npt.NDArray[np.intp],
    max_len: int,
) -> slice | npt.NDArray[np.intp]: ...
def is_all_arraylike(obj: list) -> bool: ...

# -----------------------------------------------------------------
# Functions which in reality take memoryviews

def memory_usage_of_objects(arr: np.ndarray) -> int: ...  # object[:]  # np.int64
def map_infer_mask(
    arr: np.ndarray,
    f: Callable[[Any], Any],
    mask: np.ndarray,  # const uint8_t[:]
    convert: bool = ...,
    na_value: Any = ...,
    dtype: np.dtype = ...,
) -> np.ndarray: ...
def indices_fast(
    index: npt.NDArray[np.intp],
    labels: np.ndarray,  # const int64_t[:]
    keys: list,
    sorted_labels: list[npt.NDArray[np.int64]],
) -> dict[Hashable, npt.NDArray[np.intp]]: ...
def generate_slices(
    labels: np.ndarray, ngroups: int  # const intp_t[:]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
def count_level_2d(
    mask: np.ndarray,  # ndarray[uint8_t, ndim=2, cast=True],
    labels: np.ndarray,  # const intp_t[:]
    max_bin: int,
) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=2]
def get_level_sorter(
    label: np.ndarray,  # const int64_t[:]
    starts: np.ndarray,  # const intp_t[:]
) -> np.ndarray: ...  # np.ndarray[np.intp, ndim=1]
def generate_bins_dt64(
    values: npt.NDArray[np.int64],
    binner: np.ndarray,  # const int64_t[:]
    closed: object = ...,
    hasnans: bool = ...,
) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=1]
def array_equivalent_object(
    left: npt.NDArray[np.object_],
    right: npt.NDArray[np.object_],
) -> bool: ...
def has_infs(arr: np.ndarray) -> bool: ...  # const floating[:]
def has_only_ints_or_nan(arr: np.ndarray) -> bool: ...  # const floating[:]
def get_reverse_indexer(
    indexer: np.ndarray,  # const intp_t[:]
    length: int,
) -> npt.NDArray[np.intp]: ...
def is_bool_list(obj: list) -> bool: ...
def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
def is_range_indexer(
    left: np.ndarray, n: int  # np.ndarray[np.int64, ndim=1]
) -> bool: ...
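
# A minimal sketch exercising a few of the stubs above, assuming the
# compiled extension is importable as pandas._libs.lib:
#
#     from pandas._libs import lib
#
#     lib.infer_dtype(["a", "b"])   # -> 'string'
#     lib.is_list_like((1, 2))      # -> True
#     lib.is_scalar(3.5)            # -> True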

3169  teil20/lib/python3.11/site-packages/pandas/_libs/lib.pyx  Normal file
File diff suppressed because it is too large

120  teil20/lib/python3.11/site-packages/pandas/_libs/meson.build  Normal file
@@ -0,0 +1,120 @@
_algos_take_helper = custom_target('algos_take_helper_pxi',
    output: 'algos_take_helper.pxi',
    input: 'algos_take_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_algos_common_helper = custom_target('algos_common_helper_pxi',
    output: 'algos_common_helper.pxi',
    input: 'algos_common_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_khash_primitive_helper = custom_target('khash_primitive_helper_pxi',
    output: 'khash_for_primitive_helper.pxi',
    input: 'khash_for_primitive_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_hashtable_class_helper = custom_target('hashtable_class_helper_pxi',
    output: 'hashtable_class_helper.pxi',
    input: 'hashtable_class_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_hashtable_func_helper = custom_target('hashtable_func_helper_pxi',
    output: 'hashtable_func_helper.pxi',
    input: 'hashtable_func_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_index_class_helper = custom_target('index_class_helper_pxi',
    output: 'index_class_helper.pxi',
    input: 'index_class_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_sparse_op_helper = custom_target('sparse_op_helper_pxi',
    output: 'sparse_op_helper.pxi',
    input: 'sparse_op_helper.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_intervaltree_helper = custom_target('intervaltree_helper_pxi',
    output: 'intervaltree.pxi',
    input: 'intervaltree.pxi.in',
    command: [
        py, tempita, '@INPUT@', '-o', '@OUTDIR@'
    ]
)
_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper)

subdir('tslibs')

libs_sources = {
    # Dict of extension name -> dict of {sources, include_dirs, and deps}
    # numpy include dir is implicitly included
    'algos': {'sources': ['algos.pyx', _algos_common_helper, _algos_take_helper, _khash_primitive_helper]},
    'arrays': {'sources': ['arrays.pyx']},
    'groupby': {'sources': ['groupby.pyx']},
    'hashing': {'sources': ['hashing.pyx']},
    'hashtable': {'sources': ['hashtable.pyx', _khash_primitive_helper, _hashtable_class_helper, _hashtable_func_helper]},
    'index': {'sources': ['index.pyx', _index_class_helper]},
    'indexing': {'sources': ['indexing.pyx']},
    'internals': {'sources': ['internals.pyx']},
    'interval': {'sources': ['interval.pyx', _intervaltree_helper]},
    'join': {'sources': ['join.pyx', _khash_primitive_helper],
             'deps': _khash_primitive_helper_dep},
    'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']},
    'missing': {'sources': ['missing.pyx']},
    'pandas_datetime': {'sources': ['src/vendored/numpy/datetime/np_datetime.c',
                                    'src/vendored/numpy/datetime/np_datetime_strings.c',
                                    'src/datetime/date_conversions.c',
                                    'src/datetime/pd_datetime.c']},
    'pandas_parser': {'sources': ['src/parser/tokenizer.c',
                                  'src/parser/io.c',
                                  'src/parser/pd_parser.c']},
    'parsers': {'sources': ['parsers.pyx', 'src/parser/tokenizer.c', 'src/parser/io.c'],
                'deps': _khash_primitive_helper_dep},
    'json': {'sources': ['src/vendored/ujson/python/ujson.c',
                         'src/vendored/ujson/python/objToJSON.c',
                         'src/vendored/ujson/python/JSONtoObj.c',
                         'src/vendored/ujson/lib/ultrajsonenc.c',
                         'src/vendored/ujson/lib/ultrajsondec.c']},
    'ops': {'sources': ['ops.pyx']},
    'ops_dispatch': {'sources': ['ops_dispatch.pyx']},
    'properties': {'sources': ['properties.pyx']},
    'reshape': {'sources': ['reshape.pyx']},
    'sas': {'sources': ['sas.pyx']},
    'byteswap': {'sources': ['byteswap.pyx']},
    'sparse': {'sources': ['sparse.pyx', _sparse_op_helper]},
    'tslib': {'sources': ['tslib.pyx']},
    'testing': {'sources': ['testing.pyx']},
    'writers': {'sources': ['writers.pyx']}
}


foreach ext_name, ext_dict : libs_sources
    py.extension_module(
        ext_name,
        ext_dict.get('sources'),
        cython_args: ['--include-dir', meson.current_build_dir(), '-X always_allow_keywords=true'],
        include_directories: [inc_np, inc_pd],
        dependencies: ext_dict.get('deps', ''),
        subdir: 'pandas/_libs',
        install: true
    )
endforeach

py.install_sources('__init__.py',
                    pure: false,
                    subdir: 'pandas/_libs')

subdir('window')
 | 
			
		||||
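Each key of libs_sources above is built by the foreach loop into an extension
module importable as pandas._libs.<ext_name>. A minimal sketch in Python,
assuming a built pandas is on the import path:

    # sanity check: the extensions declared in libs_sources are importable
    import importlib

    for name in ("algos", "hashtable", "missing", "ops", "parsers"):
        mod = importlib.import_module(f"pandas._libs.{name}")
        print(name, "->", mod.__name__)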
										
Binary file not shown.

20  teil20/lib/python3.11/site-packages/pandas/_libs/missing.pxd  Normal file
@@ -0,0 +1,20 @@
from numpy cimport (
    ndarray,
    uint8_t,
)


cpdef bint is_matching_na(object left, object right, bint nan_matches_none=*)
cpdef bint check_na_tuples_nonequal(object left, object right)

cpdef bint checknull(object val, bint inf_as_na=*)
cpdef ndarray[uint8_t] isnaobj(ndarray arr, bint inf_as_na=*)

cdef bint is_null_datetime64(v)
cdef bint is_null_timedelta64(v)
cdef bint checknull_with_nat_and_na(object obj)

cdef class C_NAType:
    pass

cdef C_NAType C_NA
							
								
								
									
17  teil20/lib/python3.11/site-packages/pandas/_libs/missing.pyi  Normal file
@@ -0,0 +1,17 @@
import numpy as np
from numpy import typing as npt

class NAType:
    def __new__(cls, *args, **kwargs): ...

NA: NAType

def is_matching_na(
    left: object, right: object, nan_matches_none: bool = ...
) -> bool: ...
def isposinf_scalar(val: object) -> bool: ...
def isneginf_scalar(val: object) -> bool: ...
def checknull(val: object, inf_as_na: bool = ...) -> bool: ...
def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ...
def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
def is_float_nan(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
							
								
								
									
544  teil20/lib/python3.11/site-packages/pandas/_libs/missing.pyx  Normal file
@@ -0,0 +1,544 @@
from decimal import Decimal
import numbers
from sys import maxsize

cimport cython
from cpython.datetime cimport (
    date,
    time,
    timedelta,
)
from cython cimport Py_ssize_t

import numpy as np

cimport numpy as cnp
from numpy cimport (
    flatiter,
    float64_t,
    int64_t,
    ndarray,
    uint8_t,
)

cnp.import_array()

from pandas._libs cimport util
from pandas._libs.tslibs.nattype cimport (
    c_NaT as NaT,
    checknull_with_nat,
    is_dt64nat,
    is_td64nat,
)
from pandas._libs.tslibs.np_datetime cimport (
    get_datetime64_unit,
    get_datetime64_value,
    get_timedelta64_value,
    import_pandas_datetime,
)

import_pandas_datetime()

from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op

cdef:
    float64_t INF = <float64_t>np.inf
    float64_t NEGINF = -INF

    int64_t NPY_NAT = util.get_nat()

    bint is_32bit = maxsize <= 2 ** 32

    type cDecimal = Decimal  # for faster isinstance checks


cpdef bint check_na_tuples_nonequal(object left, object right):
    """
    When we have NA in one of the tuples but not the other we have to check here,
    because our regular checks fail before with ambiguous boolean value.

    Parameters
    ----------
    left: Any
    right: Any

    Returns
    -------
    True if we are dealing with tuples that have NA on one side and non NA on
    the other side.

    """
    if not isinstance(left, tuple) or not isinstance(right, tuple):
        return False

    if len(left) != len(right):
        return False

    for left_element, right_element in zip(left, right):
        if left_element is C_NA and right_element is not C_NA:
            return True
        elif right_element is C_NA and left_element is not C_NA:
            return True

    return False


cpdef bint is_matching_na(object left, object right, bint nan_matches_none=False):
    """
    Check if two scalars are both NA of matching types.

    Parameters
    ----------
    left : Any
    right : Any
    nan_matches_none : bool, default False
        For backwards compatibility, consider NaN as matching None.

    Returns
    -------
    bool
    """
    if left is None:
        if nan_matches_none and util.is_nan(right):
            return True
        return right is None
    elif left is C_NA:
        return right is C_NA
    elif left is NaT:
        return right is NaT
    elif util.is_float_object(left):
        if nan_matches_none and right is None and util.is_nan(left):
            return True
        return (
            util.is_nan(left)
            and util.is_float_object(right)
            and util.is_nan(right)
        )
    elif util.is_complex_object(left):
        return (
            util.is_nan(left)
            and util.is_complex_object(right)
            and util.is_nan(right)
        )
    elif util.is_datetime64_object(left):
        return (
            get_datetime64_value(left) == NPY_NAT
            and util.is_datetime64_object(right)
            and get_datetime64_value(right) == NPY_NAT
            and get_datetime64_unit(left) == get_datetime64_unit(right)
        )
    elif util.is_timedelta64_object(left):
        return (
            get_timedelta64_value(left) == NPY_NAT
            and util.is_timedelta64_object(right)
            and get_timedelta64_value(right) == NPY_NAT
            and get_datetime64_unit(left) == get_datetime64_unit(right)
        )
    elif is_decimal_na(left):
        return is_decimal_na(right)
    return False
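A short sketch of the case analysis above (illustrative values, assuming a
built pandas):

    import numpy as np
    from pandas._libs.missing import is_matching_na

    print(is_matching_na(np.nan, np.nan))                       # True: both float NaN
    print(is_matching_na(np.nan, None))                         # False: types differ
    print(is_matching_na(np.nan, None, nan_matches_none=True))  # True: compat mode
    print(is_matching_na(np.datetime64("NaT"), np.timedelta64("NaT")))  # False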
cpdef bint checknull(object val, bint inf_as_na=False):
    """
    Return boolean describing whether the input is NA-like, defined here as any
    of:
     - None
     - nan
     - NaT
     - np.datetime64 representation of NaT
     - np.timedelta64 representation of NaT
     - NA
     - Decimal("NaN")

    Parameters
    ----------
    val : object
    inf_as_na : bool, default False
        Whether to treat INF and -INF as NA values.

    Returns
    -------
    bool
    """
    if val is None or val is NaT or val is C_NA:
        return True
    elif util.is_float_object(val) or util.is_complex_object(val):
        if val != val:
            return True
        elif inf_as_na:
            return val == INF or val == NEGINF
        return False
    elif util.is_timedelta64_object(val):
        return get_timedelta64_value(val) == NPY_NAT
    elif util.is_datetime64_object(val):
        return get_datetime64_value(val) == NPY_NAT
    else:
        return is_decimal_na(val)
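checknull accepts any scalar; a sketch of the boundary cases it distinguishes
(assuming a built pandas):

    from decimal import Decimal
    import numpy as np
    from pandas._libs.missing import checknull

    print(checknull(None))                     # True
    print(checknull(np.datetime64("NaT")))     # True
    print(checknull(Decimal("NaN")))           # True
    print(checknull(np.inf))                   # False by default
    print(checknull(np.inf, inf_as_na=True))   # True when INF counts as NA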
cdef bint is_decimal_na(object val):
    """
    Check whether `val` is a decimal.Decimal NaN, i.e. Decimal("NaN").
    """
    return isinstance(val, cDecimal) and val != val


@cython.wraparound(False)
@cython.boundscheck(False)
cpdef ndarray[uint8_t] isnaobj(ndarray arr, bint inf_as_na=False):
    """
    Return boolean mask denoting which elements of a 1-D array are na-like,
    according to the criteria defined in `checknull`:
     - None
     - nan
     - NaT
     - np.datetime64 representation of NaT
     - np.timedelta64 representation of NaT
     - NA
     - Decimal("NaN")

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    result : ndarray (dtype=np.bool_)
    """
    cdef:
        Py_ssize_t i, n = arr.size
        object val
        bint is_null
        ndarray result = np.empty((<object>arr).shape, dtype=np.uint8)
        flatiter it = cnp.PyArray_IterNew(arr)
        flatiter it2 = cnp.PyArray_IterNew(result)

    for i in range(n):
        # The PyArray_GETITEM and PyArray_ITER_NEXT are faster
        #  equivalents to `val = values[i]`
        val = cnp.PyArray_GETITEM(arr, cnp.PyArray_ITER_DATA(it))
        cnp.PyArray_ITER_NEXT(it)
        is_null = checknull(val, inf_as_na=inf_as_na)
        # Dereference pointer (set value)
        (<uint8_t *>(cnp.PyArray_ITER_DATA(it2)))[0] = <uint8_t>is_null
        cnp.PyArray_ITER_NEXT(it2)
    return result.view(np.bool_)


def isposinf_scalar(val: object) -> bool:
    return util.is_float_object(val) and val == INF


def isneginf_scalar(val: object) -> bool:
    return util.is_float_object(val) and val == NEGINF


cdef bint is_null_datetime64(v):
    # determine if we have a null for a datetime (or integer versions),
    # excluding np.timedelta64('nat')
    if checknull_with_nat(v) or is_dt64nat(v):
        return True
    return False


cdef bint is_null_timedelta64(v):
    # determine if we have a null for a timedelta (or integer versions),
    # excluding np.datetime64('nat')
    if checknull_with_nat(v) or is_td64nat(v):
        return True
    return False


cdef bint checknull_with_nat_and_na(object obj):
    # See GH#32214
    return checknull_with_nat(obj) or obj is C_NA


@cython.wraparound(False)
@cython.boundscheck(False)
def is_float_nan(values: ndarray) -> ndarray:
    """
    True for elements which correspond to a float nan

    Returns
    -------
    ndarray[bool]
    """
    cdef:
        ndarray[uint8_t] result
        Py_ssize_t i, N
        object val

    N = len(values)
    result = np.zeros(N, dtype=np.uint8)

    for i in range(N):
        val = values[i]
        if util.is_nan(val):
            result[i] = True
    return result.view(bool)


@cython.wraparound(False)
@cython.boundscheck(False)
def is_numeric_na(values: ndarray) -> ndarray:
    """
    Check for NA values consistent with IntegerArray/FloatingArray.

    Similar to a vectorized is_valid_na_for_dtype restricted to numeric dtypes.

    Returns
    -------
    ndarray[bool]
    """
    cdef:
        ndarray[uint8_t] result
        Py_ssize_t i, N
        object val

    N = len(values)
    result = np.zeros(N, dtype=np.uint8)

    for i in range(N):
        val = values[i]
        if checknull(val):
            if val is None or val is C_NA or util.is_nan(val) or is_decimal_na(val):
                result[i] = True
            else:
                raise TypeError(f"'values' contains non-numeric NA {val}")
    return result.view(bool)
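Unlike is_float_nan, is_numeric_na raises on NA-like values that are invalid
for numeric masked arrays (e.g. NaT). A sketch:

    import numpy as np
    from pandas._libs.missing import is_numeric_na

    arr = np.array([1, None, np.nan], dtype=object)
    print(is_numeric_na(arr))          # [False  True  True]

    bad = np.array([np.datetime64("NaT")], dtype=object)
    try:
        is_numeric_na(bad)
    except TypeError as err:
        print(err)                     # 'values' contains non-numeric NA NaT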
# -----------------------------------------------------------------------------
# Implementation of NA singleton


def _create_binary_propagating_op(name, is_divmod=False):
    is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"]

    def method(self, other):
        if (other is C_NA or isinstance(other, (str, bytes))
                or isinstance(other, (numbers.Number, np.bool_))
                or util.is_array(other) and not other.shape):
            # Need the other.shape clause to handle NumPy scalars,
            # since we do a setitem on `out` below, which
            # won't work for NumPy scalars.
            if is_divmod:
                return NA, NA
            else:
                return NA

        elif util.is_array(other):
            out = np.empty(other.shape, dtype=object)
            out[:] = NA

            if is_divmod:
                return out, out.copy()
            else:
                return out

        elif is_cmp and isinstance(other, (date, time, timedelta)):
            return NA

        elif isinstance(other, date):
            if name in ["__sub__", "__rsub__"]:
                return NA

        elif isinstance(other, timedelta):
            if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
                return NA

        return NotImplemented

    method.__name__ = name
    return method


def _create_unary_propagating_op(name: str):
    def method(self):
        return NA

    method.__name__ = name
    return method


cdef class C_NAType:
    pass


class NAType(C_NAType):
    """
    NA ("not available") missing value indicator.

    .. warning::

       Experimental: the behaviour of NA can still change without warning.

    The NA singleton is a missing value indicator defined by pandas. It is
    used in certain new extension dtypes (currently the "string" dtype).

    Examples
    --------
    >>> pd.NA
    <NA>

    >>> True | pd.NA
    True

    >>> True & pd.NA
    <NA>

    >>> pd.NA != pd.NA
    <NA>

    >>> pd.NA == pd.NA
    <NA>
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        if NAType._instance is None:
            NAType._instance = C_NAType.__new__(cls, *args, **kwargs)
        return NAType._instance

    def __repr__(self) -> str:
        return "<NA>"

    def __format__(self, format_spec) -> str:
        try:
            return self.__repr__().__format__(format_spec)
        except ValueError:
            return self.__repr__()

    def __bool__(self):
        raise TypeError("boolean value of NA is ambiguous")

    def __hash__(self):
        # GH 30013: Ensure hash is large enough to avoid hash collisions with integers
        exponent = 31 if is_32bit else 61
        return 2 ** exponent - 1

    def __reduce__(self):
        return "NA"

    # Binary arithmetic and comparison ops -> propagate

    __add__ = _create_binary_propagating_op("__add__")
    __radd__ = _create_binary_propagating_op("__radd__")
    __sub__ = _create_binary_propagating_op("__sub__")
    __rsub__ = _create_binary_propagating_op("__rsub__")
    __mul__ = _create_binary_propagating_op("__mul__")
    __rmul__ = _create_binary_propagating_op("__rmul__")
    __matmul__ = _create_binary_propagating_op("__matmul__")
    __rmatmul__ = _create_binary_propagating_op("__rmatmul__")
    __truediv__ = _create_binary_propagating_op("__truediv__")
    __rtruediv__ = _create_binary_propagating_op("__rtruediv__")
    __floordiv__ = _create_binary_propagating_op("__floordiv__")
    __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__")
    __mod__ = _create_binary_propagating_op("__mod__")
    __rmod__ = _create_binary_propagating_op("__rmod__")
    __divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True)
    __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True)
    # __lshift__ and __rshift__ are not implemented

    __eq__ = _create_binary_propagating_op("__eq__")
    __ne__ = _create_binary_propagating_op("__ne__")
    __le__ = _create_binary_propagating_op("__le__")
    __lt__ = _create_binary_propagating_op("__lt__")
    __gt__ = _create_binary_propagating_op("__gt__")
    __ge__ = _create_binary_propagating_op("__ge__")

    # Unary ops

    __neg__ = _create_unary_propagating_op("__neg__")
    __pos__ = _create_unary_propagating_op("__pos__")
    __abs__ = _create_unary_propagating_op("__abs__")
    __invert__ = _create_unary_propagating_op("__invert__")

    # pow has special handling
    def __pow__(self, other):
        if other is C_NA:
            return NA
        elif isinstance(other, (numbers.Number, np.bool_)):
            if other == 0:
                # returning positive is correct for +/- 0.
                return type(other)(1)
            else:
                return NA
        elif util.is_array(other):
            return np.where(other == 0, other.dtype.type(1), NA)

        return NotImplemented

    def __rpow__(self, other):
        if other is C_NA:
            return NA
        elif isinstance(other, (numbers.Number, np.bool_)):
            if other == 1:
                return other
            else:
                return NA
        elif util.is_array(other):
            return np.where(other == 1, other, NA)
        return NotImplemented

    # Logical ops using Kleene logic

    def __and__(self, other):
        if other is False:
            return False
        elif other is True or other is C_NA:
            return NA
        return NotImplemented

    __rand__ = __and__

    def __or__(self, other):
        if other is True:
            return True
        elif other is False or other is C_NA:
            return NA
        return NotImplemented

    __ror__ = __or__

    def __xor__(self, other):
        if other is False or other is True or other is C_NA:
            return NA
        return NotImplemented

    __rxor__ = __xor__

    __array_priority__ = 1000
    _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool_)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        types = self._HANDLED_TYPES + (NAType,)
        for x in inputs:
            if not isinstance(x, types):
                return NotImplemented

        if method != "__call__":
            raise ValueError(f"ufunc method '{method}' not supported for NA")
        result = maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is NotImplemented:
            # For a NumPy ufunc that's not a binop, like np.logaddexp
            index = [i for i, x in enumerate(inputs) if x is NA][0]
            result = np.broadcast_arrays(*inputs)[index]
            if result.ndim == 0:
                result = result.item()
            if ufunc.nout > 1:
                result = (NA,) * ufunc.nout

        return result


C_NA = NAType()   # C-visible
NA = C_NA         # Python-visible
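The resulting singleton propagates through arithmetic, special-cases pow, and
follows Kleene logic for boolean ops; a brief sketch via the public alias:

    import pandas as pd

    print(pd.NA + 1)      # <NA>: arithmetic propagates
    print(pd.NA ** 0)     # 1: special-cased in __pow__
    print(False & pd.NA)  # False: Kleene "and" short-circuits
    print(True & pd.NA)   # <NA>: outcome depends on the missing value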
										
Binary file not shown.

51  teil20/lib/python3.11/site-packages/pandas/_libs/ops.pyi  Normal file
@@ -0,0 +1,51 @@
from typing import (
    Any,
    Callable,
    Iterable,
    Literal,
    TypeAlias,
    overload,
)

import numpy as np

from pandas._typing import npt

_BinOp: TypeAlias = Callable[[Any, Any], Any]
_BoolOp: TypeAlias = Callable[[Any, Any], bool]

def scalar_compare(
    values: np.ndarray,  # object[:]
    val: object,
    op: _BoolOp,  # {operator.eq, operator.ne, ...}
) -> npt.NDArray[np.bool_]: ...
def vec_compare(
    left: npt.NDArray[np.object_],
    right: npt.NDArray[np.object_],
    op: _BoolOp,  # {operator.eq, operator.ne, ...}
) -> npt.NDArray[np.bool_]: ...
def scalar_binop(
    values: np.ndarray,  # object[:]
    val: object,
    op: _BinOp,  # binary operator
) -> np.ndarray: ...
def vec_binop(
    left: np.ndarray,  # object[:]
    right: np.ndarray,  # object[:]
    op: _BinOp,  # binary operator
) -> np.ndarray: ...
@overload
def maybe_convert_bool(
    arr: npt.NDArray[np.object_],
    true_values: Iterable = ...,
    false_values: Iterable = ...,
    convert_to_masked_nullable: Literal[False] = ...,
) -> tuple[np.ndarray, None]: ...
@overload
def maybe_convert_bool(
    arr: npt.NDArray[np.object_],
    true_values: Iterable = ...,
    false_values: Iterable = ...,
    *,
    convert_to_masked_nullable: Literal[True],
) -> tuple[np.ndarray, np.ndarray]: ...
							
								
								
									
310  teil20/lib/python3.11/site-packages/pandas/_libs/ops.pyx  Normal file
@@ -0,0 +1,310 @@
import operator

cimport cython
from cpython.object cimport (
    Py_EQ,
    Py_GE,
    Py_GT,
    Py_LE,
    Py_LT,
    Py_NE,
    PyObject_RichCompareBool,
)
from cython cimport Py_ssize_t

import numpy as np

from numpy cimport (
    import_array,
    ndarray,
    uint8_t,
)

import_array()


from pandas._libs.missing cimport checknull
from pandas._libs.util cimport is_nan


@cython.wraparound(False)
@cython.boundscheck(False)
def scalar_compare(object[:] values, object val, object op) -> ndarray:
    """
    Compare each element of `values` array with the scalar `val`, with
    the comparison operation described by `op`.

    Parameters
    ----------
    values : ndarray[object]
    val : object
    op : {operator.eq, operator.ne,
          operator.le, operator.lt,
          operator.ge, operator.gt}

    Returns
    -------
    result : ndarray[bool]
    """
    cdef:
        Py_ssize_t i, n = len(values)
        ndarray[uint8_t, cast=True] result
        bint isnull_val
        int flag
        object x

    if op is operator.lt:
        flag = Py_LT
    elif op is operator.le:
        flag = Py_LE
    elif op is operator.gt:
        flag = Py_GT
    elif op is operator.ge:
        flag = Py_GE
    elif op is operator.eq:
        flag = Py_EQ
    elif op is operator.ne:
        flag = Py_NE
    else:
        raise ValueError("Unrecognized operator")

    result = np.empty(n, dtype=bool).view(np.uint8)
    isnull_val = checknull(val)

    if flag == Py_NE:
        for i in range(n):
            x = values[i]
            if checknull(x):
                result[i] = True
            elif isnull_val:
                result[i] = True
            else:
                try:
                    result[i] = PyObject_RichCompareBool(x, val, flag)
                except TypeError:
                    result[i] = True
    elif flag == Py_EQ:
        for i in range(n):
            x = values[i]
            if checknull(x):
                result[i] = False
            elif isnull_val:
                result[i] = False
            else:
                try:
                    result[i] = PyObject_RichCompareBool(x, val, flag)
                except TypeError:
                    result[i] = False

    else:
        for i in range(n):
            x = values[i]
            if checknull(x):
                result[i] = False
            elif isnull_val:
                result[i] = False
            else:
                result[i] = PyObject_RichCompareBool(x, val, flag)

    return result.view(bool)
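scalar_compare implements the null semantics above: ne is True for nulls,
while eq and the orderings are False. A sketch:

    import operator
    import numpy as np
    from pandas._libs.ops import scalar_compare

    values = np.array(["a", None, "b"], dtype=object)
    print(scalar_compare(values, "a", operator.eq))  # [ True False False]
    print(scalar_compare(values, "a", operator.ne))  # [False  True  True]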
@cython.wraparound(False)
@cython.boundscheck(False)
def vec_compare(ndarray[object] left, ndarray[object] right, object op) -> ndarray:
    """
    Compare the elements of `left` with the elements of `right` pointwise,
    with the comparison operation described by `op`.

    Parameters
    ----------
    left : ndarray[object]
    right : ndarray[object]
    op : {operator.eq, operator.ne,
          operator.le, operator.lt,
          operator.ge, operator.gt}

    Returns
    -------
    result : ndarray[bool]
    """
    cdef:
        Py_ssize_t i, n = len(left)
        ndarray[uint8_t, cast=True] result
        int flag

    if n != <Py_ssize_t>len(right):
        raise ValueError(f"Arrays were different lengths: {n} vs {len(right)}")

    if op is operator.lt:
        flag = Py_LT
    elif op is operator.le:
        flag = Py_LE
    elif op is operator.gt:
        flag = Py_GT
    elif op is operator.ge:
        flag = Py_GE
    elif op is operator.eq:
        flag = Py_EQ
    elif op is operator.ne:
        flag = Py_NE
    else:
        raise ValueError("Unrecognized operator")

    result = np.empty(n, dtype=bool).view(np.uint8)

    if flag == Py_NE:
        for i in range(n):
            x = left[i]
            y = right[i]

            if checknull(x) or checknull(y):
                result[i] = True
            else:
                result[i] = PyObject_RichCompareBool(x, y, flag)
    else:
        for i in range(n):
            x = left[i]
            y = right[i]

            if checknull(x) or checknull(y):
                result[i] = False
            else:
                result[i] = PyObject_RichCompareBool(x, y, flag)

    return result.view(bool)


@cython.wraparound(False)
@cython.boundscheck(False)
def scalar_binop(object[:] values, object val, object op) -> ndarray:
    """
    Apply the given binary operator `op` between each element of the array
    `values` and the scalar `val`.

    Parameters
    ----------
    values : ndarray[object]
    val : object
    op : binary operator

    Returns
    -------
    result : ndarray[object]
    """
    cdef:
        Py_ssize_t i, n = len(values)
        object[::1] result
        object x

    result = np.empty(n, dtype=object)
    if val is None or is_nan(val):
        result[:] = val
        return result.base  # `.base` to access underlying np.ndarray

    for i in range(n):
        x = values[i]
        if x is None or is_nan(x):
            result[i] = x
        else:
            result[i] = op(x, val)

    return maybe_convert_bool(result.base)[0]


@cython.wraparound(False)
@cython.boundscheck(False)
def vec_binop(object[:] left, object[:] right, object op) -> ndarray:
    """
    Apply the given binary operator `op` pointwise to the elements of
    arrays `left` and `right`.

    Parameters
    ----------
    left : ndarray[object]
    right : ndarray[object]
    op : binary operator

    Returns
    -------
    result : ndarray[object]
    """
    cdef:
        Py_ssize_t i, n = len(left)
        object[::1] result

    if n != <Py_ssize_t>len(right):
        raise ValueError(f"Arrays were different lengths: {n} vs {len(right)}")

    result = np.empty(n, dtype=object)

    for i in range(n):
        x = left[i]
        y = right[i]
        try:
            result[i] = op(x, y)
        except TypeError:
            if x is None or is_nan(x):
                result[i] = x
            elif y is None or is_nan(y):
                result[i] = y
            else:
                raise

    return maybe_convert_bool(result.base)[0]  # `.base` to access np.ndarray


def maybe_convert_bool(ndarray[object] arr,
                       true_values=None,
                       false_values=None,
                       convert_to_masked_nullable=False
                       ) -> tuple[np.ndarray, np.ndarray | None]:
    cdef:
        Py_ssize_t i, n
        ndarray[uint8_t] result
        ndarray[uint8_t] mask
        object val
        set true_vals, false_vals
        bint has_na = False

    n = len(arr)
    result = np.empty(n, dtype=np.uint8)
    mask = np.zeros(n, dtype=np.uint8)
    # the defaults
    true_vals = {"True", "TRUE", "true"}
    false_vals = {"False", "FALSE", "false"}

    if true_values is not None:
        true_vals = true_vals | set(true_values)

    if false_values is not None:
        false_vals = false_vals | set(false_values)

    for i in range(n):
        val = arr[i]

        if isinstance(val, bool):
            if val is True:
                result[i] = 1
            else:
                result[i] = 0
        elif val in true_vals:
            result[i] = 1
        elif val in false_vals:
            result[i] = 0
        elif is_nan(val) or val is None:
            mask[i] = 1
            result[i] = 0  # Value here doesn't matter, will be replaced w/ nan
            has_na = True
        else:
            return (arr, None)

    if has_na:
        if convert_to_masked_nullable:
            return (result.view(np.bool_), mask.view(np.bool_))
        else:
            arr = result.view(np.bool_).astype(object)
            np.putmask(arr, mask, np.nan)
            return (arr, None)
    else:
        return (result.view(np.bool_), None)
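maybe_convert_bool converts only when every element is recognized; otherwise
the input array is returned unchanged. A sketch:

    import numpy as np
    from pandas._libs.ops import maybe_convert_bool

    arr = np.array(["True", "false", None], dtype=object)
    values, mask = maybe_convert_bool(arr, convert_to_masked_nullable=True)
    print(values)   # [ True False False]
    print(mask)     # [False False  True]

    mixed = np.array(["True", "maybe"], dtype=object)
    print(maybe_convert_bool(mixed)[0] is mixed)   # True: left untouched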
										
Binary file not shown.

@@ -0,0 +1,5 @@
import numpy as np

def maybe_dispatch_ufunc_to_dunder_op(
    self, ufunc: np.ufunc, method: str, *inputs, **kwargs
): ...
@@ -0,0 +1,121 @@
DISPATCHED_UFUNCS = {
    "add",
    "sub",
    "mul",
    "pow",
    "mod",
    "floordiv",
    "truediv",
    "divmod",
    "eq",
    "ne",
    "lt",
    "gt",
    "le",
    "ge",
    "remainder",
    "matmul",
    "or",
    "xor",
    "and",
    "neg",
    "pos",
    "abs",
}
UNARY_UFUNCS = {
    "neg",
    "pos",
    "abs",
}
UFUNC_ALIASES = {
    "subtract": "sub",
    "multiply": "mul",
    "floor_divide": "floordiv",
    "true_divide": "truediv",
    "power": "pow",
    "remainder": "mod",
    "divide": "truediv",
    "equal": "eq",
    "not_equal": "ne",
    "less": "lt",
    "less_equal": "le",
    "greater": "gt",
    "greater_equal": "ge",
    "bitwise_or": "or",
    "bitwise_and": "and",
    "bitwise_xor": "xor",
    "negative": "neg",
    "absolute": "abs",
    "positive": "pos",
}

# For op(., Array) -> Array.__r{op}__
REVERSED_NAMES = {
    "lt": "__gt__",
    "le": "__ge__",
    "gt": "__lt__",
    "ge": "__le__",
    "eq": "__eq__",
    "ne": "__ne__",
}


def maybe_dispatch_ufunc_to_dunder_op(
    object self, object ufunc, str method, *inputs, **kwargs
):
    """
    Dispatch a ufunc to the equivalent dunder method.

    Parameters
    ----------
    self : ArrayLike
        The array whose dunder method we dispatch to
    ufunc : Callable
        A NumPy ufunc
    method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
    inputs : ArrayLike
        The input arrays.
    kwargs : Any
        The additional keyword arguments, e.g. ``out``.

    Returns
    -------
    result : Any
        The result of applying the ufunc
    """
    # special has the ufuncs we dispatch to the dunder op on

    op_name = ufunc.__name__
    op_name = UFUNC_ALIASES.get(op_name, op_name)

    def not_implemented(*args, **kwargs):
        return NotImplemented

    if kwargs or ufunc.nin > 2:
        return NotImplemented

    if method == "__call__" and op_name in DISPATCHED_UFUNCS:

        if inputs[0] is self:
            name = f"__{op_name}__"
            meth = getattr(self, name, not_implemented)

            if op_name in UNARY_UFUNCS:
                assert len(inputs) == 1
                return meth()

            return meth(inputs[1])

        elif inputs[1] is self:
            name = REVERSED_NAMES.get(op_name, f"__r{op_name}__")

            meth = getattr(self, name, not_implemented)
            result = meth(inputs[0])
            return result

        else:
            # should not be reached, but covering our bases
            return NotImplemented

    else:
        return NotImplemented
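The dispatch picks the forward dunder when `self` is the first input and the
reversed one when it is the second; a sketch with a hypothetical wrapper class:

    import numpy as np
    from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op

    class Wrapped:  # hypothetical, for illustration only
        def __add__(self, other):
            return ("add", other)
        def __radd__(self, other):
            return ("radd", other)

    w = Wrapped()
    print(maybe_dispatch_ufunc_to_dunder_op(w, np.add, "__call__", w, 1))   # ('add', 1)
    print(maybe_dispatch_ufunc_to_dunder_op(w, np.add, "__call__", 1, w))  # ('radd', 1)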
										
Binary file not shown.

Binary file not shown.

Binary file not shown.

77  teil20/lib/python3.11/site-packages/pandas/_libs/parsers.pyi  Normal file
@@ -0,0 +1,77 @@
from typing import (
    Hashable,
    Literal,
)

import numpy as np

from pandas._typing import (
    ArrayLike,
    Dtype,
    npt,
)

STR_NA_VALUES: set[str]
DEFAULT_BUFFER_HEURISTIC: int

def sanitize_objects(
    values: npt.NDArray[np.object_],
    na_values: set,
) -> int: ...

class TextReader:
    unnamed_cols: set[str]
    table_width: int  # int64_t
    leading_cols: int  # int64_t
    header: list[list[int]]  # non-negative integers
    def __init__(
        self,
        source,
        delimiter: bytes | str = ...,  # single-character only
        header=...,
        header_start: int = ...,  # int64_t
        header_end: int = ...,  # uint64_t
        index_col=...,
        names=...,
        tokenize_chunksize: int = ...,  # int64_t
        delim_whitespace: bool = ...,
        converters=...,
        skipinitialspace: bool = ...,
        escapechar: bytes | str | None = ...,  # single-character only
        doublequote: bool = ...,
        quotechar: str | bytes | None = ...,  # at most 1 character
        quoting: int = ...,
        lineterminator: bytes | str | None = ...,  # at most 1 character
        comment=...,
        decimal: bytes | str = ...,  # single-character only
        thousands: bytes | str | None = ...,  # single-character only
        dtype: Dtype | dict[Hashable, Dtype] = ...,
        usecols=...,
        error_bad_lines: bool = ...,
        warn_bad_lines: bool = ...,
        na_filter: bool = ...,
        na_values=...,
        na_fvalues=...,
        keep_default_na: bool = ...,
        true_values=...,
        false_values=...,
        allow_leading_cols: bool = ...,
        skiprows=...,
        skipfooter: int = ...,  # int64_t
        verbose: bool = ...,
        float_precision: Literal["round_trip", "legacy", "high"] | None = ...,
        skip_blank_lines: bool = ...,
        encoding_errors: bytes | str = ...,
    ) -> None: ...
    def set_noconvert(self, i: int) -> None: ...
    def remove_noconvert(self, i: int) -> None: ...
    def close(self) -> None: ...
    def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ...
    def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ...

# _maybe_upcast, na_values are only exposed for testing
na_values: dict

def _maybe_upcast(
    arr, use_dtype_backend: bool = ..., dtype_backend: str = ...
) -> np.ndarray: ...
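TextReader is not public API; it is driven by pandas.read_csv with the C
engine. A round-trip sketch:

    from io import StringIO
    import pandas as pd

    buf = StringIO("a,b\n1,True\n2,False\n")
    df = pd.read_csv(buf, engine="c")  # routes through TextReader
    print(df.dtypes)                   # a: int64, b: bool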
							
								
								
									
2156  teil20/lib/python3.11/site-packages/pandas/_libs/parsers.pyx  Normal file
File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.