7025 changed files with 1325157 additions and 0 deletions
@@ -0,0 +1,83 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly


if [ "${BASH_SOURCE-}" = "$0" ]; then
    echo "You must source this script: \$ source $0" >&2
    exit 33
fi

deactivate () {
    unset -f pydoc >/dev/null 2>&1 || true

    # reset old environment variables
    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
    if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
        PATH="$_OLD_VIRTUAL_PATH"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # The hash command must be called to get it to forget past
    # commands. Without forgetting past commands the $PATH changes
    # we made may not be respected
    hash -r 2>/dev/null

    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
        PS1="$_OLD_VIRTUAL_PS1"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "${1-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV='/home/pi/git/pythonkurs/teil20b'
if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
    VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
fi
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1-}"
    if [ "x" != x ] ; then
        PS1="() ${PS1-}"
    else
        PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}"
    fi
    export PS1
fi

# Make sure to unalias pydoc if it's already there
alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true

pydoc () {
    python -m pydoc "$@"
}

# The hash command must be called to get it to forget past
# commands. Without forgetting past commands the $PATH changes
# we made may not be respected
hash -r 2>/dev/null
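The activation scripts in this commit all do the same three things: save the old `PATH` and prompt, prepend the environment's `bin` directory to `PATH`, and export `VIRTUAL_ENV`. As a quick sanity check (an editorial sketch, not part of the generated virtualenv), the effect of sourcing `bin/activate` can be inspected from any Python interpreter:

```python
# Editorial sketch: inspect what bin/activate changed. The expected path
# below is the venv root hard-coded into this diff's scripts.
import os

expected = "/home/pi/git/pythonkurs/teil20b"
print("VIRTUAL_ENV:", os.environ.get("VIRTUAL_ENV"))                      # expected after activation
print("PATH[0]    :", os.environ.get("PATH", "").split(os.pathsep)[0])    # expected + "/bin"
# After `deactivate`, VIRTUAL_ENV is unset again and PATH[0] reverts.
```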
@@ -0,0 +1,55 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.

set newline='\
'

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV '/home/pi/git/pythonkurs/teil20b'

set _OLD_VIRTUAL_PATH="$PATH:q"
setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"



if ('' != "") then
    set env_name = '() '
else
    set env_name = '('"$VIRTUAL_ENV:t:q"') '
endif

if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then
    if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then
        set do_prompt = "1"
    else
        set do_prompt = "0"
    endif
else
    set do_prompt = "1"
endif

if ( $do_prompt == "1" ) then
    # Could be in a non-interactive environment,
    # in which case, $prompt is undefined and we wouldn't
    # care about the prompt anyway.
    if ( $?prompt ) then
        set _OLD_VIRTUAL_PROMPT="$prompt:q"
        if ( "$prompt:q" =~ *"$newline:q"* ) then
            :
        else
            set prompt = "$env_name:q$prompt:q"
        endif
    endif
endif

unset env_name
unset do_prompt

alias pydoc python -m pydoc

rehash
@@ -0,0 +1,100 @@
# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
# Do not run it directly.

function _bashify_path -d "Converts a fish path to something bash can recognize"
    set fishy_path $argv
    set bashy_path $fishy_path[1]
    for path_part in $fishy_path[2..-1]
        set bashy_path "$bashy_path:$path_part"
    end
    echo $bashy_path
end

function _fishify_path -d "Converts a bash path to something fish can recognize"
    echo $argv | tr ':' '\n'
end

function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
        if test (echo $FISH_VERSION | head -c 1) -lt 3
            set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
        else
            set -gx PATH $_OLD_VIRTUAL_PATH
        end
        set -e _OLD_VIRTUAL_PATH
    end

    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        and functions -q _old_fish_prompt
        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
        set -l fish_function_path

        # Erase virtualenv's `fish_prompt` and restore the original.
        functions -e fish_prompt
        functions -c _old_fish_prompt fish_prompt
        functions -e _old_fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
    end

    set -e VIRTUAL_ENV

    if test "$argv[1]" != 'nondestructive'
        # Self-destruct!
        functions -e pydoc
        functions -e deactivate
        functions -e _bashify_path
        functions -e _fishify_path
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV '/home/pi/git/pythonkurs/teil20b'

# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
if test (echo $FISH_VERSION | head -c 1) -lt 3
    set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
else
    set -gx _OLD_VIRTUAL_PATH $PATH
end
set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH

# Unset `$PYTHONHOME` if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

function pydoc
    python -m pydoc $argv
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
    functions -c fish_prompt _old_fish_prompt

    function fish_prompt
        # Run the user's prompt first; it might depend on (pipe)status.
        set -l prompt (_old_fish_prompt)

        # Prompt override provided?
        # If not, just prepend the environment name.
        if test -n ''
            printf '(%s) ' ''
        else
            printf '(%s) ' (basename "$VIRTUAL_ENV")
        end

        string join -- \n $prompt # handle multi-line prompts
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
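The `_bashify_path` and `_fishify_path` helpers above exist because fish stores `PATH` as a list while POSIX shells use one colon-joined string. A tiny editorial sketch of that round trip, using made-up path entries:

```python
# Editorial sketch of the _bashify_path / _fishify_path round trip.
fishy_path = ["/home/pi/git/pythonkurs/teil20b/bin", "/usr/local/bin", "/usr/bin"]  # example entries

bashy_path = ":".join(fishy_path)           # what _bashify_path produces
assert bashy_path.split(":") == fishy_path  # what _fishify_path recovers

print(bashy_path)
```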
@@ -0,0 +1,117 @@
# virtualenv activation module
# Activate with `overlay use activate.nu`
# Deactivate with `deactivate`, as usual
#
# To customize the overlay name, you can call `overlay use activate.nu as foo`,
# but then simply `deactivate` won't work because it is just an alias to hide
# the "activate" overlay. You'd need to call `overlay hide foo` manually.

export-env {
    def is-string [x] {
        ($x | describe) == 'string'
    }

    def has-env [name: string] {
        $name in (env).name
    }

    # Emulates a `test -z`, but better, as it handles e.g. 'false'
    def is-env-true [name: string] {
        if (has-env $name) {
            # Try to parse 'true', '0', '1', and fail if not convertible
            let parsed = do -i { $env | get $name | into bool }
            if ($parsed | describe) == 'bool' {
                $parsed
            } else {
                not ($env | get $name | is-empty)
            }
        } else {
            false
        }
    }

    let is_windows = ($nu.os-info.name | str downcase) == 'windows'
    let virtual_env = '/home/pi/git/pythonkurs/teil20b'
    let bin = 'bin'
    let path_sep = (char esep)
    let path_name = if $is_windows {
        if (has-env 'Path') {
            'Path'
        } else {
            'PATH'
        }
    } else {
        'PATH'
    }

    let old_path = (
        if $is_windows {
            if (has-env 'Path') {
                $env.Path
            } else {
                $env.PATH
            }
        } else {
            $env.PATH
        } | if (is-string $in) {
            # if Path/PATH is a string, make it a list
            $in | split row $path_sep | path expand
        } else {
            $in
        }
    )

    let venv_path = ([$virtual_env $bin] | path join)
    let new_path = ($old_path | prepend $venv_path | str collect $path_sep)

    let new_env = {
        $path_name : $new_path
        VIRTUAL_ENV : $virtual_env
    }

    let new_env = if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') {
        $new_env
    } else {
        # Creating the new prompt for the session
        let virtual_prompt = if ('' == '') {
            $'(char lparen)($virtual_env | path basename)(char rparen) '
        } else {
            '() '
        }

        # Back up the old prompt builder
        let old_prompt_command = if (has-env 'VIRTUAL_ENV') and (has-env '_OLD_PROMPT_COMMAND') {
            $env._OLD_PROMPT_COMMAND
        } else {
            if (has-env 'PROMPT_COMMAND') {
                $env.PROMPT_COMMAND
            } else {
                ''
            }
        }

        # If there is no default prompt, then only the env is printed in the prompt
        let new_prompt = if (has-env 'PROMPT_COMMAND') {
            if ($old_prompt_command | describe) == 'block' {
                { $'($virtual_prompt)(do $old_prompt_command)' }
            } else {
                { $'($virtual_prompt)($old_prompt_command)' }
            }
        } else {
            { $'($virtual_prompt)' }
        }

        $new_env | merge {
            _OLD_VIRTUAL_PATH : ($old_path | str collect $path_sep)
            _OLD_PROMPT_COMMAND : $old_prompt_command
            PROMPT_COMMAND : $new_prompt
            VIRTUAL_PROMPT : $virtual_prompt
        }
    }

    # Environment variables that will be loaded as the virtual env
    load-env $new_env
}

export alias pydoc = python -m pydoc
export alias deactivate = overlay hide activate
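The `is-env-true` helper above treats an environment variable as enabled only if it parses as a boolean (or is otherwise non-empty), unlike a plain "is it set?" test. A comparable helper in Python (a hypothetical sketch, not part of the venv):

```python
# Editorial sketch of the is-env-true idea: a bare existence check would treat
# VIRTUAL_ENV_DISABLE_PROMPT=false as enabled; parsing the value avoids that.
import os

def env_flag(name: str) -> bool:  # hypothetical helper for illustration
    value = os.environ.get(name)
    if value is None:
        return False
    lowered = value.strip().lower()
    if lowered in ("1", "true", "yes", "on"):
        return True
    if lowered in ("", "0", "false", "no", "off"):
        return False
    return True  # any other non-empty value counts as set

print(env_flag("VIRTUAL_ENV_DISABLE_PROMPT"))
```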
@@ -0,0 +1,60 @@
$script:THIS_PATH = $myinvocation.mycommand.path
$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent

function global:deactivate([switch] $NonDestructive) {
    if (Test-Path variable:_OLD_VIRTUAL_PATH) {
        $env:PATH = $variable:_OLD_VIRTUAL_PATH
        Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
    }

    if (Test-Path function:_old_virtual_prompt) {
        $function:prompt = $function:_old_virtual_prompt
        Remove-Item function:\_old_virtual_prompt
    }

    if ($env:VIRTUAL_ENV) {
        Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
    }

    if (!$NonDestructive) {
        # Self destruct!
        Remove-Item function:deactivate
        Remove-Item function:pydoc
    }
}

function global:pydoc {
    python -m pydoc $args
}

# unset irrelevant variables
deactivate -nondestructive

$VIRTUAL_ENV = $BASE_DIR
$env:VIRTUAL_ENV = $VIRTUAL_ENV

New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH

$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
    function global:_old_virtual_prompt {
        ""
    }
    $function:_old_virtual_prompt = $function:prompt

    if ("" -ne "") {
        function global:prompt {
            # Add the custom prefix to the existing prompt
            $previous_prompt_value = & $function:_old_virtual_prompt
            ("() " + $previous_prompt_value)
        }
    }
    else {
        function global:prompt {
            # Add a prefix to the current prompt, but don't discard it.
            $previous_prompt_value = & $function:_old_virtual_prompt
            $new_prompt_value = "($( Split-Path $env:VIRTUAL_ENV -Leaf )) "
            ($new_prompt_value + $previous_prompt_value)
        }
    }
}
@@ -0,0 +1,31 @@
"""Activate virtualenv for current interpreter:

Use exec(open(this_file).read(), {'__file__': this_file}).

This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
"""
import os
import site
import sys

try:
    abs_file = os.path.abspath(__file__)
except NameError:
    raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))")

bin_dir = os.path.dirname(abs_file)
base = bin_dir[: -len("bin") - 1]  # strip away the bin part from the __file__, plus the path separator

# prepend bin to PATH (this file is inside the bin directory)
os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
os.environ["VIRTUAL_ENV"] = base  # virtual env is right above bin directory

# add the virtual environments libraries to the host python import mechanism
prev_length = len(sys.path)
for lib in "../lib/python3.11/site-packages".split(os.pathsep):
    path = os.path.realpath(os.path.join(bin_dir, lib))
    site.addsitedir(path.decode("utf-8") if "" else path)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]

sys.real_prefix = sys.prefix
sys.prefix = base
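As the docstring above says, `activate_this.py` is meant for a host interpreter that is not the venv's own `bin/python` (an embedded interpreter, mod_wsgi, and so on). A short editorial usage sketch, using the venv path this diff installs:

```python
# Editorial usage sketch for activate_this.py (path taken from this diff).
activate_this = "/home/pi/git/pythonkurs/teil20b/bin/activate_this.py"

with open(activate_this) as f:
    exec(f.read(), {"__file__": activate_this})

# From here on, imports resolve against the venv's site-packages and
# os.environ["VIRTUAL_ENV"] points at the environment.
import numpy  # works only if numpy lives in that venv's site-packages
```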
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
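All of the console-script wrappers that follow share the same generated pattern: import the entry point's callable, strip a possible `-script.pyw` or `.exe` suffix from `argv[0]` (a Windows launcher artifact), and exit with the callable's return value. The `re.sub` is a no-op for plain POSIX names, as this editorial sketch shows:

```python
# Editorial sketch: what the re.sub in these wrappers does to argv[0].
import re

def strip_launcher_suffix(name: str) -> str:
    return re.sub(r'(-script\.pyw|\.exe)?$', '', name)

print(strip_launcher_suffix("f2py-script.pyw"))  # -> "f2py"
print(strip_launcher_suffix("pip.exe"))          # -> "pip"
print(strip_launcher_suffix("wheel"))            # -> "wheel" (unchanged)
```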
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from ninja import ninja
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(ninja())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli.normalizer import cli_detect
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1 @@
/usr/bin/python3
@@ -0,0 +1 @@
python
@@ -0,0 +1 @@
python
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20b/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os


is_pypy = '__pypy__' in sys.builtin_module_names


def warn_distutils_present():
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings

    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )


def clear_distutils():
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    mods = [
        name
        for name in sys.modules
        if name == "distutils" or name.startswith("distutils.")
    ]
    for name in mods:
        del sys.modules[name]


def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    return which == 'local'


def ensure_local_distutils():
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()


class _TrivialRe:
    def __init__(self, *patterns):
        self._patterns = patterns

    def match(self, string):
        return all(pat in string for pat in self._patterns)


class DistutilsMetaFinder:
    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )


for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )


DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        remove_shim()


def insert_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass
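`_distutils_hack` works by keeping `DISTUTILS_FINDER` at the front of `sys.meta_path`, so `import distutils` is answered with `setuptools._distutils` instead of the deprecated stdlib package. A quick editorial check of which copy won in a given interpreter (not part of the package):

```python
# Editorial sketch: see whether the meta-path shim redirected distutils.
import sys
import distutils.core

# Contains "setuptools/_distutils" when the shim is active; the stdlib path otherwise.
print(distutils.core.__file__)
print(any(type(f).__name__ == "DistutilsMetaFinder" for f in sys.meta_path))
```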
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
__import__('_distutils_hack').do_override()
@@ -0,0 +1 @@
import _virtualenv
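These two one-line `.pth` files are what wire the hooks into every interpreter start-up: `site.py` executes any `.pth` line that begins with `import`, so `do_override()` and the `_virtualenv` patch run before user code. An editorial sketch of that mechanism, using a throwaway demo file (hypothetical, not part of the diff):

```python
# Editorial sketch: site.addsitedir() executes .pth lines that start with
# "import " and treats everything else as a path entry.
import os
import site
import sys
import tempfile

demo_dir = tempfile.mkdtemp()
with open(os.path.join(demo_dir, "demo.pth"), "w") as f:
    f.write("import sys; sys.demo_pth_ran = True\n")

site.addsitedir(demo_dir)
print(getattr(sys, "demo_pth_ran", False))  # True: the line ran when the dir was added
```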
@@ -0,0 +1,130 @@
"""Patches that are applied at runtime to the virtual environment"""
# -*- coding: utf-8 -*-

import os
import sys

VIRTUALENV_PATCH_FILE = os.path.join(__file__)


def patch_dist(dist):
    """
    Distutils allows users to configure some arguments via a configuration file:
    https://docs.python.org/3/install/index.html#distutils-configuration-files

    Some of these arguments don't make sense in the context of the virtual environment files, so let's fix them up.
    """
    # we cannot allow some install config as that would get packages installed outside of the virtual environment
    old_parse_config_files = dist.Distribution.parse_config_files

    def parse_config_files(self, *args, **kwargs):
        result = old_parse_config_files(self, *args, **kwargs)
        install = self.get_option_dict("install")

        if "prefix" in install:  # the prefix governs where to install the libraries
            install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
        for base in ("purelib", "platlib", "headers", "scripts", "data"):
            key = "install_{}".format(base)
            if key in install:  # do not allow global configs to hijack venv paths
                install.pop(key, None)
        return result

    dist.Distribution.parse_config_files = parse_config_files


# Import hook that patches some modules to ignore configuration values that break package installation in case
# of virtual environments.
_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
if sys.version_info > (3, 4):
    # https://docs.python.org/3/library/importlib.html#setting-up-an-importer

    class _Finder:
        """A meta path finder that allows patching the imported distutils modules"""

        fullname = None

        # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
        # because there are gevent-based applications that need to be first to import threading by themselves.
        # See https://github.com/pypa/virtualenv/issues/1895 for details.
        lock = []

        def find_spec(self, fullname, path, target=None):  # noqa: U100
            if fullname in _DISTUTILS_PATCH and self.fullname is None:
                # initialize lock[0] lazily
                if len(self.lock) == 0:
                    import threading

                    lock = threading.Lock()
                    # there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
                    # observing .lock as empty, and further going into hereby initialization. However due to the GIL,
                    # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
                    # - that every thread will use - into .lock[0].
                    # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
                    self.lock.append(lock)

                from functools import partial
                from importlib.util import find_spec

                with self.lock[0]:
                    self.fullname = fullname
                    try:
                        spec = find_spec(fullname, path)
                        if spec is not None:
                            # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
                            is_new_api = hasattr(spec.loader, "exec_module")
                            func_name = "exec_module" if is_new_api else "load_module"
                            old = getattr(spec.loader, func_name)
                            func = self.exec_module if is_new_api else self.load_module
                            if old is not func:
                                try:
                                    setattr(spec.loader, func_name, partial(func, old))
                                except AttributeError:
                                    pass  # C-Extension loaders are r/o such as zipimporter with <python 3.7
                            return spec
                    finally:
                        self.fullname = None

        @staticmethod
        def exec_module(old, module):
            old(module)
            if module.__name__ in _DISTUTILS_PATCH:
                patch_dist(module)

        @staticmethod
        def load_module(old, name):
            module = old(name)
            if module.__name__ in _DISTUTILS_PATCH:
                patch_dist(module)
            return module

    sys.meta_path.insert(0, _Finder())
else:
    # https://www.python.org/dev/peps/pep-0302/
    from imp import find_module
    from pkgutil import ImpImporter, ImpLoader

    class _VirtualenvImporter(object, ImpImporter):
        def __init__(self, path=None):
            object.__init__(self)
            ImpImporter.__init__(self, path)

        def find_module(self, fullname, path=None):
            if fullname in _DISTUTILS_PATCH:
                try:
                    return _VirtualenvLoader(fullname, *find_module(fullname.split(".")[-1], path))
                except ImportError:
                    pass
            return None

    class _VirtualenvLoader(object, ImpLoader):
        def __init__(self, fullname, file, filename, etc):
            object.__init__(self)
            ImpLoader.__init__(self, fullname, file, filename, etc)

        def load_module(self, fullname):
            module = super(_VirtualenvLoader, self).load_module(fullname)
            patch_dist(module)
            module.__loader__ = None  # distlib fallback
            return module

    sys.meta_path.append(_VirtualenvImporter())
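The `_Finder` above resolves the real spec with `importlib.util.find_spec` and then wraps the chosen loader's `exec_module` with `functools.partial`, so `patch_dist` runs right after the real module body executes. A stripped-down editorial sketch of that wrap-the-loader technique, applied to a harmless illustrative target rather than the real distutils patch:

```python
# Editorial sketch of the technique _virtualenv.py uses: find the real spec,
# then wrap the loader's exec_module so a post-import hook runs.
# "json" is only an illustrative target and works only if json is not yet imported.
import sys
from functools import partial
from importlib.util import find_spec


def _run_then_report(old_exec, module):
    old_exec(module)
    print("post-import hook ran for:", module.__name__)


class PostImportHook:
    _busy = False  # re-entrancy guard, same role as _Finder.fullname above

    def find_spec(self, fullname, path, target=None):
        if fullname != "json" or PostImportHook._busy:
            return None
        PostImportHook._busy = True
        try:
            spec = find_spec(fullname, path)
        finally:
            PostImportHook._busy = False
        if spec is not None and hasattr(spec.loader, "exec_module"):
            spec.loader.exec_module = partial(_run_then_report, spec.loader.exec_module)
        return spec


sys.meta_path.insert(0, PostImportHook())
import json  # noqa: F401  -- triggers the hook and prints the message
```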
@@ -0,0 +1 @@
pip
@@ -0,0 +1,21 @@
This package contains a modified version of ca-bundle.crt:

ca-bundle.crt -- Bundle of CA Root Certificates

Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#

***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
@@ -0,0 +1,66 @@
Metadata-Version: 2.1
Name: certifi
Version: 2023.7.22
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.6
License-File: LICENSE

Certifi: Python SSL Certificates
================================

Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed certificate authority (CA) bundle, you can use the
built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'

Or from the command line::

    $ python -m certifi
    /usr/local/lib/python3.7/site-packages/certifi/cacert.pem

Enjoy!

.. _`Requests`: https://requests.readthedocs.io/en/master/

Addition/Removal of Certificates
--------------------------------

Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
@@ -0,0 +1,15 @@
certifi-2023.7.22.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2023.7.22.dist-info/LICENSE,sha256=oC9sY4-fuE0G93ZMOrCF2K9-2luTwWbaVDEkeQd8b7A,1052
certifi-2023.7.22.dist-info/METADATA,sha256=RgdzxZw4VOIL_B8Rnp13_JJcWJyeRNQo_N39WoaO6y0,2171
certifi-2023.7.22.dist-info/RECORD,,
certifi-2023.7.22.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
certifi-2023.7.22.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
certifi-2023.7.22.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=L_j-d0kYuA_MzA2_2hraF1ovf6KT6DTquRdV3paQwOk,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-311.pyc,,
certifi/__pycache__/__main__.cpython-311.pyc,,
certifi/__pycache__/core.cpython-311.pyc,,
certifi/cacert.pem,sha256=eU0Dn_3yd8BH4m8sfVj4Glhl2KDrcCSg-sEWT-pNJ88,281617
certifi/core.py,sha256=lhewz0zFb2b4ULsQurElmloYwQoecjWzPqY67P8T7iM,4219
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
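Each `RECORD` row above is CSV: installed path, then `sha256=` followed by an unpadded URL-safe base64 digest, then the file size in bytes (entries such as `RECORD` itself leave the last two fields empty). An editorial sketch of how one row could be checked, assuming it is run from the site-packages directory that holds this dist-info:

```python
# Editorial sketch: recompute the hash field of one RECORD row.
import base64
import hashlib

def record_hash(path: str) -> str:
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

row = "certifi/__init__.py,sha256=L_j-d0kYuA_MzA2_2hraF1ovf6KT6DTquRdV3paQwOk,94"
path, expected_hash, size = row.rsplit(",", 2)
print(path, expected_hash, size)
# record_hash(path) == expected_hash and os.path.getsize(path) == int(size)
# should hold for an unmodified install.
```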
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1 @@
certifi
@@ -0,0 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2023.07.22"
@@ -0,0 +1,12 @@
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@@ -0,0 +1,108 @@
"""
certifi.py
~~~~~~~~~~

This module returns the installation location of cacert.pem or its contents.
"""
import sys


if sys.version_info >= (3, 11):

    from importlib.resources import as_file, files

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")

elif sys.version_info >= (3, 7):

    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the
        # file in cases where we're inside of a zipimport situation until
        # someone actually calls where(), but we don't want to re-extract
        # the file on every call of where(), so we'll do it once then store
        # it in a global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you
            # to manage the cleanup of this file, so it doesn't actually
            # return a path, it returns a context manager that will give
            # you the path when you enter it and will do any cleanup when
            # you leave it. In the common case of not needing a temporary
            # file, it will just return the file system location and the
            # __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")

else:
    import os
    import types
    from typing import Union

    Package = Union[types.ModuleType, str]
    Resource = Union[str, "os.PathLike"]

    # This fallback will work for Python versions prior to 3.7 that lack the
    # importlib.resources module but relies on the existing `where` function
    # so won't address issues with environments like PyOxidizer that don't set
    # __file__ on modules.
    def read_text(
        package: Package,
        resource: Resource,
        encoding: str = 'utf-8',
        errors: str = 'strict'
    ) -> str:
        with open(where(), encoding=encoding) as data:
            return data.read()

    # If we don't have importlib.resources, then we will just do the old logic
    # of assuming we're on the filesystem and munge the path directly.
    def where() -> str:
        f = os.path.dirname(__file__)

        return os.path.join(f, "cacert.pem")

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")
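`certifi.where()` only returns a filesystem path; callers hand that path to their TLS stack. The standard, well-documented use is building an `ssl` context (editorial example, not code from this diff):

```python
# Editorial sketch: standard certifi usage with the stdlib ssl module.
import ssl
import certifi

context = ssl.create_default_context(cafile=certifi.where())
print(certifi.where())          # path to the bundled cacert.pem
print(len(certifi.contents()))  # size of the PEM text that bundle contains
# urllib.request.urlopen("https://example.org", context=context) would then
# verify the server against Mozilla's bundle instead of the system store.
```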
@@ -0,0 +1 @@
pip
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 TAHRI Ahmed R.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,628 @@
Metadata-Version: 2.1
Name: charset-normalizer
Version: 3.2.0
Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
Home-page: https://github.com/Ousret/charset_normalizer
Author: Ahmed TAHRI
Author-email: ahmed.tahri@cloudnursery.dev
License: MIT
Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Text Processing :: Linguistic
Classifier: Topic :: Utilities
Classifier: Typing :: Typed
Requires-Python: >=3.7.0
Description-Content-Type: text/markdown
License-File: LICENSE
Provides-Extra: unicode_backport

<h1 align="center">Charset Detection, for Everyone 👋</h1>

<p align="center">
  <sup>The Real First Universal Charset Detector</sup><br>
  <a href="https://pypi.org/project/charset-normalizer">
    <img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
  </a>
  <a href="https://pepy.tech/project/charset-normalizer/">
    <img alt="Download Count Total" src="https://pepy.tech/badge/charset-normalizer/month" />
  </a>
  <a href="https://bestpractices.coreinfrastructure.org/projects/7297">
    <img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
  </a>
</p>

> A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
> I'm trying to resolve the issue by taking a new approach.
> All IANA character set names for which the Python core library provides codecs are supported.

<p align="center">
  >>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
</p>

This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.

| Feature                                          | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
|--------------------------------------------------|:---------------------------------------------:|:------------------:|:-----------------------------------------------:|
| `Fast`                                           | ❌ | ✅ | ✅ |
| `Universal**`                                    | ❌ | ✅ | ❌ |
| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
| `Reliable` **with** distinguishable standards    | ✅ | ✅ | ✅ |
| `License`                                        | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
| `Native Python`                                  | ✅ | ✅ | ❌ |
| `Detect spoken language`                         | ❌ | ✅ | N/A |
| `UnicodeDecodeError Safety`                      | ❌ | ✅ | ❌ |
| `Whl Size`                                       | 193.6 kB | 40 kB | ~200 kB |
| `Supported Encoding`                             | 33 | 🎉 [90](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |

<p align="center">
<img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>

*\*\* : They are clearly using specific code for a specific encoding even if covering most of the used ones*<br>
Did you get here because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)

## ⚡ Performance

This package offers better performance than its counterpart Chardet. Here are some numbers.

| Package                                       | Accuracy | Mean per file (ms) | File per sec (est) |
|-----------------------------------------------|:--------:|:------------------:|:------------------:|
| [chardet](https://github.com/chardet/chardet) | 86 %     | 200 ms             | 5 file/sec         |
| charset-normalizer                            | **98 %** | **10 ms**          | 100 file/sec       |

| Package                                       | 99th percentile | 95th percentile | 50th percentile |
|-----------------------------------------------|:---------------:|:---------------:|:---------------:|
| [chardet](https://github.com/chardet/chardet) | 1200 ms         | 287 ms          | 23 ms           |
| charset-normalizer                            | 100 ms          | 50 ms           | 5 ms            |

Chardet's performance on larger files (1MB+) is very poor. Expect huge differences on large payloads.

> Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
> And yes, these results might change at any time. The dataset can be updated to include more files.
> The actual delays heavily depend on your CPU capabilities. The factors should remain the same.
> Keep in mind that the stats are generous and that Chardet accuracy vs. ours is measured using Chardet's initial capability
> (eg. Supported Encoding) Challenge them if you want.

## ✨ Installation

Using pip:

```sh
pip install charset-normalizer -U
```

## 🚀 Basic Usage

### CLI
This package comes with a CLI.

```
usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
                  file [file ...]

The Real First Universal Charset Detector. Discover originating encoding used
on text file. Normalize text to unicode.

positional arguments:
  files                 File(s) to be analysed

optional arguments:
  -h, --help            show this help message and exit
  -v, --verbose         Display complementary information about file if any.
                        Stdout will contain logs about the detection process.
  -a, --with-alternative
                        Output complementary possibilities if any. Top-level
                        JSON WILL be a list.
  -n, --normalize       Permit to normalize input file. If not set, program
                        does not write anything.
  -m, --minimal         Only output the charset detected to STDOUT. Disabling
                        JSON output.
  -r, --replace         Replace file when trying to normalize it instead of
                        creating a new one.
  -f, --force           Replace file without asking if you are sure, use this
                        flag with caution.
  -t THRESHOLD, --threshold THRESHOLD
                        Define a custom maximum amount of chaos allowed in
                        decoded content. 0. <= chaos <= 1.
  --version             Show version information and exit.
```

```bash
normalizer ./data/sample.1.fr.srt
```

🎉 Since version 1.4.0 the CLI produces easily usable stdout results in JSON format.

```json
{
    "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
    "encoding": "cp1252",
    "encoding_aliases": [
        "1252",
        "windows_1252"
    ],
    "alternative_encodings": [
        "cp1254",
        "cp1256",
        "cp1258",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
        "mbcs"
    ],
    "language": "French",
    "alphabets": [
        "Basic Latin",
        "Latin-1 Supplement"
    ],
    "has_sig_or_bom": false,
    "chaos": 0.149,
    "coherence": 97.152,
    "unicode_path": null,
    "is_preferred": true
}
```

### Python
*Just print out normalized text*
```python
from charset_normalizer import from_path

results = from_path('./my_subtitle.srt')

print(str(results.best()))
```

*Upgrade your code without effort*
```python
from charset_normalizer import detect
```

The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.

See the docs for advanced usage: [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)

## 😇 Why

When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
reliable alternative using a completely different method. Also! I never back down on a good challenge!

I **don't care** about the **originating charset** encoding, because **two different tables** can
produce **two identical rendered strings.**
What I want is to get readable text, the best I can.

In a way, **I'm brute forcing text decoding.** How cool is that? 😎

Don't confuse the package **ftfy** with charset-normalizer or chardet. ftfy's goal is to repair Unicode strings, whereas charset-normalizer's is to convert a raw file in an unknown encoding to Unicode.

## 🍰 How

  - Discard all charset encoding tables that could not fit the binary content.
  - Measure the noise, or the mess, once opened (by chunks) with a corresponding charset encoding.
  - Extract the matches with the lowest mess detected.
  - Additionally, we measure coherence / probe for a language.

**Wait a minute**, what is noise/mess and coherence according to **YOU?**

*Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
**I established** some ground rules about **what is obvious** when **it seems like** a mess.
I know that my interpretation of what is noise is probably incomplete; feel free to contribute in order to
improve or rewrite it.

*Coherence:* For every language there is on earth, we have computed ranked letter appearance occurrences (the best we can). So I thought
that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
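A toy editorial illustration of the "brute force then score" idea described above (this is a sketch of the principle, not charset-normalizer's actual algorithm): decode the payload with several candidate codecs, drop the ones that fail outright, and prefer the decoding with the least mess.

```python
# Editorial sketch of the brute-force-and-score principle (not the library's code).
CANDIDATES = ["utf_8", "cp1252", "latin_1", "utf_16"]

def messiness(text: str) -> float:
    # crude stand-in for a real mess detector: ratio of control/replacement characters
    bad = sum(1 for ch in text if ch == "\ufffd" or (ord(ch) < 32 and ch not in "\r\n\t"))
    return bad / max(len(text), 1)

def guess(payload: bytes) -> str:
    scored = []
    for codec in CANDIDATES:
        try:
            scored.append((messiness(payload.decode(codec)), codec))
        except (UnicodeDecodeError, LookupError):
            continue  # this table cannot fit the binary content at all
    return min(scored)[1]

print(guess("Bonjour, où êtes-vous ?".encode("cp1252")))  # a single-byte Latin codec wins
```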
|||
|
|||
## ⚡ Known limitations |
|||
|
|||
- Language detection is unreliable when text contains two or more languages sharing identical letters. (eg. HTML (english tags) + Turkish content (Sharing Latin characters)) |
|||
- Every charset detector heavily depends on sufficient content. In common cases, do not bother run detection on very tiny content. |
|||
|
|||
## ⚠️ About Python EOLs |
|||
|
|||
**If you are running:** |
|||
|
|||
- Python >=2.7,<3.5: Unsupported |
|||
- Python 3.5: charset-normalizer < 2.1 |
|||
- Python 3.6: charset-normalizer < 3.1 |
|||
|
|||
Upgrade your Python interpreter as soon as possible. |
|||
|
|||
## 👤 Contributing |
|||
|
|||
Contributions, issues and feature requests are very much welcome.<br /> |
|||
Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute. |
|||
|
|||
## 📝 License |
|||
|
|||
Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br /> |
|||
This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed. |
|||
|
|||
Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/) |
|||
|
|||
## 💼 For Enterprise |
|||
|
|||
Professional support for charset-normalizer is available as part of the [Tidelift |
|||
Subscription][1]. Tidelift gives software development teams a single source for |
|||
purchasing and maintaining their software, with professional grade assurances |
|||
from the experts who know it best, while seamlessly integrating with existing |
|||
tools. |
|||
|
|||
[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme |
|||
|
|||
# Changelog |
|||
All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). |
|||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). |
|||
|
|||
## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07) |
|||
|
|||
### Changed |
|||
- Typehint for function `from_path` no longer enforce `PathLike` as its first argument |
|||
- Minor improvement over the global detection reliability |
|||
|
|||
### Added |
|||
- Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries |
|||
- Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True) |
|||
- Explicit support for Python 3.12 |
|||
|
|||
### Fixed |
|||
- Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289) |
|||
|
|||
## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06) |
|||
|
|||
### Added |
|||
- Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262) |
|||
|
|||
### Removed |
|||
- Support for Python 3.6 (PR #260) |
|||
|
|||
### Changed |
|||
- Optional speedup provided by mypy/c 1.0.1 |
|||
|
|||
## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18) |
|||
|
|||
### Fixed |
|||
- Multi-bytes cutter/chunk generator did not always cut correctly (PR #233) |
|||
|
|||
### Changed |
|||
- Speedup provided by mypy/c 0.990 on Python >= 3.7 |
|||
|
|||
## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20) |
|||
|
|||
### Added |
|||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results |
|||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES |
|||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio |
|||
- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl) |
|||
|
|||
### Changed |
|||
- Build with static metadata using 'build' frontend |
|||
- Make the language detection stricter |
|||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1 |
|||
|
|||
### Fixed |
|||
- CLI with opt --normalize fail when using full path for files |
|||
- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it |
|||
- Sphinx warnings when generating the documentation |
|||
|
|||
### Removed |
|||
- Coherence detector no longer return 'Simple English' instead return 'English' |
|||
- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese' |
|||
- Breaking: Method `first()` and `best()` from CharsetMatch |
|||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII) |
|||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches |
|||
- Breaking: Top-level function `normalize` |
|||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch |
|||
- Support for the backport `unicodedata2` |
|||
|
|||
## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18) |
|||
|
|||
### Added |
|||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results |
|||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES |
|||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio |
|||
|
|||
### Changed |
|||
- Build with static metadata using 'build' frontend |
|||
- Make the language detection stricter |
|||
|
|||
### Fixed |
|||
- CLI with option --normalize failing when using a full path for files |
|||
- TooManyAccentuatedPlugin inducing false positives in the mess detection when too few alpha characters have been fed to it |
|||
|
|||
### Removed |
|||
- Coherence detector no longer returns 'Simple English'; it returns 'English' instead |
|||
- Coherence detector no longer returns 'Classical Chinese'; it returns 'Chinese' instead |
|||
|
|||
## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21) |
|||
|
|||
### Added |
|||
- `normalizer --version` now specifies whether the current version provides the extra speedup (meaning a mypyc-compiled wheel) |
|||
|
|||
### Removed |
|||
- Breaking: Methods `first()` and `best()` from CharsetMatch |
|||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (it is unreliable and conflicts with ASCII) |
|||
|
|||
### Fixed |
|||
- Sphinx warnings when generating the documentation |
|||
|
|||
## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15) |
|||
|
|||
### Changed |
|||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1 |
|||
|
|||
### Removed |
|||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches |
|||
- Breaking: Top-level function `normalize` |
|||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch |
|||
- Support for the backport `unicodedata2` |
|||
|
|||
## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19) |
|||
|
|||
### Deprecated |
|||
- Function `normalize` scheduled for removal in 3.0 |
|||
|
|||
### Changed |
|||
- Removed a useless call to decode in the function is_unprintable (#206) |
|||
|
|||
### Fixed |
|||
- Third-party library (i18n xgettext) crashing because utf_8 with an underscore (PEP 263) was not recognized, from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204) |
|||
|
|||
## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19) |
|||
|
|||
### Added |
|||
- Output the Unicode table version when running the CLI with `--version` (PR #194) |
|||
|
|||
### Changed |
|||
- Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175) |
|||
- Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183) |
|||
|
|||
### Fixed |
|||
- Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175) |
|||
- CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181) |
|||
|
|||
### Removed |
|||
- Support for Python 3.5 (PR #192) |
|||
|
|||
### Deprecated |
|||
- Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194) |
|||
|
|||
## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12) |
|||
|
|||
### Fixed |
|||
- ASCII mis-detection in rare cases (PR #170) |
|||
|
|||
## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30) |
|||
|
|||
### Added |
|||
- Explicit support for Python 3.11 (PR #164) |
|||
|
|||
### Changed |
|||
- The logging behavior has been completely reviewed; it now uses only the TRACE and DEBUG levels (PR #163 #165) |
|||
|
|||
## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04) |
|||
|
|||
### Fixed |
|||
- Fallback match entries might lead to UnicodeDecodeError for large byte sequences (PR #154) |
|||
|
|||
### Changed |
|||
- Skipping the language-detection (CD) on ASCII (PR #155) |
|||
|
|||
## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03) |
|||
|
|||
### Changed |
|||
- Moderating the logging impact (since 2.0.8) for specific environments (PR #147) |
|||
|
|||
### Fixed |
|||
- Wrong logging level applied when setting kwarg `explain` to True (PR #146) |
|||
|
|||
## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24) |
|||
### Changed |
|||
- Improvement over Vietnamese detection (PR #126) |
|||
- MD improvement on trailing data and long foreign (non-pure latin) data (PR #124) |
|||
- Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122) |
|||
- Call sum() without an intermediary list, following PEP 289 recommendations, from [@adbar](https://github.com/adbar) (PR #129) |
|||
- Code style as refactored by Sourcery-AI (PR #131) |
|||
- Minor adjustment on the MD around european words (PR #133) |
|||
- Remove and replace SRTs from assets / tests (PR #139) |
|||
- Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135) |
|||
- Setting the kwarg `explain` to True will provisionally add a specific stream handler (bounded to the function's lifespan) (PR #135) |
|||
|
|||
### Fixed |
|||
- Fix large (misleading) sequence giving UnicodeDecodeError (PR #137) |
|||
- Avoid using too insignificant chunk (PR #137) |
|||
|
|||
### Added |
|||
- Add and expose the function `set_logging_handler` to configure a specific StreamHandler, from [@nmaynes](https://github.com/nmaynes) (PR #135); see the sketch after this list |
|||
- Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141) |
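A rough sketch of how a caller might hook into the library's logging after this release; `set_logging_handler` is called with its defaults since only the function name is given above, and the sample payload is made up:

```python
import logging

from charset_normalizer import from_bytes, set_logging_handler

# Attach the stream handler exposed by the library (parameters left at their defaults).
set_logging_handler()

# Alternatively, use plain stdlib logging against the library's named logger,
# which ships with a NullHandler attached by default.
logging.getLogger("charset_normalizer").setLevel(logging.DEBUG)

from_bytes("comment ça va ?".encode("utf_8"))
```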
|||
|
|||
## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11) |
|||
### Added |
|||
- Add support for Kazakh (Cyrillic) language detection (PR #109) |
|||
|
|||
### Changed |
|||
- Further improve inferring the language from a given single-byte code page (PR #112) |
|||
- Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116) |
|||
- Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113) |
|||
- Various detection improvements (MD+CD) (PR #117) |
|||
|
|||
### Removed |
|||
- Remove redundant logging entry about detected language(s) (PR #115) |
|||
|
|||
### Fixed |
|||
- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102) |
|||
|
|||
## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18) |
|||
### Fixed |
|||
- Unforeseen regression causing the loss of backward compatibility with some older minor versions of Python 3.5.x (PR #100) |
|||
- Fix CLI crash when using --minimal output in certain cases (PR #103) |
|||
|
|||
### Changed |
|||
- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101) |
|||
|
|||
## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14) |
|||
### Changed |
|||
- The project now complies with flake8, mypy, isort, and black to ensure better overall quality (PR #81) |
|||
- The BC support with v1.x was improved; the old static methods are restored (PR #82) |
|||
- The Unicode detection is slightly improved (PR #93) |
|||
- Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91) |
|||
|
|||
### Removed |
|||
- The project no longer raises a warning on tiny content given for detection; it is simply logged as a warning instead (PR #92) |
|||
|
|||
### Fixed |
|||
- In some rare cases, the chunk extractor could cut in the middle of a multi-byte character and mislead the mess detection (PR #95) |
|||
- Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96) |
|||
- The MANIFEST.in was not exhaustive (PR #78) |
|||
|
|||
## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30) |
|||
### Fixed |
|||
- The CLI no longer raises an unexpected exception when no encoding has been found (PR #70) |
|||
- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68) |
|||
- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72) |
|||
- Submatch factoring could be wrong in rare edge cases (PR #72) |
|||
- Multiple files given to the CLI were ignored (after the first path) when publishing results to STDOUT (PR #72) |
|||
- Fix line endings from CRLF to LF for certain project files (PR #67) |
|||
|
|||
### Changed |
|||
- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76) |
|||
- Allow fallback on specified encoding if any (PR #71) |
|||
|
|||
## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16) |
|||
### Changed |
|||
- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63) |
|||
- According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64) |
|||
|
|||
## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15) |
|||
### Fixed |
|||
- Empty/too-small JSON payload mis-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59) |
|||
|
|||
### Changed |
|||
- Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57) |
|||
|
|||
## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13) |
|||
### Fixed |
|||
- Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55) |
|||
- Using explain=False permanently disabled the verbose output in the current runtime (PR #47) |
|||
- One log entry (language target preemptive) was not shown in logs when using explain=True (PR #47) |
|||
- Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52) |
|||
|
|||
### Changed |
|||
- The public function normalize's default argument values were not aligned with from_bytes (PR #53) |
|||
|
|||
### Added |
|||
- You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47) |
|||
|
|||
## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02) |
|||
### Changed |
|||
- 4 to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet. |
|||
- Emphasis has been put on UTF-8 detection, which should perform nearly instantaneously. |
|||
- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible. |
|||
- The detection mechanism has been slightly improved; Turkish content is now detected correctly (most of the time) |
|||
- The program has been rewritten to ease readability and maintainability (now using static typing). |
|||
- utf_7 detection has been reinstated. |
|||
|
|||
### Removed |
|||
- This package no longer requires anything when used with Python 3.5 (dropped cached_property) |
|||
- Removed support for these languages: Catalan, Esperanto, Kazakh, Basque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbo-Croatian. |
|||
- The exception hook on UnicodeDecodeError has been removed. |
|||
|
|||
### Deprecated |
|||
- Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0 |
|||
|
|||
### Fixed |
|||
- The CLI output used the relative path of the file(s); it should be absolute. |
|||
|
|||
## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28) |
|||
### Fixed |
|||
- Logger configuration/usage no longer conflict with others (PR #44) |
|||
|
|||
## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21) |
|||
### Removed |
|||
- Using standard logging instead of using the package loguru. |
|||
- Dropping nose test framework in favor of the maintained pytest. |
|||
- Choose to not use dragonmapper package to help with gibberish Chinese/CJK text. |
|||
- Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version. |
|||
- Stop support for UTF-7 that does not contain a SIG. |
|||
- Dropping PrettyTable, replaced with pure JSON output in CLI. |
|||
|
|||
### Fixed |
|||
- The BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present, due to the sub-match factoring process. |
|||
- The BOM was not searched for properly when trying the utf32/16 parent codec. |
|||
|
|||
### Changed |
|||
- Improving the package final size by compressing frequencies.json. |
|||
- Huge improvement for the largest payloads. |
|||
|
|||
### Added |
|||
- CLI now produces JSON consumable output. |
|||
- Return ASCII if the given sequences fit, given reasonable confidence. |
|||
|
|||
## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13) |
|||
|
|||
### Fixed |
|||
- In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40) |
|||
|
|||
## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12) |
|||
|
|||
### Fixed |
|||
- Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39) |
|||
|
|||
## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12) |
|||
|
|||
### Fixed |
|||
- The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38) |
|||
|
|||
## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09) |
|||
|
|||
### Changed |
|||
- Amend the previous release to allow prettytable 2.0 (PR #35) |
|||
|
|||
## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08) |
|||
|
|||
### Fixed |
|||
- Fix an error when using the package with a Python pre-release interpreter (PR #33) |
|||
|
|||
### Changed |
|||
- Dependencies refactoring, constraints revised. |
|||
|
|||
### Added |
|||
- Add Python 3.9 and 3.10 to the supported interpreters |
|||
|
|||
MIT License |
|||
|
|||
Copyright (c) 2019 TAHRI Ahmed R. |
|||
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy |
|||
of this software and associated documentation files (the "Software"), to deal |
|||
in the Software without restriction, including without limitation the rights |
|||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|||
copies of the Software, and to permit persons to whom the Software is |
|||
furnished to do so, subject to the following conditions: |
|||
|
|||
The above copyright notice and this permission notice shall be included in all |
|||
copies or substantial portions of the Software. |
|||
|
|||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|||
SOFTWARE. |
@ -0,0 +1,34 @@ |
|||
../../../bin/normalizer,sha256=hs6-VP19rsd1cFrPju0KEInW3jFGd-KflbqVlRQEgoY,270 |
|||
charset_normalizer-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 |
|||
charset_normalizer-3.2.0.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070 |
|||
charset_normalizer-3.2.0.dist-info/METADATA,sha256=K2QHhX9fQ7jFxO7y4IQk7TqYZSH7iTyxgTJQxA65EH0,31284 |
|||
charset_normalizer-3.2.0.dist-info/RECORD,, |
|||
charset_normalizer-3.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 |
|||
charset_normalizer-3.2.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 |
|||
charset_normalizer-3.2.0.dist-info/entry_points.txt,sha256=uYo8aIGLWv8YgWfSna5HnfY_En4pkF1w4bgawNAXzP0,76 |
|||
charset_normalizer-3.2.0.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19 |
|||
charset_normalizer/__init__.py,sha256=UzI3xC8PhmcLRMzSgPb6minTmRq0kWznnCBJ8ZCc2XI,1577 |
|||
charset_normalizer/__pycache__/__init__.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/api.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/cd.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/constant.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/legacy.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/md.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/models.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/utils.cpython-311.pyc,, |
|||
charset_normalizer/__pycache__/version.cpython-311.pyc,, |
|||
charset_normalizer/api.py,sha256=WOlWjy6wT8SeMYFpaGbXZFN1TMXa-s8vZYfkL4G29iQ,21097 |
|||
charset_normalizer/assets/__init__.py,sha256=wpRfujN7GJuEE5wHHo3wEDVoJ5ovzRIxsImyimCBfGU,20069 |
|||
charset_normalizer/assets/__pycache__/__init__.cpython-311.pyc,, |
|||
charset_normalizer/cd.py,sha256=mZuiTSKq4XpweSDD2H4T4R3Axtaa-QS0tpEWdpMuAzQ,12554 |
|||
charset_normalizer/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 |
|||
charset_normalizer/cli/__pycache__/__init__.cpython-311.pyc,, |
|||
charset_normalizer/cli/__pycache__/normalizer.cpython-311.pyc,, |
|||
charset_normalizer/cli/normalizer.py,sha256=2F-xURZJzo063Ye-2RLJ2wcmURpbKeAzKwpiws65dAs,9744 |
|||
charset_normalizer/constant.py,sha256=PmCeoKXqq3ZbCtCUpKHwwFBIv9DXMT_an1yd24q28mA,19101 |
|||
charset_normalizer/legacy.py,sha256=T-QuVMsMeDiQEk8WSszMrzVJg_14AMeSkmHdRYhdl1k,2071 |
|||
charset_normalizer/md.py,sha256=gEWM354DqBsiSoNkKzFrIW4KRFQjQLbqYnbHAdBwj74,18682 |
|||
charset_normalizer/models.py,sha256=mC11wo84l00u2o03TRNX7M5ItBAbPUKKXgJSFxA35GY,11492 |
|||
charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 |
|||
charset_normalizer/utils.py,sha256=HdwmBy9vRqcRVGHKZqYWtpiS5wA35cLjnlVQCm0Bq9s,11578 |
|||
charset_normalizer/version.py,sha256=LbH8odlzMnwR4xZF9wCsnGXQA19axDO7HZ-J9hegIX0,79 |
@ -0,0 +1,5 @@ |
|||
Wheel-Version: 1.0 |
|||
Generator: bdist_wheel (0.40.0) |
|||
Root-Is-Purelib: true |
|||
Tag: py3-none-any |
|||
|
@ -0,0 +1,2 @@ |
|||
[console_scripts] |
|||
normalizer = charset_normalizer.cli.normalizer:cli_detect |
@ -0,0 +1 @@ |
|||
charset_normalizer |
@ -0,0 +1,46 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
Charset-Normalizer |
|||
~~~~~~~~~~~~~~ |
|||
The Real First Universal Charset Detector. |
|||
A library that helps you read text from an unknown charset encoding. |
|||
Motivated by chardet, this package tries to resolve the issue by taking a new approach. |
|||
All IANA character set names for which the Python core library provides codecs are supported. |
|||
|
|||
Basic usage: |
|||
>>> from charset_normalizer import from_bytes |
|||
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8')) |
|||
>>> best_guess = results.best() |
|||
>>> str(best_guess) |
|||
'Bсеки човек има право на образование. Oбразованието!' |
|||
|
|||
Other methods and usages are available - see the full documentation |
|||
at <https://github.com/Ousret/charset_normalizer>. |
|||
:copyright: (c) 2021 by Ahmed TAHRI |
|||
:license: MIT, see LICENSE for more details. |
|||
""" |
|||
import logging |
|||
|
|||
from .api import from_bytes, from_fp, from_path, is_binary |
|||
from .legacy import detect |
|||
from .models import CharsetMatch, CharsetMatches |
|||
from .utils import set_logging_handler |
|||
from .version import VERSION, __version__ |
|||
|
|||
__all__ = ( |
|||
"from_fp", |
|||
"from_path", |
|||
"from_bytes", |
|||
"is_binary", |
|||
"detect", |
|||
"CharsetMatch", |
|||
"CharsetMatches", |
|||
"__version__", |
|||
"VERSION", |
|||
"set_logging_handler", |
|||
) |
|||
|
|||
# Attach a NullHandler to the top level logger by default |
|||
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library |
|||
|
|||
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler()) |
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,626 @@ |
|||
import logging |
|||
from os import PathLike |
|||
from typing import BinaryIO, List, Optional, Set, Union |
|||
|
|||
from .cd import ( |
|||
coherence_ratio, |
|||
encoding_languages, |
|||
mb_encoding_languages, |
|||
merge_coherence_ratios, |
|||
) |
|||
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE |
|||
from .md import mess_ratio |
|||
from .models import CharsetMatch, CharsetMatches |
|||
from .utils import ( |
|||
any_specified_encoding, |
|||
cut_sequence_chunks, |
|||
iana_name, |
|||
identify_sig_or_bom, |
|||
is_cp_similar, |
|||
is_multi_byte_encoding, |
|||
should_strip_sig_or_bom, |
|||
) |
|||
|
|||
# Will most likely be controversial |
|||
# logging.addLevelName(TRACE, "TRACE") |
|||
logger = logging.getLogger("charset_normalizer") |
|||
explain_handler = logging.StreamHandler() |
|||
explain_handler.setFormatter( |
|||
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") |
|||
) |
|||
|
|||
|
|||
def from_bytes( |
|||
sequences: Union[bytes, bytearray], |
|||
steps: int = 5, |
|||
chunk_size: int = 512, |
|||
threshold: float = 0.2, |
|||
cp_isolation: Optional[List[str]] = None, |
|||
cp_exclusion: Optional[List[str]] = None, |
|||
preemptive_behaviour: bool = True, |
|||
explain: bool = False, |
|||
language_threshold: float = 0.1, |
|||
enable_fallback: bool = True, |
|||
) -> CharsetMatches: |
|||
""" |
|||
Given a raw bytes sequence, return the best possible charsets usable to render str objects. |
|||
If there are no results, it is a strong indicator that the source is binary/not text. |
|||
By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence, |
|||
and will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will. |
|||
|
|||
The preemptive behavior DOES NOT replace the traditional detection workflow; it prioritizes a particular code page |
|||
but never takes it for granted. It can improve performance. |
|||
|
|||
You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion for that |
|||
purpose. |
|||
|
|||
This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. |
|||
By default the library does not set up any handler other than the NullHandler. If you choose to set the 'explain' |
|||
toggle to True, it will alter the logger configuration to add a StreamHandler that is suitable for debugging. |
|||
A custom logging format and handler can be set manually. |
|||
""" |
|||
|
|||
if not isinstance(sequences, (bytearray, bytes)): |
|||
raise TypeError( |
|||
"Expected object of type bytes or bytearray, got: {0}".format( |
|||
type(sequences) |
|||
) |
|||
) |
|||
|
|||
if explain: |
|||
previous_logger_level: int = logger.level |
|||
logger.addHandler(explain_handler) |
|||
logger.setLevel(TRACE) |
|||
|
|||
length: int = len(sequences) |
|||
|
|||
if length == 0: |
|||
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") |
|||
if explain: |
|||
logger.removeHandler(explain_handler) |
|||
logger.setLevel(previous_logger_level or logging.WARNING) |
|||
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) |
|||
|
|||
if cp_isolation is not None: |
|||
logger.log( |
|||
TRACE, |
|||
"cp_isolation is set. use this flag for debugging purpose. " |
|||
"limited list of encoding allowed : %s.", |
|||
", ".join(cp_isolation), |
|||
) |
|||
cp_isolation = [iana_name(cp, False) for cp in cp_isolation] |
|||
else: |
|||
cp_isolation = [] |
|||
|
|||
if cp_exclusion is not None: |
|||
logger.log( |
|||
TRACE, |
|||
"cp_exclusion is set. use this flag for debugging purpose. " |
|||
"limited list of encoding excluded : %s.", |
|||
", ".join(cp_exclusion), |
|||
) |
|||
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] |
|||
else: |
|||
cp_exclusion = [] |
|||
|
|||
if length <= (chunk_size * steps): |
|||
logger.log( |
|||
TRACE, |
|||
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", |
|||
steps, |
|||
chunk_size, |
|||
length, |
|||
) |
|||
steps = 1 |
|||
chunk_size = length |
|||
|
|||
if steps > 1 and length / steps < chunk_size: |
|||
chunk_size = int(length / steps) |
|||
|
|||
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE |
|||
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE |
|||
|
|||
if is_too_small_sequence: |
|||
logger.log( |
|||
TRACE, |
|||
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format( |
|||
length |
|||
), |
|||
) |
|||
elif is_too_large_sequence: |
|||
logger.log( |
|||
TRACE, |
|||
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( |
|||
length |
|||
), |
|||
) |
|||
|
|||
prioritized_encodings: List[str] = [] |
|||
|
|||
specified_encoding: Optional[str] = ( |
|||
any_specified_encoding(sequences) if preemptive_behaviour else None |
|||
) |
|||
|
|||
if specified_encoding is not None: |
|||
prioritized_encodings.append(specified_encoding) |
|||
logger.log( |
|||
TRACE, |
|||
"Detected declarative mark in sequence. Priority +1 given for %s.", |
|||
specified_encoding, |
|||
) |
|||
|
|||
tested: Set[str] = set() |
|||
tested_but_hard_failure: List[str] = [] |
|||
tested_but_soft_failure: List[str] = [] |
|||
|
|||
fallback_ascii: Optional[CharsetMatch] = None |
|||
fallback_u8: Optional[CharsetMatch] = None |
|||
fallback_specified: Optional[CharsetMatch] = None |
|||
|
|||
results: CharsetMatches = CharsetMatches() |
|||
|
|||
sig_encoding, sig_payload = identify_sig_or_bom(sequences) |
|||
|
|||
if sig_encoding is not None: |
|||
prioritized_encodings.append(sig_encoding) |
|||
logger.log( |
|||
TRACE, |
|||
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.", |
|||
len(sig_payload), |
|||
sig_encoding, |
|||
) |
|||
|
|||
prioritized_encodings.append("ascii") |
|||
|
|||
if "utf_8" not in prioritized_encodings: |
|||
prioritized_encodings.append("utf_8") |
|||
|
|||
for encoding_iana in prioritized_encodings + IANA_SUPPORTED: |
|||
if cp_isolation and encoding_iana not in cp_isolation: |
|||
continue |
|||
|
|||
if cp_exclusion and encoding_iana in cp_exclusion: |
|||
continue |
|||
|
|||
if encoding_iana in tested: |
|||
continue |
|||
|
|||
tested.add(encoding_iana) |
|||
|
|||
decoded_payload: Optional[str] = None |
|||
bom_or_sig_available: bool = sig_encoding == encoding_iana |
|||
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( |
|||
encoding_iana |
|||
) |
|||
|
|||
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: |
|||
logger.log( |
|||
TRACE, |
|||
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.", |
|||
encoding_iana, |
|||
) |
|||
continue |
|||
if encoding_iana in {"utf_7"} and not bom_or_sig_available: |
|||
logger.log( |
|||
TRACE, |
|||
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.", |
|||
encoding_iana, |
|||
) |
|||
continue |
|||
|
|||
try: |
|||
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) |
|||
except (ModuleNotFoundError, ImportError): |
|||
logger.log( |
|||
TRACE, |
|||
"Encoding %s does not provide an IncrementalDecoder", |
|||
encoding_iana, |
|||
) |
|||
continue |
|||
|
|||
try: |
|||
if is_too_large_sequence and is_multi_byte_decoder is False: |
|||
str( |
|||
sequences[: int(50e4)] |
|||
if strip_sig_or_bom is False |
|||
else sequences[len(sig_payload) : int(50e4)], |
|||
encoding=encoding_iana, |
|||
) |
|||
else: |
|||
decoded_payload = str( |
|||
sequences |
|||
if strip_sig_or_bom is False |
|||
else sequences[len(sig_payload) :], |
|||
encoding=encoding_iana, |
|||
) |
|||
except (UnicodeDecodeError, LookupError) as e: |
|||
if not isinstance(e, LookupError): |
|||
logger.log( |
|||
TRACE, |
|||
"Code page %s does not fit given bytes sequence at ALL. %s", |
|||
encoding_iana, |
|||
str(e), |
|||
) |
|||
tested_but_hard_failure.append(encoding_iana) |
|||
continue |
|||
|
|||
similar_soft_failure_test: bool = False |
|||
|
|||
for encoding_soft_failed in tested_but_soft_failure: |
|||
if is_cp_similar(encoding_iana, encoding_soft_failed): |
|||
similar_soft_failure_test = True |
|||
break |
|||
|
|||
if similar_soft_failure_test: |
|||
logger.log( |
|||
TRACE, |
|||
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!", |
|||
encoding_iana, |
|||
encoding_soft_failed, |
|||
) |
|||
continue |
|||
|
|||
r_ = range( |
|||
0 if not bom_or_sig_available else len(sig_payload), |
|||
length, |
|||
int(length / steps), |
|||
) |
|||
|
|||
multi_byte_bonus: bool = ( |
|||
is_multi_byte_decoder |
|||
and decoded_payload is not None |
|||
and len(decoded_payload) < length |
|||
) |
|||
|
|||
if multi_byte_bonus: |
|||
logger.log( |
|||
TRACE, |
|||
"Code page %s is a multi byte encoding table and it appear that at least one character " |
|||
"was encoded using n-bytes.", |
|||
encoding_iana, |
|||
) |
|||
|
|||
max_chunk_gave_up: int = int(len(r_) / 4) |
|||
|
|||
max_chunk_gave_up = max(max_chunk_gave_up, 2) |
|||
early_stop_count: int = 0 |
|||
lazy_str_hard_failure = False |
|||
|
|||
md_chunks: List[str] = [] |
|||
md_ratios = [] |
|||
|
|||
try: |
|||
for chunk in cut_sequence_chunks( |
|||
sequences, |
|||
encoding_iana, |
|||
r_, |
|||
chunk_size, |
|||
bom_or_sig_available, |
|||
strip_sig_or_bom, |
|||
sig_payload, |
|||
is_multi_byte_decoder, |
|||
decoded_payload, |
|||
): |
|||
md_chunks.append(chunk) |
|||
|
|||
md_ratios.append( |
|||
mess_ratio( |
|||
chunk, |
|||
threshold, |
|||
explain is True and 1 <= len(cp_isolation) <= 2, |
|||
) |
|||
) |
|||
|
|||
if md_ratios[-1] >= threshold: |
|||
early_stop_count += 1 |
|||
|
|||
if (early_stop_count >= max_chunk_gave_up) or ( |
|||
bom_or_sig_available and strip_sig_or_bom is False |
|||
): |
|||
break |
|||
except ( |
|||
UnicodeDecodeError |
|||
) as e: # Lazy str loading may have missed something there |
|||
logger.log( |
|||
TRACE, |
|||
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", |
|||
encoding_iana, |
|||
str(e), |
|||
) |
|||
early_stop_count = max_chunk_gave_up |
|||
lazy_str_hard_failure = True |
|||
|
|||
# We might want to check the sequence again with the whole content |
|||
# Only if the initial MD tests pass |
|||
if ( |
|||
not lazy_str_hard_failure |
|||
and is_too_large_sequence |
|||
and not is_multi_byte_decoder |
|||
): |
|||
try: |
|||
sequences[int(50e3) :].decode(encoding_iana, errors="strict") |
|||
except UnicodeDecodeError as e: |
|||
logger.log( |
|||
TRACE, |
|||
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s", |
|||
encoding_iana, |
|||
str(e), |
|||
) |
|||
tested_but_hard_failure.append(encoding_iana) |
|||
continue |
|||
|
|||
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 |
|||
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: |
|||
tested_but_soft_failure.append(encoding_iana) |
|||
logger.log( |
|||
TRACE, |
|||
"%s was excluded because of initial chaos probing. Gave up %i time(s). " |
|||
"Computed mean chaos is %f %%.", |
|||
encoding_iana, |
|||
early_stop_count, |
|||
round(mean_mess_ratio * 100, ndigits=3), |
|||
) |
|||
# Preparing those fallbacks in case we got nothing. |
|||
if ( |
|||
enable_fallback |
|||
and encoding_iana in ["ascii", "utf_8", specified_encoding] |
|||
and not lazy_str_hard_failure |
|||
): |
|||
fallback_entry = CharsetMatch( |
|||
sequences, encoding_iana, threshold, False, [], decoded_payload |
|||
) |
|||
if encoding_iana == specified_encoding: |
|||
fallback_specified = fallback_entry |
|||
elif encoding_iana == "ascii": |
|||
fallback_ascii = fallback_entry |
|||
else: |
|||
fallback_u8 = fallback_entry |
|||
continue |
|||
|
|||
logger.log( |
|||
TRACE, |
|||
"%s passed initial chaos probing. Mean measured chaos is %f %%", |
|||
encoding_iana, |
|||
round(mean_mess_ratio * 100, ndigits=3), |
|||
) |
|||
|
|||
if not is_multi_byte_decoder: |
|||
target_languages: List[str] = encoding_languages(encoding_iana) |
|||
else: |
|||
target_languages = mb_encoding_languages(encoding_iana) |
|||
|
|||
if target_languages: |
|||
logger.log( |
|||
TRACE, |
|||
"{} should target any language(s) of {}".format( |
|||
encoding_iana, str(target_languages) |
|||
), |
|||
) |
|||
|
|||
cd_ratios = [] |
|||
|
|||
# We shall skip the CD when it's about ASCII |
|||
# Most of the time it's not relevant to run "language-detection" on it. |
|||
if encoding_iana != "ascii": |
|||
for chunk in md_chunks: |
|||
chunk_languages = coherence_ratio( |
|||
chunk, |
|||
language_threshold, |
|||
",".join(target_languages) if target_languages else None, |
|||
) |
|||
|
|||
cd_ratios.append(chunk_languages) |
|||
|
|||
cd_ratios_merged = merge_coherence_ratios(cd_ratios) |
|||
|
|||
if cd_ratios_merged: |
|||
logger.log( |
|||
TRACE, |
|||
"We detected language {} using {}".format( |
|||
cd_ratios_merged, encoding_iana |
|||
), |
|||
) |
|||
|
|||
results.append( |
|||
CharsetMatch( |
|||
sequences, |
|||
encoding_iana, |
|||
mean_mess_ratio, |
|||
bom_or_sig_available, |
|||
cd_ratios_merged, |
|||
decoded_payload, |
|||
) |
|||
) |
|||
|
|||
if ( |
|||
encoding_iana in [specified_encoding, "ascii", "utf_8"] |
|||
and mean_mess_ratio < 0.1 |
|||
): |
|||
logger.debug( |
|||
"Encoding detection: %s is most likely the one.", encoding_iana |
|||
) |
|||
if explain: |
|||
logger.removeHandler(explain_handler) |
|||
logger.setLevel(previous_logger_level) |
|||
return CharsetMatches([results[encoding_iana]]) |
|||
|
|||
if encoding_iana == sig_encoding: |
|||
logger.debug( |
|||
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within " |
|||
"the beginning of the sequence.", |
|||
encoding_iana, |
|||
) |
|||
if explain: |
|||
logger.removeHandler(explain_handler) |
|||
logger.setLevel(previous_logger_level) |
|||
return CharsetMatches([results[encoding_iana]]) |
|||
|
|||
if len(results) == 0: |
|||
if fallback_u8 or fallback_ascii or fallback_specified: |
|||
logger.log( |
|||
TRACE, |
|||
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.", |
|||
) |
|||
|
|||
if fallback_specified: |
|||
logger.debug( |
|||
"Encoding detection: %s will be used as a fallback match", |
|||
fallback_specified.encoding, |
|||
) |
|||
results.append(fallback_specified) |
|||
elif ( |
|||
(fallback_u8 and fallback_ascii is None) |
|||
or ( |
|||
fallback_u8 |
|||
and fallback_ascii |
|||
and fallback_u8.fingerprint != fallback_ascii.fingerprint |
|||
) |
|||
or (fallback_u8 is not None) |
|||
): |
|||
logger.debug("Encoding detection: utf_8 will be used as a fallback match") |
|||
results.append(fallback_u8) |
|||
elif fallback_ascii: |
|||
logger.debug("Encoding detection: ascii will be used as a fallback match") |
|||
results.append(fallback_ascii) |
|||
|
|||
if results: |
|||
logger.debug( |
|||
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", |
|||
results.best().encoding, # type: ignore |
|||
len(results) - 1, |
|||
) |
|||
else: |
|||
logger.debug("Encoding detection: Unable to determine any suitable charset.") |
|||
|
|||
if explain: |
|||
logger.removeHandler(explain_handler) |
|||
logger.setLevel(previous_logger_level) |
|||
|
|||
return results |
|||
|
|||
|
|||
def from_fp( |
|||
fp: BinaryIO, |
|||
steps: int = 5, |
|||
chunk_size: int = 512, |
|||
threshold: float = 0.20, |
|||
cp_isolation: Optional[List[str]] = None, |
|||
cp_exclusion: Optional[List[str]] = None, |
|||
preemptive_behaviour: bool = True, |
|||
explain: bool = False, |
|||
language_threshold: float = 0.1, |
|||
enable_fallback: bool = True, |
|||
) -> CharsetMatches: |
|||
""" |
|||
Same as the function from_bytes but using a file pointer that is already ready. |
|||
Will not close the file pointer. |
|||
""" |
|||
return from_bytes( |
|||
fp.read(), |
|||
steps, |
|||
chunk_size, |
|||
threshold, |
|||
cp_isolation, |
|||
cp_exclusion, |
|||
preemptive_behaviour, |
|||
explain, |
|||
language_threshold, |
|||
enable_fallback, |
|||
) |
|||
|
|||
|
|||
def from_path( |
|||
path: Union[str, bytes, PathLike], # type: ignore[type-arg] |
|||
steps: int = 5, |
|||
chunk_size: int = 512, |
|||
threshold: float = 0.20, |
|||
cp_isolation: Optional[List[str]] = None, |
|||
cp_exclusion: Optional[List[str]] = None, |
|||
preemptive_behaviour: bool = True, |
|||
explain: bool = False, |
|||
language_threshold: float = 0.1, |
|||
enable_fallback: bool = True, |
|||
) -> CharsetMatches: |
|||
""" |
|||
Same as the function from_bytes but with one extra step: opening and reading the given file path in binary mode. |
|||
Can raise IOError. |
|||
""" |
|||
with open(path, "rb") as fp: |
|||
return from_fp( |
|||
fp, |
|||
steps, |
|||
chunk_size, |
|||
threshold, |
|||
cp_isolation, |
|||
cp_exclusion, |
|||
preemptive_behaviour, |
|||
explain, |
|||
language_threshold, |
|||
enable_fallback, |
|||
) |
|||
|
|||
|
|||
def is_binary( |
|||
fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg] |
|||
steps: int = 5, |
|||
chunk_size: int = 512, |
|||
threshold: float = 0.20, |
|||
cp_isolation: Optional[List[str]] = None, |
|||
cp_exclusion: Optional[List[str]] = None, |
|||
preemptive_behaviour: bool = True, |
|||
explain: bool = False, |
|||
language_threshold: float = 0.1, |
|||
enable_fallback: bool = False, |
|||
) -> bool: |
|||
""" |
|||
Detect whether the given input (file, bytes, or path) points to a binary file, i.e. not text. |
|||
Based on the same main heuristic algorithms and default kwargs, with the sole exception that fallback matches |
|||
are disabled, to be stricter on content that is ASCII-compatible but unlikely to be text. |
|||
""" |
|||
if isinstance(fp_or_path_or_payload, (str, PathLike)): |
|||
guesses = from_path( |
|||
fp_or_path_or_payload, |
|||
steps=steps, |
|||
chunk_size=chunk_size, |
|||
threshold=threshold, |
|||
cp_isolation=cp_isolation, |
|||
cp_exclusion=cp_exclusion, |
|||
preemptive_behaviour=preemptive_behaviour, |
|||
explain=explain, |
|||
language_threshold=language_threshold, |
|||
enable_fallback=enable_fallback, |
|||
) |
|||
elif isinstance( |
|||
fp_or_path_or_payload, |
|||
( |
|||
bytes, |
|||
bytearray, |
|||
), |
|||
): |
|||
guesses = from_bytes( |
|||
fp_or_path_or_payload, |
|||
steps=steps, |
|||
chunk_size=chunk_size, |
|||
threshold=threshold, |
|||
cp_isolation=cp_isolation, |
|||
cp_exclusion=cp_exclusion, |
|||
preemptive_behaviour=preemptive_behaviour, |
|||
explain=explain, |
|||
language_threshold=language_threshold, |
|||
enable_fallback=enable_fallback, |
|||
) |
|||
else: |
|||
guesses = from_fp( |
|||
fp_or_path_or_payload, |
|||
steps=steps, |
|||
chunk_size=chunk_size, |
|||
threshold=threshold, |
|||
cp_isolation=cp_isolation, |
|||
cp_exclusion=cp_exclusion, |
|||
preemptive_behaviour=preemptive_behaviour, |
|||
explain=explain, |
|||
language_threshold=language_threshold, |
|||
enable_fallback=enable_fallback, |
|||
) |
|||
|
|||
return not guesses |
File diff suppressed because it is too large
Binary file not shown.
@ -0,0 +1,390 @@ |
|||
import importlib |
|||
from codecs import IncrementalDecoder |
|||
from collections import Counter |
|||
from functools import lru_cache |
|||
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple |
|||
|
|||
from .assets import FREQUENCIES |
|||
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES |
|||
from .md import is_suspiciously_successive_range |
|||
from .models import CoherenceMatches |
|||
from .utils import ( |
|||
is_accentuated, |
|||
is_latin, |
|||
is_multi_byte_encoding, |
|||
is_unicode_range_secondary, |
|||
unicode_range, |
|||
) |
|||
|
|||
|
|||
def encoding_unicode_range(iana_name: str) -> List[str]: |
|||
""" |
|||
Return the associated unicode ranges of a single-byte code page. |
|||
""" |
|||
if is_multi_byte_encoding(iana_name): |
|||
raise IOError("Function not supported on multi-byte code page") |
|||
|
|||
decoder = importlib.import_module( |
|||
"encodings.{}".format(iana_name) |
|||
).IncrementalDecoder |
|||
|
|||
p: IncrementalDecoder = decoder(errors="ignore") |
|||
seen_ranges: Dict[str, int] = {} |
|||
character_count: int = 0 |
|||
|
|||
for i in range(0x40, 0xFF): |
|||
chunk: str = p.decode(bytes([i])) |
|||
|
|||
if chunk: |
|||
character_range: Optional[str] = unicode_range(chunk) |
|||
|
|||
if character_range is None: |
|||
continue |
|||
|
|||
if is_unicode_range_secondary(character_range) is False: |
|||
if character_range not in seen_ranges: |
|||
seen_ranges[character_range] = 0 |
|||
seen_ranges[character_range] += 1 |
|||
character_count += 1 |
|||
|
|||
return sorted( |
|||
[ |
|||
character_range |
|||
for character_range in seen_ranges |
|||
if seen_ranges[character_range] / character_count >= 0.15 |
|||
] |
|||
) |
|||
|
|||
|
|||
def unicode_range_languages(primary_range: str) -> List[str]: |
|||
""" |
|||
Return inferred languages used with a unicode range. |
|||
""" |
|||
languages: List[str] = [] |
|||
|
|||
for language, characters in FREQUENCIES.items(): |
|||
for character in characters: |
|||
if unicode_range(character) == primary_range: |
|||
languages.append(language) |
|||
break |
|||
|
|||
return languages |
|||
|
|||
|
|||
@lru_cache() |
|||
def encoding_languages(iana_name: str) -> List[str]: |
|||
""" |
|||
Single-byte encoding language association. Some code pages are heavily linked to particular language(s). |
|||
This function does the correspondence. |
|||
""" |
|||
unicode_ranges: List[str] = encoding_unicode_range(iana_name) |
|||
primary_range: Optional[str] = None |
|||
|
|||
for specified_range in unicode_ranges: |
|||
if "Latin" not in specified_range: |
|||
primary_range = specified_range |
|||
break |
|||
|
|||
if primary_range is None: |
|||
return ["Latin Based"] |
|||
|
|||
return unicode_range_languages(primary_range) |
|||
|
|||
|
|||
@lru_cache() |
|||
def mb_encoding_languages(iana_name: str) -> List[str]: |
|||
""" |
|||
Multi-byte encoding language association. Some code pages are heavily linked to particular language(s). |
|||
This function does the correspondence. |
|||
""" |
|||
if ( |
|||
iana_name.startswith("shift_") |
|||
or iana_name.startswith("iso2022_jp") |
|||
or iana_name.startswith("euc_j") |
|||
or iana_name == "cp932" |
|||
): |
|||
return ["Japanese"] |
|||
if iana_name.startswith("gb") or iana_name in ZH_NAMES: |
|||
return ["Chinese"] |
|||
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: |
|||
return ["Korean"] |
|||
|
|||
return [] |
|||
|
|||
|
|||
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) |
|||
def get_target_features(language: str) -> Tuple[bool, bool]: |
|||
""" |
|||
Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin. |
|||
""" |
|||
target_have_accents: bool = False |
|||
target_pure_latin: bool = True |
|||
|
|||
for character in FREQUENCIES[language]: |
|||
if not target_have_accents and is_accentuated(character): |
|||
target_have_accents = True |
|||
if target_pure_latin and is_latin(character) is False: |
|||
target_pure_latin = False |
|||
|
|||
return target_have_accents, target_pure_latin |
|||
|
|||
|
|||
def alphabet_languages( |
|||
characters: List[str], ignore_non_latin: bool = False |
|||
) -> List[str]: |
|||
""" |
|||
Return the languages associated with the given characters. |
|||
""" |
|||
languages: List[Tuple[str, float]] = [] |
|||
|
|||
source_have_accents = any(is_accentuated(character) for character in characters) |
|||
|
|||
for language, language_characters in FREQUENCIES.items(): |
|||
target_have_accents, target_pure_latin = get_target_features(language) |
|||
|
|||
if ignore_non_latin and target_pure_latin is False: |
|||
continue |
|||
|
|||
if target_have_accents is False and source_have_accents: |
|||
continue |
|||
|
|||
character_count: int = len(language_characters) |
|||
|
|||
character_match_count: int = len( |
|||
[c for c in language_characters if c in characters] |
|||
) |
|||
|
|||
ratio: float = character_match_count / character_count |
|||
|
|||
if ratio >= 0.2: |
|||
languages.append((language, ratio)) |
|||
|
|||
languages = sorted(languages, key=lambda x: x[1], reverse=True) |
|||
|
|||
return [compatible_language[0] for compatible_language in languages] |
|||
|
|||
|
|||
def characters_popularity_compare( |
|||
language: str, ordered_characters: List[str] |
|||
) -> float: |
|||
""" |
|||
Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language. |
|||
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). |
|||
Beware that this function is not strict on the match, in order to ease the detection. (Meaning a close match counts as 1.) |
|||
""" |
|||
if language not in FREQUENCIES: |
|||
raise ValueError("{} not available".format(language)) |
|||
|
|||
character_approved_count: int = 0 |
|||
FREQUENCIES_language_set = set(FREQUENCIES[language]) |
|||
|
|||
ordered_characters_count: int = len(ordered_characters) |
|||
target_language_characters_count: int = len(FREQUENCIES[language]) |
|||
|
|||
large_alphabet: bool = target_language_characters_count > 26 |
|||
|
|||
for character, character_rank in zip( |
|||
ordered_characters, range(0, ordered_characters_count) |
|||
): |
|||
if character not in FREQUENCIES_language_set: |
|||
continue |
|||
|
|||
character_rank_in_language: int = FREQUENCIES[language].index(character) |
|||
expected_projection_ratio: float = ( |
|||
target_language_characters_count / ordered_characters_count |
|||
) |
|||
character_rank_projection: int = int(character_rank * expected_projection_ratio) |
|||
|
|||
if ( |
|||
large_alphabet is False |
|||
and abs(character_rank_projection - character_rank_in_language) > 4 |
|||
): |
|||
continue |
|||
|
|||
if ( |
|||
large_alphabet is True |
|||
and abs(character_rank_projection - character_rank_in_language) |
|||
< target_language_characters_count / 3 |
|||
): |
|||
character_approved_count += 1 |
|||
continue |
|||
|
|||
characters_before_source: List[str] = FREQUENCIES[language][ |
|||
0:character_rank_in_language |
|||
] |
|||
characters_after_source: List[str] = FREQUENCIES[language][ |
|||
character_rank_in_language: |
|||
] |
|||
characters_before: List[str] = ordered_characters[0:character_rank] |
|||
characters_after: List[str] = ordered_characters[character_rank:] |
|||
|
|||
before_match_count: int = len( |
|||
set(characters_before) & set(characters_before_source) |
|||
) |
|||
|
|||
after_match_count: int = len( |
|||
set(characters_after) & set(characters_after_source) |
|||
) |
|||
|
|||
if len(characters_before_source) == 0 and before_match_count <= 4: |
|||
character_approved_count += 1 |
|||
continue |
|||
|
|||
if len(characters_after_source) == 0 and after_match_count <= 4: |
|||
character_approved_count += 1 |
|||
continue |
|||
|
|||
if ( |
|||
before_match_count / len(characters_before_source) >= 0.4 |
|||
or after_match_count / len(characters_after_source) >= 0.4 |
|||
): |
|||
character_approved_count += 1 |
|||
continue |
|||
|
|||
return character_approved_count / len(ordered_characters) |
|||
|
|||
|
|||
def alpha_unicode_split(decoded_sequence: str) -> List[str]: |
|||
""" |
|||
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. |
|||
E.g. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list: |
|||
one containing the Latin letters and the other the Hebrew ones. |
|||
""" |
|||
layers: Dict[str, str] = {} |
|||
|
|||
for character in decoded_sequence: |
|||
if character.isalpha() is False: |
|||
continue |
|||
|
|||
character_range: Optional[str] = unicode_range(character) |
|||
|
|||
if character_range is None: |
|||
continue |
|||
|
|||
layer_target_range: Optional[str] = None |
|||
|
|||
for discovered_range in layers: |
|||
if ( |
|||
is_suspiciously_successive_range(discovered_range, character_range) |
|||
is False |
|||
): |
|||
layer_target_range = discovered_range |
|||
break |
|||
|
|||
if layer_target_range is None: |
|||
layer_target_range = character_range |
|||
|
|||
if layer_target_range not in layers: |
|||
layers[layer_target_range] = character.lower() |
|||
continue |
|||
|
|||
layers[layer_target_range] += character.lower() |
|||
|
|||
return list(layers.values()) |
|||
|
|||
|
|||
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: |
|||
""" |
|||
This function merges results previously given by the function coherence_ratio. |
|||
The return type is the same as coherence_ratio. |
|||
""" |
|||
per_language_ratios: Dict[str, List[float]] = {} |
|||
for result in results: |
|||
for sub_result in result: |
|||
language, ratio = sub_result |
|||
if language not in per_language_ratios: |
|||
per_language_ratios[language] = [ratio] |
|||
continue |
|||
per_language_ratios[language].append(ratio) |
|||
|
|||
merge = [ |
|||
( |
|||
language, |
|||
round( |
|||
sum(per_language_ratios[language]) / len(per_language_ratios[language]), |
|||
4, |
|||
), |
|||
) |
|||
for language in per_language_ratios |
|||
] |
|||
|
|||
return sorted(merge, key=lambda x: x[1], reverse=True) |
|||
|
|||
|
|||
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: |
|||
""" |
|||
We shall NOT return "English—" in CoherenceMatches because it is an alternative |
|||
of "English". This function only keeps the best match and remove the em-dash in it. |
|||
""" |
|||
index_results: Dict[str, List[float]] = dict() |
|||
|
|||
for result in results: |
|||
language, ratio = result |
|||
no_em_name: str = language.replace("—", "") |
|||
|
|||
if no_em_name not in index_results: |
|||
index_results[no_em_name] = [] |
|||
|
|||
index_results[no_em_name].append(ratio) |
|||
|
|||
if any(len(index_results[e]) > 1 for e in index_results): |
|||
filtered_results: CoherenceMatches = [] |
|||
|
|||
for language in index_results: |
|||
filtered_results.append((language, max(index_results[language]))) |
|||
|
|||
return filtered_results |
|||
|
|||
return results |
|||
|
|||
|
|||
@lru_cache(maxsize=2048) |
|||
def coherence_ratio( |
|||
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None |
|||
) -> CoherenceMatches: |
|||
""" |
|||
Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers. |
|||
A layer = Character extraction by alphabets/ranges. |
|||
""" |
|||
|
|||
results: List[Tuple[str, float]] = [] |
|||
ignore_non_latin: bool = False |
|||
|
|||
sufficient_match_count: int = 0 |
|||
|
|||
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] |
|||
if "Latin Based" in lg_inclusion_list: |
|||
ignore_non_latin = True |
|||
lg_inclusion_list.remove("Latin Based") |
|||
|
|||
for layer in alpha_unicode_split(decoded_sequence): |
|||
sequence_frequencies: TypeCounter[str] = Counter(layer) |
|||
most_common = sequence_frequencies.most_common() |
|||
|
|||
character_count: int = sum(o for c, o in most_common) |
|||
|
|||
if character_count <= TOO_SMALL_SEQUENCE: |
|||
continue |
|||
|
|||
popular_character_ordered: List[str] = [c for c, o in most_common] |
|||
|
|||
for language in lg_inclusion_list or alphabet_languages( |
|||
popular_character_ordered, ignore_non_latin |
|||
): |
|||
ratio: float = characters_popularity_compare( |
|||
language, popular_character_ordered |
|||
) |
|||
|
|||
if ratio < threshold: |
|||
continue |
|||
elif ratio >= 0.8: |
|||
sufficient_match_count += 1 |
|||
|
|||
results.append((language, round(ratio, 4))) |
|||
|
|||
if sufficient_match_count >= 3: |
|||
break |
|||
|
|||
return sorted( |
|||
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True |
|||
) |
Binary file not shown.
Binary file not shown.
@ -0,0 +1,296 @@ |
|||
import argparse |
|||
import sys |
|||
from json import dumps |
|||
from os.path import abspath, basename, dirname, join, realpath |
|||
from platform import python_version |
|||
from typing import List, Optional |
|||
from unicodedata import unidata_version |
|||
|
|||
import charset_normalizer.md as md_module |
|||
from charset_normalizer import from_fp |
|||
from charset_normalizer.models import CliDetectionResult |
|||
from charset_normalizer.version import __version__ |
|||
|
|||
|
|||
def query_yes_no(question: str, default: str = "yes") -> bool: |
|||
"""Ask a yes/no question via input() and return their answer. |
|||
|
|||
"question" is a string that is presented to the user. |
|||
"default" is the presumed answer if the user just hits <Enter>. |
|||
It must be "yes" (the default), "no" or None (meaning |
|||
an answer is required of the user). |
|||
|
|||
The "answer" return value is True for "yes" or False for "no". |
|||
|
|||
Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input |
|||
""" |
|||
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} |
|||
if default is None: |
|||
prompt = " [y/n] " |
|||
elif default == "yes": |
|||
prompt = " [Y/n] " |
|||
elif default == "no": |
|||
prompt = " [y/N] " |
|||
else: |
|||
raise ValueError("invalid default answer: '%s'" % default) |
|||
|
|||
while True: |
|||
sys.stdout.write(question + prompt) |
|||
choice = input().lower() |
|||
if default is not None and choice == "": |
|||
return valid[default] |
|||
elif choice in valid: |
|||
return valid[choice] |
|||
else: |
|||
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") |
|||
|
|||
|
|||
def cli_detect(argv: Optional[List[str]] = None) -> int: |
|||
""" |
|||
CLI assistant using ARGV and ArgumentParser |
|||
:param argv: |
|||
:return: 0 if everything is fine; anything else signals trouble |
|||
""" |
|||
parser = argparse.ArgumentParser( |
|||
description="The Real First Universal Charset Detector. " |
|||
"Discover originating encoding used on text file. " |
|||
"Normalize text to unicode." |
|||
) |
|||
|
|||
parser.add_argument( |
|||
"files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed" |
|||
) |
|||
parser.add_argument( |
|||
"-v", |
|||
"--verbose", |
|||
action="store_true", |
|||
default=False, |
|||
dest="verbose", |
|||
help="Display complementary information about file if any. " |
|||
"Stdout will contain logs about the detection process.", |
|||
) |
|||
parser.add_argument( |
|||
"-a", |
|||
"--with-alternative", |
|||
action="store_true", |
|||
default=False, |
|||
dest="alternatives", |
|||
help="Output complementary possibilities if any. Top-level JSON WILL be a list.", |
|||
) |
|||
parser.add_argument( |
|||
"-n", |
|||
"--normalize", |
|||
action="store_true", |
|||
default=False, |
|||
dest="normalize", |
|||
help="Permit to normalize input file. If not set, program does not write anything.", |
|||
) |
|||
parser.add_argument( |
|||
"-m", |
|||
"--minimal", |
|||
action="store_true", |
|||
default=False, |
|||
dest="minimal", |
|||
help="Only output the charset detected to STDOUT. Disabling JSON output.", |
|||
) |
|||
parser.add_argument( |
|||
"-r", |
|||
"--replace", |
|||
action="store_true", |
|||
default=False, |
|||
dest="replace", |
|||
help="Replace file when trying to normalize it instead of creating a new one.", |
|||
) |
|||
parser.add_argument( |
|||
"-f", |
|||
"--force", |
|||
action="store_true", |
|||
default=False, |
|||
dest="force", |
|||
help="Replace file without asking if you are sure, use this flag with caution.", |
|||
) |
|||
parser.add_argument( |
|||
"-t", |
|||
"--threshold", |
|||
action="store", |
|||
default=0.2, |
|||
type=float, |
|||
dest="threshold", |
|||
help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.", |
|||
) |
|||
parser.add_argument( |
|||
"--version", |
|||
action="version", |
|||
version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format( |
|||
__version__, |
|||
python_version(), |
|||
unidata_version, |
|||
"OFF" if md_module.__file__.lower().endswith(".py") else "ON", |
|||
), |
|||
help="Show version information and exit.", |
|||
) |
|||
|
|||
args = parser.parse_args(argv) |
|||
|
|||
if args.replace is True and args.normalize is False: |
|||
print("Use --replace in addition of --normalize only.", file=sys.stderr) |
|||
return 1 |
|||
|
|||
if args.force is True and args.replace is False: |
|||
print("Use --force in addition of --replace only.", file=sys.stderr) |
|||
return 1 |
|||
|
|||
if args.threshold < 0.0 or args.threshold > 1.0: |
|||
print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr) |
|||
return 1 |
|||
|
|||
x_ = [] |
|||
|
|||
for my_file in args.files: |
|||
matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose) |
|||
|
|||
best_guess = matches.best() |
|||
|
|||
if best_guess is None: |
|||
print( |
|||
'Unable to identify originating encoding for "{}". {}'.format( |
|||
my_file.name, |
|||
"Maybe try increasing maximum amount of chaos." |
|||
if args.threshold < 1.0 |
|||
else "", |
|||
), |
|||
file=sys.stderr, |
|||
) |
|||
x_.append( |
|||
CliDetectionResult( |
|||
abspath(my_file.name), |
|||
None, |
|||
[], |
|||
[], |
|||
"Unknown", |
|||
[], |
|||
False, |
|||
1.0, |
|||
0.0, |
|||
None, |
|||
True, |
|||
) |
|||
) |
|||
else: |
|||
x_.append( |
|||
CliDetectionResult( |
|||
abspath(my_file.name), |
|||
best_guess.encoding, |
|||
best_guess.encoding_aliases, |
|||
[ |
|||
cp |
|||
for cp in best_guess.could_be_from_charset |
|||
if cp != best_guess.encoding |
|||
], |
|||
best_guess.language, |
|||
best_guess.alphabets, |
|||
best_guess.bom, |
|||
best_guess.percent_chaos, |
|||
best_guess.percent_coherence, |
|||
None, |
|||
True, |
|||
) |
|||
) |
|||
|
|||
if len(matches) > 1 and args.alternatives: |
|||
for el in matches: |
|||
if el != best_guess: |
|||
x_.append( |
|||
CliDetectionResult( |
|||
abspath(my_file.name), |
|||
el.encoding, |
|||
el.encoding_aliases, |
|||
[ |
|||
cp |
|||
for cp in el.could_be_from_charset |
|||
if cp != el.encoding |
|||
], |
|||
el.language, |
|||
el.alphabets, |
|||
el.bom, |
|||
el.percent_chaos, |
|||
el.percent_coherence, |
|||
None, |
|||
False, |
|||
) |
|||
) |
|||
|
|||
if args.normalize is True: |
|||
if best_guess.encoding.startswith("utf") is True: |
|||
print( |
|||
'"{}" file does not need to be normalized, as it already came from unicode.'.format( |
|||
my_file.name |
|||
), |
|||
file=sys.stderr, |
|||
) |
|||
if my_file.closed is False: |
|||
my_file.close() |
|||
continue |
|||
|
|||
dir_path = dirname(realpath(my_file.name)) |
|||
file_name = basename(realpath(my_file.name)) |
|||
|
|||
o_: List[str] = file_name.split(".") |
|||
|
|||
if args.replace is False: |
|||
o_.insert(-1, best_guess.encoding) |
|||
if my_file.closed is False: |
|||
my_file.close() |
|||
elif ( |
|||
args.force is False |
|||
and query_yes_no( |
|||
'Are you sure you want to normalize "{}" by replacing it?'.format( |
|||
my_file.name |
|||
), |
|||
"no", |
|||
) |
|||
is False |
|||
): |
|||
if my_file.closed is False: |
|||
my_file.close() |
|||
continue |
|||
|
|||
try: |
|||
x_[0].unicode_path = join(dir_path, ".".join(o_)) |
|||
|
|||
with open(x_[0].unicode_path, "w", encoding="utf-8") as fp: |
|||
fp.write(str(best_guess)) |
|||
except IOError as e: |
|||
print(str(e), file=sys.stderr) |
|||
if my_file.closed is False: |
|||
my_file.close() |
|||
return 2 |
|||
|
|||
if my_file.closed is False: |
|||
my_file.close() |
|||
|
|||
if args.minimal is False: |
|||
print( |
|||
dumps( |
|||
[el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, |
|||
ensure_ascii=True, |
|||
indent=4, |
|||
) |
|||
) |
|||
else: |
|||
for my_file in args.files: |
|||
print( |
|||
", ".join( |
|||
[ |
|||
el.encoding or "undefined" |
|||
for el in x_ |
|||
if el.path == abspath(my_file.name) |
|||
] |
|||
) |
|||
) |
|||
|
|||
return 0 |
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
cli_detect() |
@ -0,0 +1,495 @@ |
|||
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE |
|||
from encodings.aliases import aliases |
|||
from re import IGNORECASE, compile as re_compile |
|||
from typing import Dict, List, Set, Union |
|||
|
|||
from .assets import FREQUENCIES |
|||
|
|||
# Map each eligible encoding to its BOM/SIG marker(s): either a single bytes value or a list of them. |
|||
ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = { |
|||
"utf_8": BOM_UTF8, |
|||
"utf_7": [ |
|||
b"\x2b\x2f\x76\x38", |
|||
b"\x2b\x2f\x76\x39", |
|||
b"\x2b\x2f\x76\x2b", |
|||
b"\x2b\x2f\x76\x2f", |
|||
b"\x2b\x2f\x76\x38\x2d", |
|||
], |
|||
"gb18030": b"\x84\x31\x95\x33", |
|||
"utf_32": [BOM_UTF32_BE, BOM_UTF32_LE], |
|||
"utf_16": [BOM_UTF16_BE, BOM_UTF16_LE], |
|||
} |
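# Illustrative sketch of how ENCODING_MARKS can be consulted (hypothetical helper, not
# part of this module; the package's real BOM/SIG detection lives elsewhere):
#
#     def _starts_with_mark(payload: bytes, encoding: str) -> bool:
#         marks = ENCODING_MARKS.get(encoding, [])
#         if isinstance(marks, bytes):
#             marks = [marks]
#         return any(payload.startswith(mark) for mark in marks)
#
#     # e.g. _starts_with_mark(b"\xef\xbb\xbfhello", "utf_8") is True (UTF-8 BOM).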
|||
|
|||
TOO_SMALL_SEQUENCE: int = 32 |
|||
TOO_BIG_SEQUENCE: int = int(10e6) |
|||
|
|||
UTF8_MAXIMAL_ALLOCATION: int = 1112064 |
|||
|
|||
UNICODE_RANGES_COMBINED: Dict[str, range] = { |
|||
"Control character": range(31 + 1), |
|||
"Basic Latin": range(32, 127 + 1), |
|||
"Latin-1 Supplement": range(128, 255 + 1), |
|||
"Latin Extended-A": range(256, 383 + 1), |
|||
"Latin Extended-B": range(384, 591 + 1), |
|||
"IPA Extensions": range(592, 687 + 1), |
|||
"Spacing Modifier Letters": range(688, 767 + 1), |
|||
"Combining Diacritical Marks": range(768, 879 + 1), |
|||
"Greek and Coptic": range(880, 1023 + 1), |
|||
"Cyrillic": range(1024, 1279 + 1), |
|||
"Cyrillic Supplement": range(1280, 1327 + 1), |
|||
"Armenian": range(1328, 1423 + 1), |
|||
"Hebrew": range(1424, 1535 + 1), |
|||
"Arabic": range(1536, 1791 + 1), |
|||
"Syriac": range(1792, 1871 + 1), |
|||
"Arabic Supplement": range(1872, 1919 + 1), |
|||
"Thaana": range(1920, 1983 + 1), |
|||
"NKo": range(1984, 2047 + 1), |
|||
"Samaritan": range(2048, 2111 + 1), |
|||
"Mandaic": range(2112, 2143 + 1), |
|||
"Syriac Supplement": range(2144, 2159 + 1), |
|||
"Arabic Extended-A": range(2208, 2303 + 1), |
|||
"Devanagari": range(2304, 2431 + 1), |
|||
"Bengali": range(2432, 2559 + 1), |
|||
"Gurmukhi": range(2560, 2687 + 1), |
|||
"Gujarati": range(2688, 2815 + 1), |
|||
"Oriya": range(2816, 2943 + 1), |
|||
"Tamil": range(2944, 3071 + 1), |
|||
"Telugu": range(3072, 3199 + 1), |
|||
"Kannada": range(3200, 3327 + 1), |
|||
"Malayalam": range(3328, 3455 + 1), |
|||
"Sinhala": range(3456, 3583 + 1), |
|||
"Thai": range(3584, 3711 + 1), |
|||
"Lao": range(3712, 3839 + 1), |
|||
"Tibetan": range(3840, 4095 + 1), |
|||
"Myanmar": range(4096, 4255 + 1), |
|||
"Georgian": range(4256, 4351 + 1), |
|||
"Hangul Jamo": range(4352, 4607 + 1), |
|||
"Ethiopic": range(4608, 4991 + 1), |
|||
"Ethiopic Supplement": range(4992, 5023 + 1), |
|||
"Cherokee": range(5024, 5119 + 1), |
|||
"Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1), |
|||
"Ogham": range(5760, 5791 + 1), |
|||
"Runic": range(5792, 5887 + 1), |
|||
"Tagalog": range(5888, 5919 + 1), |
|||
"Hanunoo": range(5920, 5951 + 1), |
|||
"Buhid": range(5952, 5983 + 1), |
|||
"Tagbanwa": range(5984, 6015 + 1), |
|||
"Khmer": range(6016, 6143 + 1), |
|||
"Mongolian": range(6144, 6319 + 1), |
|||
"Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1), |
|||
"Limbu": range(6400, 6479 + 1), |
|||
"Tai Le": range(6480, 6527 + 1), |
|||
"New Tai Lue": range(6528, 6623 + 1), |
|||
"Khmer Symbols": range(6624, 6655 + 1), |
|||
"Buginese": range(6656, 6687 + 1), |
|||
"Tai Tham": range(6688, 6831 + 1), |
|||
"Combining Diacritical Marks Extended": range(6832, 6911 + 1), |
|||
"Balinese": range(6912, 7039 + 1), |
|||
"Sundanese": range(7040, 7103 + 1), |
|||
"Batak": range(7104, 7167 + 1), |
|||
"Lepcha": range(7168, 7247 + 1), |
|||
"Ol Chiki": range(7248, 7295 + 1), |
|||
"Cyrillic Extended C": range(7296, 7311 + 1), |
|||
"Sundanese Supplement": range(7360, 7375 + 1), |
|||
"Vedic Extensions": range(7376, 7423 + 1), |
|||
"Phonetic Extensions": range(7424, 7551 + 1), |
|||
"Phonetic Extensions Supplement": range(7552, 7615 + 1), |
|||
"Combining Diacritical Marks Supplement": range(7616, 7679 + 1), |
|||
"Latin Extended Additional": range(7680, 7935 + 1), |
|||
"Greek Extended": range(7936, 8191 + 1), |
|||
"General Punctuation": range(8192, 8303 + 1), |
|||
"Superscripts and Subscripts": range(8304, 8351 + 1), |
|||
"Currency Symbols": range(8352, 8399 + 1), |
|||
"Combining Diacritical Marks for Symbols": range(8400, 8447 + 1), |
|||
"Letterlike Symbols": range(8448, 8527 + 1), |
|||
"Number Forms": range(8528, 8591 + 1), |
|||
"Arrows": range(8592, 8703 + 1), |
|||
"Mathematical Operators": range(8704, 8959 + 1), |
|||
"Miscellaneous Technical": range(8960, 9215 + 1), |
|||
"Control Pictures": range(9216, 9279 + 1), |
|||
"Optical Character Recognition": range(9280, 9311 + 1), |
|||
"Enclosed Alphanumerics": range(9312, 9471 + 1), |
|||
"Box Drawing": range(9472, 9599 + 1), |
|||
"Block Elements": range(9600, 9631 + 1), |
|||
"Geometric Shapes": range(9632, 9727 + 1), |
|||
"Miscellaneous Symbols": range(9728, 9983 + 1), |
|||
"Dingbats": range(9984, 10175 + 1), |
|||
"Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1), |
|||
"Supplemental Arrows-A": range(10224, 10239 + 1), |
|||
"Braille Patterns": range(10240, 10495 + 1), |
|||
"Supplemental Arrows-B": range(10496, 10623 + 1), |
|||
"Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1), |
|||
"Supplemental Mathematical Operators": range(10752, 11007 + 1), |
|||
"Miscellaneous Symbols and Arrows": range(11008, 11263 + 1), |
|||
"Glagolitic": range(11264, 11359 + 1), |
|||
"Latin Extended-C": range(11360, 11391 + 1), |
|||
"Coptic": range(11392, 11519 + 1), |
|||
"Georgian Supplement": range(11520, 11567 + 1), |
|||
"Tifinagh": range(11568, 11647 + 1), |
|||
"Ethiopic Extended": range(11648, 11743 + 1), |
|||
"Cyrillic Extended-A": range(11744, 11775 + 1), |
|||
"Supplemental Punctuation": range(11776, 11903 + 1), |
|||
"CJK Radicals Supplement": range(11904, 12031 + 1), |
|||
"Kangxi Radicals": range(12032, 12255 + 1), |
|||
"Ideographic Description Characters": range(12272, 12287 + 1), |
|||
"CJK Symbols and Punctuation": range(12288, 12351 + 1), |
|||
"Hiragana": range(12352, 12447 + 1), |
|||
"Katakana": range(12448, 12543 + 1), |
|||
"Bopomofo": range(12544, 12591 + 1), |
|||
"Hangul Compatibility Jamo": range(12592, 12687 + 1), |
|||
"Kanbun": range(12688, 12703 + 1), |
|||
"Bopomofo Extended": range(12704, 12735 + 1), |
|||
"CJK Strokes": range(12736, 12783 + 1), |
|||
"Katakana Phonetic Extensions": range(12784, 12799 + 1), |
|||
"Enclosed CJK Letters and Months": range(12800, 13055 + 1), |
|||
"CJK Compatibility": range(13056, 13311 + 1), |
|||
"CJK Unified Ideographs Extension A": range(13312, 19903 + 1), |
|||
"Yijing Hexagram Symbols": range(19904, 19967 + 1), |
|||
"CJK Unified Ideographs": range(19968, 40959 + 1), |
|||
"Yi Syllables": range(40960, 42127 + 1), |
|||
"Yi Radicals": range(42128, 42191 + 1), |
|||
"Lisu": range(42192, 42239 + 1), |
|||
"Vai": range(42240, 42559 + 1), |
|||
"Cyrillic Extended-B": range(42560, 42655 + 1), |
|||
"Bamum": range(42656, 42751 + 1), |
|||
"Modifier Tone Letters": range(42752, 42783 + 1), |
|||
"Latin Extended-D": range(42784, 43007 + 1), |
|||
"Syloti Nagri": range(43008, 43055 + 1), |
|||
"Common Indic Number Forms": range(43056, 43071 + 1), |
|||
"Phags-pa": range(43072, 43135 + 1), |
|||
"Saurashtra": range(43136, 43231 + 1), |
|||
"Devanagari Extended": range(43232, 43263 + 1), |
|||
"Kayah Li": range(43264, 43311 + 1), |
|||
"Rejang": range(43312, 43359 + 1), |
|||
"Hangul Jamo Extended-A": range(43360, 43391 + 1), |
|||
"Javanese": range(43392, 43487 + 1), |
|||
"Myanmar Extended-B": range(43488, 43519 + 1), |
|||
"Cham": range(43520, 43615 + 1), |
|||
"Myanmar Extended-A": range(43616, 43647 + 1), |
|||
"Tai Viet": range(43648, 43743 + 1), |
|||
"Meetei Mayek Extensions": range(43744, 43775 + 1), |
|||
"Ethiopic Extended-A": range(43776, 43823 + 1), |
|||
"Latin Extended-E": range(43824, 43887 + 1), |
|||
"Cherokee Supplement": range(43888, 43967 + 1), |
|||
"Meetei Mayek": range(43968, 44031 + 1), |
|||
"Hangul Syllables": range(44032, 55215 + 1), |
|||
"Hangul Jamo Extended-B": range(55216, 55295 + 1), |
|||
"High Surrogates": range(55296, 56191 + 1), |
|||
"High Private Use Surrogates": range(56192, 56319 + 1), |
|||
"Low Surrogates": range(56320, 57343 + 1), |
|||
"Private Use Area": range(57344, 63743 + 1), |
|||
"CJK Compatibility Ideographs": range(63744, 64255 + 1), |
|||
"Alphabetic Presentation Forms": range(64256, 64335 + 1), |
|||
"Arabic Presentation Forms-A": range(64336, 65023 + 1), |
|||
"Variation Selectors": range(65024, 65039 + 1), |
|||
"Vertical Forms": range(65040, 65055 + 1), |
|||
"Combining Half Marks": range(65056, 65071 + 1), |
|||
"CJK Compatibility Forms": range(65072, 65103 + 1), |
|||
"Small Form Variants": range(65104, 65135 + 1), |
|||
"Arabic Presentation Forms-B": range(65136, 65279 + 1), |
|||
"Halfwidth and Fullwidth Forms": range(65280, 65519 + 1), |
|||
"Specials": range(65520, 65535 + 1), |
|||
"Linear B Syllabary": range(65536, 65663 + 1), |
|||
"Linear B Ideograms": range(65664, 65791 + 1), |
|||
"Aegean Numbers": range(65792, 65855 + 1), |
|||
"Ancient Greek Numbers": range(65856, 65935 + 1), |
|||
"Ancient Symbols": range(65936, 65999 + 1), |
|||
"Phaistos Disc": range(66000, 66047 + 1), |
|||
"Lycian": range(66176, 66207 + 1), |
|||
"Carian": range(66208, 66271 + 1), |
|||
"Coptic Epact Numbers": range(66272, 66303 + 1), |
|||
"Old Italic": range(66304, 66351 + 1), |
|||
"Gothic": range(66352, 66383 + 1), |
|||
"Old Permic": range(66384, 66431 + 1), |
|||
"Ugaritic": range(66432, 66463 + 1), |
|||
"Old Persian": range(66464, 66527 + 1), |
|||
"Deseret": range(66560, 66639 + 1), |
|||
"Shavian": range(66640, 66687 + 1), |
|||
"Osmanya": range(66688, 66735 + 1), |
|||
"Osage": range(66736, 66815 + 1), |
|||
"Elbasan": range(66816, 66863 + 1), |
|||
"Caucasian Albanian": range(66864, 66927 + 1), |
|||
"Linear A": range(67072, 67455 + 1), |
|||
"Cypriot Syllabary": range(67584, 67647 + 1), |
|||
"Imperial Aramaic": range(67648, 67679 + 1), |
|||
"Palmyrene": range(67680, 67711 + 1), |
|||
"Nabataean": range(67712, 67759 + 1), |
|||
"Hatran": range(67808, 67839 + 1), |
|||
"Phoenician": range(67840, 67871 + 1), |
|||
"Lydian": range(67872, 67903 + 1), |
|||
"Meroitic Hieroglyphs": range(67968, 67999 + 1), |
|||
"Meroitic Cursive": range(68000, 68095 + 1), |
|||
"Kharoshthi": range(68096, 68191 + 1), |
|||
"Old South Arabian": range(68192, 68223 + 1), |
|||
"Old North Arabian": range(68224, 68255 + 1), |
|||
"Manichaean": range(68288, 68351 + 1), |
|||
"Avestan": range(68352, 68415 + 1), |
|||
"Inscriptional Parthian": range(68416, 68447 + 1), |
|||
"Inscriptional Pahlavi": range(68448, 68479 + 1), |
|||
"Psalter Pahlavi": range(68480, 68527 + 1), |
|||
"Old Turkic": range(68608, 68687 + 1), |
|||
"Old Hungarian": range(68736, 68863 + 1), |
|||
"Rumi Numeral Symbols": range(69216, 69247 + 1), |
|||
"Brahmi": range(69632, 69759 + 1), |
|||
"Kaithi": range(69760, 69839 + 1), |
|||
"Sora Sompeng": range(69840, 69887 + 1), |
|||
"Chakma": range(69888, 69967 + 1), |
|||
"Mahajani": range(69968, 70015 + 1), |
|||
"Sharada": range(70016, 70111 + 1), |
|||
"Sinhala Archaic Numbers": range(70112, 70143 + 1), |
|||
"Khojki": range(70144, 70223 + 1), |
|||
"Multani": range(70272, 70319 + 1), |
|||
"Khudawadi": range(70320, 70399 + 1), |
|||
"Grantha": range(70400, 70527 + 1), |
|||
"Newa": range(70656, 70783 + 1), |
|||
"Tirhuta": range(70784, 70879 + 1), |
|||
"Siddham": range(71040, 71167 + 1), |
|||
"Modi": range(71168, 71263 + 1), |
|||
"Mongolian Supplement": range(71264, 71295 + 1), |
|||
"Takri": range(71296, 71375 + 1), |
|||
"Ahom": range(71424, 71487 + 1), |
|||
"Warang Citi": range(71840, 71935 + 1), |
|||
"Zanabazar Square": range(72192, 72271 + 1), |
|||
"Soyombo": range(72272, 72367 + 1), |
|||
"Pau Cin Hau": range(72384, 72447 + 1), |
|||
"Bhaiksuki": range(72704, 72815 + 1), |
|||
"Marchen": range(72816, 72895 + 1), |
|||
"Masaram Gondi": range(72960, 73055 + 1), |
|||
"Cuneiform": range(73728, 74751 + 1), |
|||
"Cuneiform Numbers and Punctuation": range(74752, 74879 + 1), |
|||
"Early Dynastic Cuneiform": range(74880, 75087 + 1), |
|||
"Egyptian Hieroglyphs": range(77824, 78895 + 1), |
|||
"Anatolian Hieroglyphs": range(82944, 83583 + 1), |
|||
"Bamum Supplement": range(92160, 92735 + 1), |
|||
"Mro": range(92736, 92783 + 1), |
|||
"Bassa Vah": range(92880, 92927 + 1), |
|||
"Pahawh Hmong": range(92928, 93071 + 1), |
|||
"Miao": range(93952, 94111 + 1), |
|||
"Ideographic Symbols and Punctuation": range(94176, 94207 + 1), |
|||
"Tangut": range(94208, 100351 + 1), |
|||
"Tangut Components": range(100352, 101119 + 1), |
|||
"Kana Supplement": range(110592, 110847 + 1), |
|||
"Kana Extended-A": range(110848, 110895 + 1), |
|||
"Nushu": range(110960, 111359 + 1), |
|||
"Duployan": range(113664, 113823 + 1), |
|||
"Shorthand Format Controls": range(113824, 113839 + 1), |
|||
"Byzantine Musical Symbols": range(118784, 119039 + 1), |
|||
"Musical Symbols": range(119040, 119295 + 1), |
|||
"Ancient Greek Musical Notation": range(119296, 119375 + 1), |
|||
"Tai Xuan Jing Symbols": range(119552, 119647 + 1), |
|||
"Counting Rod Numerals": range(119648, 119679 + 1), |
|||
"Mathematical Alphanumeric Symbols": range(119808, 120831 + 1), |
|||
"Sutton SignWriting": range(120832, 121519 + 1), |
|||
"Glagolitic Supplement": range(122880, 122927 + 1), |
|||
"Mende Kikakui": range(124928, 125151 + 1), |
|||
"Adlam": range(125184, 125279 + 1), |
|||
"Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1), |
|||
"Mahjong Tiles": range(126976, 127023 + 1), |
|||
"Domino Tiles": range(127024, 127135 + 1), |
|||
"Playing Cards": range(127136, 127231 + 1), |
|||
"Enclosed Alphanumeric Supplement": range(127232, 127487 + 1), |
|||
"Enclosed Ideographic Supplement": range(127488, 127743 + 1), |
|||
"Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1), |
|||
"Emoticons range(Emoji)": range(128512, 128591 + 1), |
|||
"Ornamental Dingbats": range(128592, 128639 + 1), |
|||
"Transport and Map Symbols": range(128640, 128767 + 1), |
|||
"Alchemical Symbols": range(128768, 128895 + 1), |
|||
"Geometric Shapes Extended": range(128896, 129023 + 1), |
|||
"Supplemental Arrows-C": range(129024, 129279 + 1), |
|||
"Supplemental Symbols and Pictographs": range(129280, 129535 + 1), |
|||
"CJK Unified Ideographs Extension B": range(131072, 173791 + 1), |
|||
"CJK Unified Ideographs Extension C": range(173824, 177983 + 1), |
|||
"CJK Unified Ideographs Extension D": range(177984, 178207 + 1), |
|||
"CJK Unified Ideographs Extension E": range(178208, 183983 + 1), |
|||
"CJK Unified Ideographs Extension F": range(183984, 191471 + 1), |
|||
"CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1), |
|||
"Tags": range(917504, 917631 + 1), |
|||
"Variation Selectors Supplement": range(917760, 917999 + 1), |
|||
} |
|||
|
|||
|
|||
UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [ |
|||
"Supplement", |
|||
"Extended", |
|||
"Extensions", |
|||
"Modifier", |
|||
"Marks", |
|||
"Punctuation", |
|||
"Symbols", |
|||
"Forms", |
|||
"Operators", |
|||
"Miscellaneous", |
|||
"Drawing", |
|||
"Block", |
|||
"Shapes", |
|||
"Supplemental", |
|||
"Tags", |
|||
] |
|||
|
|||
RE_POSSIBLE_ENCODING_INDICATION = re_compile( |
|||
r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)", |
|||
IGNORECASE, |
|||
) |
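# Example of what RE_POSSIBLE_ENCODING_INDICATION is meant to capture (illustrative):
#
#     RE_POSSIBLE_ENCODING_INDICATION.findall('<meta charset="ISO-8859-1">')
#     # -> ['ISO-8859-1']
#
# i.e. it extracts a declared encoding following "encoding", "charset" or "coding",
# separated by ":", "=" or spaces, with optional surrounding quotes.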
|||
|
|||
IANA_SUPPORTED: List[str] = sorted( |
|||
filter( |
|||
lambda x: x.endswith("_codec") is False |
|||
and x not in {"rot_13", "tactis", "mbcs"}, |
|||
list(set(aliases.values())), |
|||
) |
|||
) |
|||
|
|||
IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED) |
|||
|
|||
# Pre-computed code pages that are similar to one another, as determined by the cp_similarity function. |
|||
IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = { |
|||
"cp037": ["cp1026", "cp1140", "cp273", "cp500"], |
|||
"cp1026": ["cp037", "cp1140", "cp273", "cp500"], |
|||
"cp1125": ["cp866"], |
|||
"cp1140": ["cp037", "cp1026", "cp273", "cp500"], |
|||
"cp1250": ["iso8859_2"], |
|||
"cp1251": ["kz1048", "ptcp154"], |
|||
"cp1252": ["iso8859_15", "iso8859_9", "latin_1"], |
|||
"cp1253": ["iso8859_7"], |
|||
"cp1254": ["iso8859_15", "iso8859_9", "latin_1"], |
|||
"cp1257": ["iso8859_13"], |
|||
"cp273": ["cp037", "cp1026", "cp1140", "cp500"], |
|||
"cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"], |
|||
"cp500": ["cp037", "cp1026", "cp1140", "cp273"], |
|||
"cp850": ["cp437", "cp857", "cp858", "cp865"], |
|||
"cp857": ["cp850", "cp858", "cp865"], |
|||
"cp858": ["cp437", "cp850", "cp857", "cp865"], |
|||
"cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"], |
|||
"cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"], |
|||
"cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"], |
|||
"cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"], |
|||
"cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"], |
|||
"cp866": ["cp1125"], |
|||
"iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"], |
|||
"iso8859_11": ["tis_620"], |
|||
"iso8859_13": ["cp1257"], |
|||
"iso8859_14": [ |
|||
"iso8859_10", |
|||
"iso8859_15", |
|||
"iso8859_16", |
|||
"iso8859_3", |
|||
"iso8859_9", |
|||
"latin_1", |
|||
], |
|||
"iso8859_15": [ |
|||
"cp1252", |
|||
"cp1254", |
|||
"iso8859_10", |
|||
"iso8859_14", |
|||
"iso8859_16", |
|||
"iso8859_3", |
|||
"iso8859_9", |
|||
"latin_1", |
|||
], |
|||
"iso8859_16": [ |
|||
"iso8859_14", |
|||
"iso8859_15", |
|||
"iso8859_2", |
|||
"iso8859_3", |
|||
"iso8859_9", |
|||
"latin_1", |
|||
], |
|||
"iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"], |
|||
"iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"], |
|||
"iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"], |
|||
"iso8859_7": ["cp1253"], |
|||
"iso8859_9": [ |
|||
"cp1252", |
|||
"cp1254", |
|||
"cp1258", |
|||
"iso8859_10", |
|||
"iso8859_14", |
|||
"iso8859_15", |
|||
"iso8859_16", |
|||
"iso8859_3", |
|||
"iso8859_4", |
|||
"latin_1", |
|||
], |
|||
"kz1048": ["cp1251", "ptcp154"], |
|||
"latin_1": [ |
|||
"cp1252", |
|||
"cp1254", |
|||
"cp1258", |
|||
"iso8859_10", |
|||
"iso8859_14", |
|||
"iso8859_15", |
|||
"iso8859_16", |
|||
"iso8859_3", |
|||
"iso8859_4", |
|||
"iso8859_9", |
|||
], |
|||
"mac_iceland": ["mac_roman", "mac_turkish"], |
|||
"mac_roman": ["mac_iceland", "mac_turkish"], |
|||
"mac_turkish": ["mac_iceland", "mac_roman"], |
|||
"ptcp154": ["cp1251", "kz1048"], |
|||
"tis_620": ["iso8859_11"], |
|||
} |
|||
|
|||
|
|||
CHARDET_CORRESPONDENCE: Dict[str, str] = { |
|||
"iso2022_kr": "ISO-2022-KR", |
|||
"iso2022_jp": "ISO-2022-JP", |
|||
"euc_kr": "EUC-KR", |
|||
"tis_620": "TIS-620", |
|||
"utf_32": "UTF-32", |
|||
"euc_jp": "EUC-JP", |
|||
"koi8_r": "KOI8-R", |
|||
"iso8859_1": "ISO-8859-1", |
|||
"iso8859_2": "ISO-8859-2", |
|||
"iso8859_5": "ISO-8859-5", |
|||
"iso8859_6": "ISO-8859-6", |
|||
"iso8859_7": "ISO-8859-7", |
|||
"iso8859_8": "ISO-8859-8", |
|||
"utf_16": "UTF-16", |
|||
"cp855": "IBM855", |
|||
"mac_cyrillic": "MacCyrillic", |
|||
"gb2312": "GB2312", |
|||
"gb18030": "GB18030", |
|||
"cp932": "CP932", |
|||
"cp866": "IBM866", |
|||
"utf_8": "utf-8", |
|||
"utf_8_sig": "UTF-8-SIG", |
|||
"shift_jis": "SHIFT_JIS", |
|||
"big5": "Big5", |
|||
"cp1250": "windows-1250", |
|||
"cp1251": "windows-1251", |
|||
"cp1252": "Windows-1252", |
|||
"cp1253": "windows-1253", |
|||
"cp1255": "windows-1255", |
|||
"cp1256": "windows-1256", |
|||
"cp1254": "Windows-1254", |
|||
"cp949": "CP949", |
|||
} |
|||
|
|||
|
|||
COMMON_SAFE_ASCII_CHARACTERS: Set[str] = { |
|||
"<", |
|||
">", |
|||
"=", |
|||
":", |
|||
"/", |
|||
"&", |
|||
";", |
|||
"{", |
|||
"}", |
|||
"[", |
|||
"]", |
|||
",", |
|||
"|", |
|||
'"', |
|||
"-", |
|||
} |
|||
|
|||
|
|||
KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"} |
|||
ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"} |
|||
|
|||
LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES) |
|||
|
|||
# Logging LEVEL below DEBUG |
|||
TRACE: int = 5 |
@ -0,0 +1,54 @@ |
|||
from typing import Any, Dict, Optional, Union |
|||
from warnings import warn |
|||
|
|||
from .api import from_bytes |
|||
from .constant import CHARDET_CORRESPONDENCE |
|||
|
|||
|
|||
def detect( |
|||
byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any |
|||
) -> Dict[str, Optional[Union[str, float]]]: |
|||
""" |
|||
chardet legacy method |
|||
Detect the encoding of the given byte string. It should be mostly backward-compatible. |
|||
Encoding names will match Chardet's own spelling whenever possible (except for encodings Chardet does not support). |
|||
This function is deprecated and is meant to ease migration from chardet; consult the documentation for |
|||
further information. It is not planned for removal. |
|||
|
|||
:param byte_str: The byte sequence to examine. |
|||
:param should_rename_legacy: Should we rename legacy encodings |
|||
to their more modern equivalents? |
|||
""" |
|||
if len(kwargs): |
|||
warn( |
|||
f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()" |
|||
) |
|||
|
|||
if not isinstance(byte_str, (bytearray, bytes)): |
|||
raise TypeError( # pragma: nocover |
|||
"Expected object of type bytes or bytearray, got: " |
|||
"{0}".format(type(byte_str)) |
|||
) |
|||
|
|||
if isinstance(byte_str, bytearray): |
|||
byte_str = bytes(byte_str) |
|||
|
|||
r = from_bytes(byte_str).best() |
|||
|
|||
encoding = r.encoding if r is not None else None |
|||
language = r.language if r is not None and r.language != "Unknown" else "" |
|||
confidence = 1.0 - r.chaos if r is not None else None |
|||
|
|||
# Note: CharsetNormalizer does not return 'UTF-8-SIG' because the signature gets stripped during detection/normalization, |
|||
# but chardet does return 'utf-8-sig' and it is a valid codec name. |
|||
if r is not None and encoding == "utf_8" and r.bom: |
|||
encoding += "_sig" |
|||
|
|||
if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE: |
|||
encoding = CHARDET_CORRESPONDENCE[encoding] |
|||
|
|||
return { |
|||
"encoding": encoding, |
|||
"language": language, |
|||
"confidence": confidence, |
|||
} |
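# A minimal usage sketch, assuming the surrounding package exposes detect() as the
# upstream charset_normalizer package does (illustrative; exact values depend on the
# detection outcome):
#
#     from charset_normalizer import detect
#
#     result = detect("Bonjour, où êtes-vous ?".encode("utf_8"))
#     # typically something like {'encoding': 'utf-8', 'language': ..., 'confidence': ...}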
@ -0,0 +1,582 @@ |
|||
from functools import lru_cache |
|||
from logging import getLogger |
|||
from typing import List, Optional |
|||
|
|||
from .constant import ( |
|||
COMMON_SAFE_ASCII_CHARACTERS, |
|||
TRACE, |
|||
UNICODE_SECONDARY_RANGE_KEYWORD, |
|||
) |
|||
from .utils import ( |
|||
is_accentuated, |
|||
is_ascii, |
|||
is_case_variable, |
|||
is_cjk, |
|||
is_emoticon, |
|||
is_hangul, |
|||
is_hiragana, |
|||
is_katakana, |
|||
is_latin, |
|||
is_punctuation, |
|||
is_separator, |
|||
is_symbol, |
|||
is_thai, |
|||
is_unprintable, |
|||
remove_accent, |
|||
unicode_range, |
|||
) |
|||
|
|||
|
|||
class MessDetectorPlugin: |
|||
""" |
|||
Base abstract class used for mess detection plugins. |
|||
All detectors MUST extend this class and implement the given methods. |
|||
""" |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
""" |
|||
Determine if the given character should be fed to this detector. |
|||
""" |
|||
raise NotImplementedError # pragma: nocover |
|||
|
|||
def feed(self, character: str) -> None: |
|||
""" |
|||
The main routine, executed for each eligible character. |
|||
Insert the logic by which the text would be considered chaotic. |
|||
""" |
|||
raise NotImplementedError # pragma: nocover |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
""" |
|||
Reset the plugin to its initial state. |
|||
""" |
|||
raise NotImplementedError |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
""" |
|||
Compute the chaos ratio based on what your feed() has seen. |
|||
Must NOT be lower than 0.0; there is no upper bound. |
|||
""" |
|||
raise NotImplementedError # pragma: nocover |
|||
|
|||
|
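# A commented-out sketch of what a concrete plugin looks like. It is deliberately kept
# as a comment: mess_ratio() discovers plugins via MessDetectorPlugin.__subclasses__(),
# so defining a real class here would silently change the detection behaviour. The
# plugin name and heuristic below are hypothetical.
#
#     class TooManyDigitsPlugin(MessDetectorPlugin):
#         def __init__(self) -> None:
#             self._digit_count: int = 0
#             self._character_count: int = 0
#
#         def eligible(self, character: str) -> bool:
#             return character.isprintable()
#
#         def feed(self, character: str) -> None:
#             self._character_count += 1
#             if character.isdigit():
#                 self._digit_count += 1
#
#         def reset(self) -> None:
#             self._digit_count = 0
#             self._character_count = 0
#
#         @property
#         def ratio(self) -> float:
#             if self._character_count == 0:
#                 return 0.0
#             return self._digit_count / self._character_count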
|||
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._punctuation_count: int = 0 |
|||
self._symbol_count: int = 0 |
|||
self._character_count: int = 0 |
|||
|
|||
self._last_printable_char: Optional[str] = None |
|||
self._frenzy_symbol_in_word: bool = False |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return character.isprintable() |
|||
|
|||
def feed(self, character: str) -> None: |
|||
self._character_count += 1 |
|||
|
|||
if ( |
|||
character != self._last_printable_char |
|||
and character not in COMMON_SAFE_ASCII_CHARACTERS |
|||
): |
|||
if is_punctuation(character): |
|||
self._punctuation_count += 1 |
|||
elif ( |
|||
character.isdigit() is False |
|||
and is_symbol(character) |
|||
and is_emoticon(character) is False |
|||
): |
|||
self._symbol_count += 2 |
|||
|
|||
self._last_printable_char = character |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._punctuation_count = 0 |
|||
self._character_count = 0 |
|||
self._symbol_count = 0 |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count == 0: |
|||
return 0.0 |
|||
|
|||
ratio_of_punctuation: float = ( |
|||
self._punctuation_count + self._symbol_count |
|||
) / self._character_count |
|||
|
|||
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 |
|||
|
|||
|
|||
class TooManyAccentuatedPlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._character_count: int = 0 |
|||
self._accentuated_count: int = 0 |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return character.isalpha() |
|||
|
|||
def feed(self, character: str) -> None: |
|||
self._character_count += 1 |
|||
|
|||
if is_accentuated(character): |
|||
self._accentuated_count += 1 |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._character_count = 0 |
|||
self._accentuated_count = 0 |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count < 8: |
|||
return 0.0 |
|||
ratio_of_accentuation: float = self._accentuated_count / self._character_count |
|||
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 |
|||
|
|||
|
|||
class UnprintablePlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._unprintable_count: int = 0 |
|||
self._character_count: int = 0 |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return True |
|||
|
|||
def feed(self, character: str) -> None: |
|||
if is_unprintable(character): |
|||
self._unprintable_count += 1 |
|||
self._character_count += 1 |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._unprintable_count = 0 |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count == 0: |
|||
return 0.0 |
|||
|
|||
return (self._unprintable_count * 8) / self._character_count |
|||
|
|||
|
|||
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._successive_count: int = 0 |
|||
self._character_count: int = 0 |
|||
|
|||
self._last_latin_character: Optional[str] = None |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return character.isalpha() and is_latin(character) |
|||
|
|||
def feed(self, character: str) -> None: |
|||
self._character_count += 1 |
|||
if ( |
|||
self._last_latin_character is not None |
|||
and is_accentuated(character) |
|||
and is_accentuated(self._last_latin_character) |
|||
): |
|||
if character.isupper() and self._last_latin_character.isupper(): |
|||
self._successive_count += 1 |
|||
# Worse if it's the same character duplicated with a different accent. |
|||
if remove_accent(character) == remove_accent(self._last_latin_character): |
|||
self._successive_count += 1 |
|||
self._last_latin_character = character |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._successive_count = 0 |
|||
self._character_count = 0 |
|||
self._last_latin_character = None |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count == 0: |
|||
return 0.0 |
|||
|
|||
return (self._successive_count * 2) / self._character_count |
|||
|
|||
|
|||
class SuspiciousRange(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._suspicious_successive_range_count: int = 0 |
|||
self._character_count: int = 0 |
|||
self._last_printable_seen: Optional[str] = None |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return character.isprintable() |
|||
|
|||
def feed(self, character: str) -> None: |
|||
self._character_count += 1 |
|||
|
|||
if ( |
|||
character.isspace() |
|||
or is_punctuation(character) |
|||
or character in COMMON_SAFE_ASCII_CHARACTERS |
|||
): |
|||
self._last_printable_seen = None |
|||
return |
|||
|
|||
if self._last_printable_seen is None: |
|||
self._last_printable_seen = character |
|||
return |
|||
|
|||
unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen) |
|||
unicode_range_b: Optional[str] = unicode_range(character) |
|||
|
|||
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): |
|||
self._suspicious_successive_range_count += 1 |
|||
|
|||
self._last_printable_seen = character |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._character_count = 0 |
|||
self._suspicious_successive_range_count = 0 |
|||
self._last_printable_seen = None |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count == 0: |
|||
return 0.0 |
|||
|
|||
ratio_of_suspicious_range_usage: float = ( |
|||
self._suspicious_successive_range_count * 2 |
|||
) / self._character_count |
|||
|
|||
if ratio_of_suspicious_range_usage < 0.1: |
|||
return 0.0 |
|||
|
|||
return ratio_of_suspicious_range_usage |
|||
|
|||
|
|||
class SuperWeirdWordPlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._word_count: int = 0 |
|||
self._bad_word_count: int = 0 |
|||
self._foreign_long_count: int = 0 |
|||
|
|||
self._is_current_word_bad: bool = False |
|||
self._foreign_long_watch: bool = False |
|||
|
|||
self._character_count: int = 0 |
|||
self._bad_character_count: int = 0 |
|||
|
|||
self._buffer: str = "" |
|||
self._buffer_accent_count: int = 0 |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return True |
|||
|
|||
def feed(self, character: str) -> None: |
|||
if character.isalpha(): |
|||
self._buffer += character |
|||
if is_accentuated(character): |
|||
self._buffer_accent_count += 1 |
|||
if ( |
|||
self._foreign_long_watch is False |
|||
and (is_latin(character) is False or is_accentuated(character)) |
|||
and is_cjk(character) is False |
|||
and is_hangul(character) is False |
|||
and is_katakana(character) is False |
|||
and is_hiragana(character) is False |
|||
and is_thai(character) is False |
|||
): |
|||
self._foreign_long_watch = True |
|||
return |
|||
if not self._buffer: |
|||
return |
|||
if ( |
|||
character.isspace() or is_punctuation(character) or is_separator(character) |
|||
) and self._buffer: |
|||
self._word_count += 1 |
|||
buffer_length: int = len(self._buffer) |
|||
|
|||
self._character_count += buffer_length |
|||
|
|||
if buffer_length >= 4: |
|||
if self._buffer_accent_count / buffer_length > 0.34: |
|||
self._is_current_word_bad = True |
|||
# Words/buffers ending with an upper-case accentuated letter are so rare |
|||
# that we consider them all suspicious. Same weight as a foreign_long suspicion. |
|||
if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper(): |
|||
self._foreign_long_count += 1 |
|||
self._is_current_word_bad = True |
|||
if buffer_length >= 24 and self._foreign_long_watch: |
|||
camel_case_dst = [ |
|||
i |
|||
for c, i in zip(self._buffer, range(0, buffer_length)) |
|||
if c.isupper() |
|||
] |
|||
probable_camel_cased: bool = False |
|||
|
|||
if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): |
|||
probable_camel_cased = True |
|||
|
|||
if not probable_camel_cased: |
|||
self._foreign_long_count += 1 |
|||
self._is_current_word_bad = True |
|||
|
|||
if self._is_current_word_bad: |
|||
self._bad_word_count += 1 |
|||
self._bad_character_count += len(self._buffer) |
|||
self._is_current_word_bad = False |
|||
|
|||
self._foreign_long_watch = False |
|||
self._buffer = "" |
|||
self._buffer_accent_count = 0 |
|||
elif ( |
|||
character not in {"<", ">", "-", "=", "~", "|", "_"} |
|||
and character.isdigit() is False |
|||
and is_symbol(character) |
|||
): |
|||
self._is_current_word_bad = True |
|||
self._buffer += character |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._buffer = "" |
|||
self._is_current_word_bad = False |
|||
self._foreign_long_watch = False |
|||
self._bad_word_count = 0 |
|||
self._word_count = 0 |
|||
self._character_count = 0 |
|||
self._bad_character_count = 0 |
|||
self._foreign_long_count = 0 |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._word_count <= 10 and self._foreign_long_count == 0: |
|||
return 0.0 |
|||
|
|||
return self._bad_character_count / self._character_count |
|||
|
|||
|
|||
class CjkInvalidStopPlugin(MessDetectorPlugin): |
|||
""" |
|||
GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit, and |
|||
this can be easily detected. We search for an overuse of '丅' and '丄'. |
|||
""" |
|||
|
|||
def __init__(self) -> None: |
|||
self._wrong_stop_count: int = 0 |
|||
self._cjk_character_count: int = 0 |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return True |
|||
|
|||
def feed(self, character: str) -> None: |
|||
if character in {"丅", "丄"}: |
|||
self._wrong_stop_count += 1 |
|||
return |
|||
if is_cjk(character): |
|||
self._cjk_character_count += 1 |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._wrong_stop_count = 0 |
|||
self._cjk_character_count = 0 |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._cjk_character_count < 16: |
|||
return 0.0 |
|||
return self._wrong_stop_count / self._cjk_character_count |
|||
|
|||
|
|||
class ArchaicUpperLowerPlugin(MessDetectorPlugin): |
|||
def __init__(self) -> None: |
|||
self._buf: bool = False |
|||
|
|||
self._character_count_since_last_sep: int = 0 |
|||
|
|||
self._successive_upper_lower_count: int = 0 |
|||
self._successive_upper_lower_count_final: int = 0 |
|||
|
|||
self._character_count: int = 0 |
|||
|
|||
self._last_alpha_seen: Optional[str] = None |
|||
self._current_ascii_only: bool = True |
|||
|
|||
def eligible(self, character: str) -> bool: |
|||
return True |
|||
|
|||
def feed(self, character: str) -> None: |
|||
is_concerned = character.isalpha() and is_case_variable(character) |
|||
chunk_sep = is_concerned is False |
|||
|
|||
if chunk_sep and self._character_count_since_last_sep > 0: |
|||
if ( |
|||
self._character_count_since_last_sep <= 64 |
|||
and character.isdigit() is False |
|||
and self._current_ascii_only is False |
|||
): |
|||
self._successive_upper_lower_count_final += ( |
|||
self._successive_upper_lower_count |
|||
) |
|||
|
|||
self._successive_upper_lower_count = 0 |
|||
self._character_count_since_last_sep = 0 |
|||
self._last_alpha_seen = None |
|||
self._buf = False |
|||
self._character_count += 1 |
|||
self._current_ascii_only = True |
|||
|
|||
return |
|||
|
|||
if self._current_ascii_only is True and is_ascii(character) is False: |
|||
self._current_ascii_only = False |
|||
|
|||
if self._last_alpha_seen is not None: |
|||
if (character.isupper() and self._last_alpha_seen.islower()) or ( |
|||
character.islower() and self._last_alpha_seen.isupper() |
|||
): |
|||
if self._buf is True: |
|||
self._successive_upper_lower_count += 2 |
|||
self._buf = False |
|||
else: |
|||
self._buf = True |
|||
else: |
|||
self._buf = False |
|||
|
|||
self._character_count += 1 |
|||
self._character_count_since_last_sep += 1 |
|||
self._last_alpha_seen = character |
|||
|
|||
def reset(self) -> None: # pragma: no cover |
|||
self._character_count = 0 |
|||
self._character_count_since_last_sep = 0 |
|||
self._successive_upper_lower_count = 0 |
|||
self._successive_upper_lower_count_final = 0 |
|||
self._last_alpha_seen = None |
|||
self._buf = False |
|||
self._current_ascii_only = True |
|||
|
|||
@property |
|||
def ratio(self) -> float: |
|||
if self._character_count == 0: |
|||
return 0.0 |
|||
|
|||
return self._successive_upper_lower_count_final / self._character_count |
|||
|
|||
|
|||
@lru_cache(maxsize=1024) |
|||
def is_suspiciously_successive_range( |
|||
unicode_range_a: Optional[str], unicode_range_b: Optional[str] |
|||
) -> bool: |
|||
""" |
|||
Determine if two Unicode ranges seen next to each other can be considered suspicious. |
|||
""" |
|||
if unicode_range_a is None or unicode_range_b is None: |
|||
return True |
|||
|
|||
if unicode_range_a == unicode_range_b: |
|||
return False |
|||
|
|||
if "Latin" in unicode_range_a and "Latin" in unicode_range_b: |
|||
return False |
|||
|
|||
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: |
|||
return False |
|||
|
|||
# Latin characters can be accompanied by a combining diacritical mark, |
|||
# eg. Vietnamese. |
|||
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( |
|||
"Combining" in unicode_range_a or "Combining" in unicode_range_b |
|||
): |
|||
return False |
|||
|
|||
keywords_range_a, keywords_range_b = unicode_range_a.split( |
|||
" " |
|||
), unicode_range_b.split(" ") |
|||
|
|||
for el in keywords_range_a: |
|||
if el in UNICODE_SECONDARY_RANGE_KEYWORD: |
|||
continue |
|||
if el in keywords_range_b: |
|||
return False |
|||
|
|||
# Japanese Exception |
|||
range_a_jp_chars, range_b_jp_chars = ( |
|||
unicode_range_a |
|||
in ( |
|||
"Hiragana", |
|||
"Katakana", |
|||
), |
|||
unicode_range_b in ("Hiragana", "Katakana"), |
|||
) |
|||
if (range_a_jp_chars or range_b_jp_chars) and ( |
|||
"CJK" in unicode_range_a or "CJK" in unicode_range_b |
|||
): |
|||
return False |
|||
if range_a_jp_chars and range_b_jp_chars: |
|||
return False |
|||
|
|||
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: |
|||
if "CJK" in unicode_range_a or "CJK" in unicode_range_b: |
|||
return False |
|||
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": |
|||
return False |
|||
|
|||
# Chinese/Japanese use dedicated range for punctuation and/or separators. |
|||
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( |
|||
unicode_range_a in ["Katakana", "Hiragana"] |
|||
and unicode_range_b in ["Katakana", "Hiragana"] |
|||
): |
|||
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: |
|||
return False |
|||
if "Forms" in unicode_range_a or "Forms" in unicode_range_b: |
|||
return False |
|||
|
|||
return True |
|||
|
|||
|
|||
@lru_cache(maxsize=2048) |
|||
def mess_ratio( |
|||
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False |
|||
) -> float: |
|||
""" |
|||
Compute a mess ratio given a decoded byte sequence. Reaching the maximum threshold stops the computation early. |
|||
""" |
|||
|
|||
detectors: List[MessDetectorPlugin] = [ |
|||
md_class() for md_class in MessDetectorPlugin.__subclasses__() |
|||
] |
|||
|
|||
length: int = len(decoded_sequence) + 1 |
|||
|
|||
mean_mess_ratio: float = 0.0 |
|||
|
|||
if length < 512: |
|||
intermediary_mean_mess_ratio_calc: int = 32 |
|||
elif length <= 1024: |
|||
intermediary_mean_mess_ratio_calc = 64 |
|||
else: |
|||
intermediary_mean_mess_ratio_calc = 128 |
|||
|
|||
for character, index in zip(decoded_sequence + "\n", range(length)): |
|||
for detector in detectors: |
|||
if detector.eligible(character): |
|||
detector.feed(character) |
|||
|
|||
if ( |
|||
index > 0 and index % intermediary_mean_mess_ratio_calc == 0 |
|||
) or index == length - 1: |
|||
mean_mess_ratio = sum(dt.ratio for dt in detectors) |
|||
|
|||
if mean_mess_ratio >= maximum_threshold: |
|||
break |
|||
|
|||
if debug: |
|||
logger = getLogger("charset_normalizer") |
|||
|
|||
logger.log( |
|||
TRACE, |
|||
"Mess-detector extended-analysis start. " |
|||
f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " |
|||
f"maximum_threshold={maximum_threshold}", |
|||
) |
|||
|
|||
if len(decoded_sequence) > 16: |
|||
logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") |
|||
logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") |
|||
|
|||
for dt in detectors: # pragma: nocover |
|||
logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") |
|||
|
|||
return round(mean_mess_ratio, 3) |
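# A minimal usage sketch (as a comment, since this module is normally only imported):
#
#     clean = mess_ratio("Hello, world!")     # expected to be close to 0.0
#     noisy = mess_ratio("ŞŔ×ĐΓ~ỸЦ" * 8)      # expected to be noticeably higher
#
# The returned value is the sum of all plugin ratios, sampled periodically during the
# scan and rounded to 3 decimals; callers typically compare it against a threshold
# such as 0.2.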
@ -0,0 +1,337 @@ |
|||
from encodings.aliases import aliases |
|||
from hashlib import sha256 |
|||
from json import dumps |
|||
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union |
|||
|
|||
from .constant import TOO_BIG_SEQUENCE |
|||
from .utils import iana_name, is_multi_byte_encoding, unicode_range |
|||
|
|||
|
|||
class CharsetMatch: |
|||
def __init__( |
|||
self, |
|||
payload: bytes, |
|||
guessed_encoding: str, |
|||
mean_mess_ratio: float, |
|||
has_sig_or_bom: bool, |
|||
languages: "CoherenceMatches", |
|||
decoded_payload: Optional[str] = None, |
|||
): |
|||
self._payload: bytes = payload |
|||
|
|||
self._encoding: str = guessed_encoding |
|||
self._mean_mess_ratio: float = mean_mess_ratio |
|||
self._languages: CoherenceMatches = languages |
|||
self._has_sig_or_bom: bool = has_sig_or_bom |
|||
self._unicode_ranges: Optional[List[str]] = None |
|||
|
|||
self._leaves: List[CharsetMatch] = [] |
|||
self._mean_coherence_ratio: float = 0.0 |
|||
|
|||
self._output_payload: Optional[bytes] = None |
|||
self._output_encoding: Optional[str] = None |
|||
|
|||
self._string: Optional[str] = decoded_payload |
|||
|
|||
def __eq__(self, other: object) -> bool: |
|||
if not isinstance(other, CharsetMatch): |
|||
raise TypeError( |
|||
"__eq__ cannot be invoked on {} and {}.".format( |
|||
str(other.__class__), str(self.__class__) |
|||
) |
|||
) |
|||
return self.encoding == other.encoding and self.fingerprint == other.fingerprint |
|||
|
|||
def __lt__(self, other: object) -> bool: |
|||
""" |
|||
Implemented so that sorted() can be used on CharsetMatch items. |
|||
""" |
|||
if not isinstance(other, CharsetMatch): |
|||
raise ValueError |
|||
|
|||
chaos_difference: float = abs(self.chaos - other.chaos) |
|||
coherence_difference: float = abs(self.coherence - other.coherence) |
|||
|
|||
# Below 1% difference --> Use Coherence |
|||
if chaos_difference < 0.01 and coherence_difference > 0.02: |
|||
# When facing a tough decision, use the result that decoded as much multi-byte content as possible. |
|||
if chaos_difference == 0.0 and self.coherence == other.coherence: |
|||
return self.multi_byte_usage > other.multi_byte_usage |
|||
return self.coherence > other.coherence |
|||
|
|||
return self.chaos < other.chaos |
|||
|
|||
@property |
|||
def multi_byte_usage(self) -> float: |
|||
return 1.0 - len(str(self)) / len(self.raw) |
|||
|
|||
def __str__(self) -> str: |
|||
# Lazy Str Loading |
|||
if self._string is None: |
|||
self._string = str(self._payload, self._encoding, "strict") |
|||
return self._string |
|||
|
|||
def __repr__(self) -> str: |
|||
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint) |
|||
|
|||
def add_submatch(self, other: "CharsetMatch") -> None: |
|||
if not isinstance(other, CharsetMatch) or other == self: |
|||
raise ValueError( |
|||
"Unable to add instance <{}> as a submatch of a CharsetMatch".format( |
|||
other.__class__ |
|||
) |
|||
) |
|||
|
|||
other._string = None  # Release the decoded string to save RAM; dirty trick. |
|||
self._leaves.append(other) |
|||
|
|||
@property |
|||
def encoding(self) -> str: |
|||
return self._encoding |
|||
|
|||
@property |
|||
def encoding_aliases(self) -> List[str]: |
|||
""" |
|||
An encoding is known by many names; using this could help when searching for IBM855 when it is listed as CP855. |
|||
""" |
|||
also_known_as: List[str] = [] |
|||
for u, p in aliases.items(): |
|||
if self.encoding == u: |
|||
also_known_as.append(p) |
|||
elif self.encoding == p: |
|||
also_known_as.append(u) |
|||
return also_known_as |
|||
|
|||
@property |
|||
def bom(self) -> bool: |
|||
return self._has_sig_or_bom |
|||
|
|||
@property |
|||
def byte_order_mark(self) -> bool: |
|||
return self._has_sig_or_bom |
|||
|
|||
@property |
|||
def languages(self) -> List[str]: |
|||
""" |
|||
Return the complete list of possible languages found in the decoded sequence. |
|||
Usually not very useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'. |
|||
""" |
|||
return [e[0] for e in self._languages] |
|||
|
|||
@property |
|||
def language(self) -> str: |
|||
""" |
|||
Most probable language found in the decoded sequence. If none was detected or inferred, the property returns |
|||
"Unknown". |
|||
""" |
|||
if not self._languages: |
|||
# Trying to infer the language based on the given encoding |
|||
# It's either English or, in certain cases, we should not commit to an answer. |
|||
if "ascii" in self.could_be_from_charset: |
|||
return "English" |
|||
|
|||
# doing it there to avoid circular import |
|||
from charset_normalizer.cd import encoding_languages, mb_encoding_languages |
|||
|
|||
languages = ( |
|||
mb_encoding_languages(self.encoding) |
|||
if is_multi_byte_encoding(self.encoding) |
|||
else encoding_languages(self.encoding) |
|||
) |
|||
|
|||
if len(languages) == 0 or "Latin Based" in languages: |
|||
return "Unknown" |
|||
|
|||
return languages[0] |
|||
|
|||
return self._languages[0][0] |
|||
|
|||
@property |
|||
def chaos(self) -> float: |
|||
return self._mean_mess_ratio |
|||
|
|||
@property |
|||
def coherence(self) -> float: |
|||
if not self._languages: |
|||
return 0.0 |
|||
return self._languages[0][1] |
|||
|
|||
@property |
|||
def percent_chaos(self) -> float: |
|||
return round(self.chaos * 100, ndigits=3) |
|||
|
|||
@property |
|||
def percent_coherence(self) -> float: |
|||
return round(self.coherence * 100, ndigits=3) |
|||
|
|||
@property |
|||
def raw(self) -> bytes: |
|||
""" |
|||
Original untouched bytes. |
|||
""" |
|||
return self._payload |
|||
|
|||
@property |
|||
def submatch(self) -> List["CharsetMatch"]: |
|||
return self._leaves |
|||
|
|||
@property |
|||
def has_submatch(self) -> bool: |
|||
return len(self._leaves) > 0 |
|||
|
|||
@property |
|||
def alphabets(self) -> List[str]: |
|||
if self._unicode_ranges is not None: |
|||
return self._unicode_ranges |
|||
# list detected ranges |
|||
detected_ranges: List[Optional[str]] = [ |
|||
unicode_range(char) for char in str(self) |
|||
] |
|||
# filter and sort |
|||
self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) |
|||
return self._unicode_ranges |
|||
|
|||
@property |
|||
def could_be_from_charset(self) -> List[str]: |
|||
""" |
|||
The complete list of encodings that output the exact SAME str result and therefore could be the originating |
|||
encoding. |
|||
This list includes the encoding available in the 'encoding' property. |
|||
""" |
|||
return [self._encoding] + [m.encoding for m in self._leaves] |
|||
|
|||
def output(self, encoding: str = "utf_8") -> bytes: |
|||
""" |
|||
Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8. |
|||
Characters that cannot be encoded are replaced by the encoder (the "replace" error handler), not dropped. |
|||
""" |
|||
if self._output_encoding is None or self._output_encoding != encoding: |
|||
self._output_encoding = encoding |
|||
self._output_payload = str(self).encode(encoding, "replace") |
|||
|
|||
return self._output_payload # type: ignore |
|||
|
|||
@property |
|||
def fingerprint(self) -> str: |
|||
""" |
|||
Retrieve the unique SHA-256 hash computed from the transformed (re-encoded) payload, not the original one. |
|||
""" |
|||
return sha256(self.output()).hexdigest() |
|||
|
|||
|
|||
class CharsetMatches: |
|||
""" |
|||
Container holding CharsetMatch items, ordered by default from the most probable to the least probable. |
|||
Acts like a list (iterable) but does not implement all related methods. |
|||
""" |
|||
|
|||
def __init__(self, results: Optional[List[CharsetMatch]] = None): |
|||
self._results: List[CharsetMatch] = sorted(results) if results else [] |
|||
|
|||
def __iter__(self) -> Iterator[CharsetMatch]: |
|||
yield from self._results |
|||
|
|||
def __getitem__(self, item: Union[int, str]) -> CharsetMatch: |
|||
""" |
|||
Retrieve a single item either by its position or encoding name (alias may be used here). |
|||
Raise KeyError upon invalid index or encoding not present in results. |
|||
""" |
|||
if isinstance(item, int): |
|||
return self._results[item] |
|||
if isinstance(item, str): |
|||
item = iana_name(item, False) |
|||
for result in self._results: |
|||
if item in result.could_be_from_charset: |
|||
return result |
|||
raise KeyError |
|||
|
|||
def __len__(self) -> int: |
|||
return len(self._results) |
|||
|
|||
def __bool__(self) -> bool: |
|||
return len(self._results) > 0 |
|||
|
|||
def append(self, item: CharsetMatch) -> None: |
|||
""" |
|||
Insert a single match. It will be inserted at the appropriate position to preserve the sort order. |
|||
Can be inserted as a submatch. |
|||
""" |
|||
if not isinstance(item, CharsetMatch): |
|||
raise ValueError( |
|||
"Cannot append instance '{}' to CharsetMatches".format( |
|||
str(item.__class__) |
|||
) |
|||
) |
|||
# We should disable submatch factoring when the input file is too large (to conserve RAM) |
|||
if len(item.raw) <= TOO_BIG_SEQUENCE: |
|||
for match in self._results: |
|||
if match.fingerprint == item.fingerprint and match.chaos == item.chaos: |
|||
match.add_submatch(item) |
|||
return |
|||
self._results.append(item) |
|||
self._results = sorted(self._results) |
|||
|
|||
def best(self) -> Optional["CharsetMatch"]: |
|||
""" |
|||
Simply return the first match. Strict equivalent to matches[0]. |
|||
""" |
|||
if not self._results: |
|||
return None |
|||
return self._results[0] |
|||
|
|||
def first(self) -> Optional["CharsetMatch"]: |
|||
""" |
|||
Redundant method; simply calls best(). Kept for backward-compatibility reasons. |
|||
""" |
|||
return self.best() |
|||
|
|||
|
|||
CoherenceMatch = Tuple[str, float] |
|||
CoherenceMatches = List[CoherenceMatch] |
|||
|
|||
|
|||
class CliDetectionResult: |
|||
def __init__( |
|||
self, |
|||
path: str, |
|||
encoding: Optional[str], |
|||
encoding_aliases: List[str], |
|||
alternative_encodings: List[str], |
|||
language: str, |
|||
alphabets: List[str], |
|||
has_sig_or_bom: bool, |
|||
chaos: float, |
|||
coherence: float, |
|||
unicode_path: Optional[str], |
|||
is_preferred: bool, |
|||
): |
|||
self.path: str = path |
|||
self.unicode_path: Optional[str] = unicode_path |
|||
self.encoding: Optional[str] = encoding |
|||
self.encoding_aliases: List[str] = encoding_aliases |
|||
self.alternative_encodings: List[str] = alternative_encodings |
|||
self.language: str = language |
|||
self.alphabets: List[str] = alphabets |
|||
self.has_sig_or_bom: bool = has_sig_or_bom |
|||
self.chaos: float = chaos |
|||
self.coherence: float = coherence |
|||
self.is_preferred: bool = is_preferred |
|||
|
|||
@property |
|||
def __dict__(self) -> Dict[str, Any]: # type: ignore |
|||
return { |
|||
"path": self.path, |
|||
"encoding": self.encoding, |
|||
"encoding_aliases": self.encoding_aliases, |
|||
"alternative_encodings": self.alternative_encodings, |
|||
"language": self.language, |
|||
"alphabets": self.alphabets, |
|||
"has_sig_or_bom": self.has_sig_or_bom, |
|||
"chaos": self.chaos, |
|||
"coherence": self.coherence, |
|||
"unicode_path": self.unicode_path, |
|||
"is_preferred": self.is_preferred, |
|||
} |
|||
|
|||
def to_json(self) -> str: |
|||
return dumps(self.__dict__, ensure_ascii=True, indent=4) |
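# A minimal, self-contained sketch of the containers described above (illustrative
# values only; real instances are produced by the package's detection API):
#
#     m = CharsetMatch(b"hello", "ascii", 0.0, False, [], decoded_payload="hello")
#     matches = CharsetMatches([m])
#     assert matches.best() is matches[0]
#     assert "ascii" in m.could_be_from_charset
#     print(m.fingerprint)  # SHA-256 of the re-encoded payload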
@ -0,0 +1,414 @@ |
|||
import importlib |
|||
import logging |
|||
import unicodedata |
|||
from codecs import IncrementalDecoder |
|||
from encodings.aliases import aliases |
|||
from functools import lru_cache |
|||
from re import findall |
|||
from typing import Generator, List, Optional, Set, Tuple, Union |
|||
|
|||
from _multibytecodec import MultibyteIncrementalDecoder |
|||
|
|||
from .constant import ( |
|||
ENCODING_MARKS, |
|||
IANA_SUPPORTED_SIMILAR, |
|||
RE_POSSIBLE_ENCODING_INDICATION, |
|||
UNICODE_RANGES_COMBINED, |
|||
UNICODE_SECONDARY_RANGE_KEYWORD, |
|||
UTF8_MAXIMAL_ALLOCATION, |
|||
) |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_accentuated(character: str) -> bool: |
|||
try: |
|||
description: str = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
return ( |
|||
"WITH GRAVE" in description |
|||
or "WITH ACUTE" in description |
|||
or "WITH CEDILLA" in description |
|||
or "WITH DIAERESIS" in description |
|||
or "WITH CIRCUMFLEX" in description |
|||
or "WITH TILDE" in description |
|||
) |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def remove_accent(character: str) -> str: |
|||
decomposed: str = unicodedata.decomposition(character) |
|||
if not decomposed: |
|||
return character |
|||
|
|||
codes: List[str] = decomposed.split(" ") |
|||
|
|||
return chr(int(codes[0], 16)) |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def unicode_range(character: str) -> Optional[str]: |
|||
""" |
|||
Retrieve the Unicode range official name from a single character. |
|||
""" |
|||
character_ord: int = ord(character) |
|||
|
|||
for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): |
|||
if character_ord in ord_range: |
|||
return range_name |
|||
|
|||
return None |
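# Illustrative sketch (not from the upstream file): unicode_range() maps a single
# character to the name of its Unicode block, as listed in UNICODE_RANGES_COMBINED.
# The commented outputs are the standard Unicode block names and may vary with the
# table the library ships.
from charset_normalizer.utils import unicode_range

print(unicode_range("A"))       # U+0041 -> 'Basic Latin'
print(unicode_range("é"))       # U+00E9 -> 'Latin-1 Supplement'
print(unicode_range("\u3042"))  # Hiragana letter A -> 'Hiragana'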
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_latin(character: str) -> bool: |
|||
try: |
|||
description: str = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
return "LATIN" in description |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_ascii(character: str) -> bool: |
|||
try: |
|||
character.encode("ascii") |
|||
except UnicodeEncodeError: |
|||
return False |
|||
return True |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_punctuation(character: str) -> bool: |
|||
character_category: str = unicodedata.category(character) |
|||
|
|||
if "P" in character_category: |
|||
return True |
|||
|
|||
character_range: Optional[str] = unicode_range(character) |
|||
|
|||
if character_range is None: |
|||
return False |
|||
|
|||
return "Punctuation" in character_range |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_symbol(character: str) -> bool: |
|||
character_category: str = unicodedata.category(character) |
|||
|
|||
if "S" in character_category or "N" in character_category: |
|||
return True |
|||
|
|||
character_range: Optional[str] = unicode_range(character) |
|||
|
|||
if character_range is None: |
|||
return False |
|||
|
|||
return "Forms" in character_range |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_emoticon(character: str) -> bool: |
|||
character_range: Optional[str] = unicode_range(character) |
|||
|
|||
if character_range is None: |
|||
return False |
|||
|
|||
return "Emoticons" in character_range |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_separator(character: str) -> bool: |
|||
if character.isspace() or character in {"|", "+", "<", ">"}: |
|||
return True |
|||
|
|||
character_category: str = unicodedata.category(character) |
|||
|
|||
return "Z" in character_category or character_category in {"Po", "Pd", "Pc"} |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_case_variable(character: str) -> bool: |
|||
return character.islower() != character.isupper() |
|||
|
|||
|
|||
def is_private_use_only(character: str) -> bool: |
|||
character_category: str = unicodedata.category(character) |
|||
|
|||
return character_category == "Co" |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_cjk(character: str) -> bool: |
|||
try: |
|||
character_name = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
|
|||
return "CJK" in character_name |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_hiragana(character: str) -> bool: |
|||
try: |
|||
character_name = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
|
|||
return "HIRAGANA" in character_name |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_katakana(character: str) -> bool: |
|||
try: |
|||
character_name = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
|
|||
return "KATAKANA" in character_name |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_hangul(character: str) -> bool: |
|||
try: |
|||
character_name = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
|
|||
return "HANGUL" in character_name |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_thai(character: str) -> bool: |
|||
try: |
|||
character_name = unicodedata.name(character) |
|||
except ValueError: |
|||
return False |
|||
|
|||
return "THAI" in character_name |
|||
|
|||
|
|||
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED)) |
|||
def is_unicode_range_secondary(range_name: str) -> bool: |
|||
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD) |
|||
|
|||
|
|||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) |
|||
def is_unprintable(character: str) -> bool: |
|||
return ( |
|||
character.isspace() is False # includes \n \t \r \v |
|||
and character.isprintable() is False |
|||
and character != "\x1A" # Why? It's the ASCII substitute character. |
|||
and character != "\ufeff" # bug discovered in Python: |
|||
# the Zero Width No-Break Space (located in Arabic Presentation Forms-B, Unicode 1.1) is not acknowledged as a space. |
|||
) |
|||
|
|||
|
|||
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]: |
|||
""" |
|||
Extract, using an ASCII-only decoder, any encoding declared in the first n bytes of the sequence. |
|||
""" |
|||
if not isinstance(sequence, bytes): |
|||
raise TypeError |
|||
|
|||
seq_len: int = len(sequence) |
|||
|
|||
results: List[str] = findall( |
|||
RE_POSSIBLE_ENCODING_INDICATION, |
|||
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"), |
|||
) |
|||
|
|||
if len(results) == 0: |
|||
return None |
|||
|
|||
for specified_encoding in results: |
|||
specified_encoding = specified_encoding.lower().replace("-", "_") |
|||
|
|||
encoding_alias: str |
|||
encoding_iana: str |
|||
|
|||
for encoding_alias, encoding_iana in aliases.items(): |
|||
if encoding_alias == specified_encoding: |
|||
return encoding_iana |
|||
if encoding_iana == specified_encoding: |
|||
return encoding_iana |
|||
|
|||
return None |
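# Illustrative sketch (not from the upstream file): any_specified_encoding() picks
# up an encoding label declared inside the payload itself (XML prolog, HTML meta,
# Python coding cookie, ...) and normalizes it through encodings.aliases.
# The byte string below is purely illustrative.
from charset_normalizer.utils import any_specified_encoding

declared = any_specified_encoding(b'<?xml version="1.0" encoding="ISO-8859-1"?>')
print(declared)  # expected: the normalized codec name, e.g. 'latin_1'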
|||
|
|||
|
|||
@lru_cache(maxsize=128) |
|||
def is_multi_byte_encoding(name: str) -> bool: |
|||
""" |
|||
Verify whether a specific encoding is a multi-byte one, based on its IANA name. |
|||
""" |
|||
return name in { |
|||
"utf_8", |
|||
"utf_8_sig", |
|||
"utf_16", |
|||
"utf_16_be", |
|||
"utf_16_le", |
|||
"utf_32", |
|||
"utf_32_le", |
|||
"utf_32_be", |
|||
"utf_7", |
|||
} or issubclass( |
|||
importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, |
|||
MultibyteIncrementalDecoder, |
|||
) |
|||
|
|||
|
|||
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]: |
|||
""" |
|||
Identify and extract SIG/BOM in given sequence. |
|||
""" |
|||
|
|||
for iana_encoding in ENCODING_MARKS: |
|||
marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding] |
|||
|
|||
if isinstance(marks, bytes): |
|||
marks = [marks] |
|||
|
|||
for mark in marks: |
|||
if sequence.startswith(mark): |
|||
return iana_encoding, mark |
|||
|
|||
return None, b"" |
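# Illustrative sketch (not from the upstream file): identify_sig_or_bom() returns
# the ENCODING_MARKS key whose BOM/signature prefixes the payload, together with
# the matched mark, or (None, b"") when nothing matches. The exact key name
# depends on ENCODING_MARKS.
from charset_normalizer.utils import identify_sig_or_bom

payload = "hello".encode("utf_8_sig")       # b'\xef\xbb\xbfhello'
guess, mark = identify_sig_or_bom(payload)
print(guess, mark)                          # expected: a UTF-8 key and b'\xef\xbb\xbf'
print(identify_sig_or_bom(b"plain ascii"))  # (None, b'')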
|||
|
|||
|
|||
def should_strip_sig_or_bom(iana_encoding: str) -> bool: |
|||
return iana_encoding not in {"utf_16", "utf_32"} |
|||
|
|||
|
|||
def iana_name(cp_name: str, strict: bool = True) -> str: |
|||
cp_name = cp_name.lower().replace("-", "_") |
|||
|
|||
encoding_alias: str |
|||
encoding_iana: str |
|||
|
|||
for encoding_alias, encoding_iana in aliases.items(): |
|||
if cp_name in [encoding_alias, encoding_iana]: |
|||
return encoding_iana |
|||
|
|||
if strict: |
|||
raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name)) |
|||
|
|||
return cp_name |
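# Illustrative sketch (not from the upstream file): iana_name() normalizes
# user-facing labels to the codec names used throughout the package; with
# strict=False it falls back to the cleaned-up input instead of raising.
from charset_normalizer.utils import iana_name

print(iana_name("UTF-8"))                      # 'utf_8'
print(iana_name("latin-1"))                    # 'latin_1'
print(iana_name("not-a-codec", strict=False))  # 'not_a_codec' (fallback, no lookup hit)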
|||
|
|||
|
|||
def range_scan(decoded_sequence: str) -> List[str]: |
|||
ranges: Set[str] = set() |
|||
|
|||
for character in decoded_sequence: |
|||
character_range: Optional[str] = unicode_range(character) |
|||
|
|||
if character_range is None: |
|||
continue |
|||
|
|||
ranges.add(character_range) |
|||
|
|||
return list(ranges) |
|||
|
|||
|
|||
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: |
|||
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): |
|||
return 0.0 |
|||
|
|||
decoder_a = importlib.import_module( |
|||
"encodings.{}".format(iana_name_a) |
|||
).IncrementalDecoder |
|||
decoder_b = importlib.import_module( |
|||
"encodings.{}".format(iana_name_b) |
|||
).IncrementalDecoder |
|||
|
|||
id_a: IncrementalDecoder = decoder_a(errors="ignore") |
|||
id_b: IncrementalDecoder = decoder_b(errors="ignore") |
|||
|
|||
character_match_count: int = 0 |
|||
|
|||
for i in range(255): |
|||
to_be_decoded: bytes = bytes([i]) |
|||
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): |
|||
character_match_count += 1 |
|||
|
|||
return character_match_count / 254 |
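# Illustrative sketch (not from the upstream file): cp_similarity() is the ratio
# of byte values that decode to the same character under both single-byte code
# pages; is_cp_similar() is the precomputed ">= 80% similar" shortcut.
from charset_normalizer.utils import cp_similarity, is_cp_similar

print(round(cp_similarity("cp1252", "latin_1"), 2))  # high, both are Latin-based
print(is_cp_similar("cp1252", "latin_1"))            # True if listed in IANA_SUPPORTED_SIMILAR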
|||
|
|||
|
|||
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: |
|||
""" |
|||
Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using |
|||
the function cp_similarity. |
|||
""" |
|||
return ( |
|||
iana_name_a in IANA_SUPPORTED_SIMILAR |
|||
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] |
|||
) |
|||
|
|||
|
|||
def set_logging_handler( |
|||
name: str = "charset_normalizer", |
|||
level: int = logging.INFO, |
|||
format_string: str = "%(asctime)s | %(levelname)s | %(message)s", |
|||
) -> None: |
|||
logger = logging.getLogger(name) |
|||
logger.setLevel(level) |
|||
|
|||
handler = logging.StreamHandler() |
|||
handler.setFormatter(logging.Formatter(format_string)) |
|||
logger.addHandler(handler) |
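# Illustrative sketch (not from the upstream file): attach a verbose handler to
# the package logger before running a detection; from_bytes() is the package's
# public entry point and best() picks the top-ranked CharsetMatch.
import logging

from charset_normalizer import from_bytes
from charset_normalizer.utils import set_logging_handler

set_logging_handler(level=logging.DEBUG)
guess = from_bytes("héllo wörld".encode("cp1252")).best()
print(guess.encoding if guess else None)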
|||
|
|||
|
|||
def cut_sequence_chunks( |
|||
sequences: bytes, |
|||
encoding_iana: str, |
|||
offsets: range, |
|||
chunk_size: int, |
|||
bom_or_sig_available: bool, |
|||
strip_sig_or_bom: bool, |
|||
sig_payload: bytes, |
|||
is_multi_byte_decoder: bool, |
|||
decoded_payload: Optional[str] = None, |
|||
) -> Generator[str, None, None]: |
|||
if decoded_payload and is_multi_byte_decoder is False: |
|||
for i in offsets: |
|||
chunk = decoded_payload[i : i + chunk_size] |
|||
if not chunk: |
|||
break |
|||
yield chunk |
|||
else: |
|||
for i in offsets: |
|||
chunk_end = i + chunk_size |
|||
if chunk_end > len(sequences) + 8: |
|||
continue |
|||
|
|||
cut_sequence = sequences[i : i + chunk_size] |
|||
|
|||
if bom_or_sig_available and strip_sig_or_bom is False: |
|||
cut_sequence = sig_payload + cut_sequence |
|||
|
|||
chunk = cut_sequence.decode( |
|||
encoding_iana, |
|||
errors="ignore" if is_multi_byte_decoder else "strict", |
|||
) |
|||
|
|||
# multi-byte bad cutting detector and adjustment |
|||
# not the cleanest way to perform that fix but clever enough for now. |
|||
if is_multi_byte_decoder and i > 0: |
|||
chunk_partial_size_chk: int = min(chunk_size, 16) |
|||
|
|||
if ( |
|||
decoded_payload |
|||
and chunk[:chunk_partial_size_chk] not in decoded_payload |
|||
): |
|||
for j in range(i, i - 4, -1): |
|||
cut_sequence = sequences[j:chunk_end] |
|||
|
|||
if bom_or_sig_available and strip_sig_or_bom is False: |
|||
cut_sequence = sig_payload + cut_sequence |
|||
|
|||
chunk = cut_sequence.decode(encoding_iana, errors="ignore") |
|||
|
|||
if chunk[:chunk_partial_size_chk] in decoded_payload: |
|||
break |
|||
|
|||
yield chunk |
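# Illustrative sketch (not from the upstream file): when a pre-decoded payload is
# supplied and the codec is single-byte, cut_sequence_chunks() simply slices the
# decoded text into fixed-size windows. The values below are arbitrary.
from charset_normalizer.utils import cut_sequence_chunks

text = "abcdefghij" * 10                 # 100 characters
chunks = list(
    cut_sequence_chunks(
        text.encode("utf_8"),
        encoding_iana="utf_8",
        offsets=range(0, 100, 32),
        chunk_size=32,
        bom_or_sig_available=False,
        strip_sig_or_bom=False,
        sig_payload=b"",
        is_multi_byte_decoder=False,
        decoded_payload=text,            # enables the fast slicing path
    )
)
print(len(chunks), repr(chunks[0][:10]))  # 4 'abcdefghij'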
@ -0,0 +1,6 @@ |
|||
""" |
|||
Expose version |
|||
""" |
|||
|
|||
__version__ = "3.2.0" |
|||
VERSION = __version__.split(".") |
@ -0,0 +1,8 @@ |
|||
# -*- coding: utf-8 -*- |
|||
try: |
|||
from ._version import version as __version__ |
|||
except ImportError: |
|||
__version__ = 'unknown' |
|||
|
|||
__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', |
|||
'utils', 'zoneinfo'] |
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,43 @@ |
|||
""" |
|||
Common code used in multiple modules. |
|||
""" |
|||
|
|||
|
|||
class weekday(object): |
|||
__slots__ = ["weekday", "n"] |
|||
|
|||
def __init__(self, weekday, n=None): |
|||
self.weekday = weekday |
|||
self.n = n |
|||
|
|||
def __call__(self, n): |
|||
if n == self.n: |
|||
return self |
|||
else: |
|||
return self.__class__(self.weekday, n) |
|||
|
|||
def __eq__(self, other): |
|||
try: |
|||
if self.weekday != other.weekday or self.n != other.n: |
|||
return False |
|||
except AttributeError: |
|||
return False |
|||
return True |
|||
|
|||
def __hash__(self): |
|||
return hash(( |
|||
self.weekday, |
|||
self.n, |
|||
)) |
|||
|
|||
def __ne__(self, other): |
|||
return not (self == other) |
|||
|
|||
def __repr__(self): |
|||
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] |
|||
if not self.n: |
|||
return s |
|||
else: |
|||
return "%s(%+d)" % (s, self.n) |
|||
|
|||
# vim:ts=4:sw=4:et |
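# Illustrative sketch (not from the upstream file): this weekday helper is what
# MO, TU, ... in dateutil.relativedelta are built from; calling an instance with
# n returns a new instance carrying that ordinal.
from dateutil.relativedelta import MO, FR

print(FR(+2))        # FR(+2): "second Friday" when combined with relativedelta
print(MO(1) == MO)   # False: MO has n=None while MO(1) has n=1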
@ -0,0 +1,5 @@ |
|||
# coding: utf-8 |
|||
# file generated by setuptools_scm |
|||
# don't change, don't track in version control |
|||
version = '2.8.2' |
|||
version_tuple = (2, 8, 2) |
@ -0,0 +1,89 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
This module offers a generic Easter computing method for any given year, using |
|||
Western, Orthodox or Julian algorithms. |
|||
""" |
|||
|
|||
import datetime |
|||
|
|||
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] |
|||
|
|||
EASTER_JULIAN = 1 |
|||
EASTER_ORTHODOX = 2 |
|||
EASTER_WESTERN = 3 |
|||
|
|||
|
|||
def easter(year, method=EASTER_WESTERN): |
|||
""" |
|||
This method was ported from the work done by GM Arts, |
|||
on top of the algorithm by Claus Tondering, which was |
|||
based in part on the algorithm of Oudin (1940), as |
|||
quoted in "Explanatory Supplement to the Astronomical |
|||
Almanac", P. Kenneth Seidelmann, editor. |
|||
|
|||
This algorithm implements three different Easter |
|||
calculation methods: |
|||
|
|||
1. Original calculation in Julian calendar, valid in |
|||
dates after 326 AD |
|||
2. Original method, with date converted to Gregorian |
|||
calendar, valid in years 1583 to 4099 |
|||
3. Revised method, in Gregorian calendar, valid in |
|||
years 1583 to 4099 as well |
|||
|
|||
These methods are represented by the constants: |
|||
|
|||
* ``EASTER_JULIAN = 1`` |
|||
* ``EASTER_ORTHODOX = 2`` |
|||
* ``EASTER_WESTERN = 3`` |
|||
|
|||
The default method is method 3. |
|||
|
|||
More about the algorithm may be found at: |
|||
|
|||
`GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_ |
|||
|
|||
and |
|||
|
|||
`The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_ |
|||
|
|||
""" |
|||
|
|||
if not (1 <= method <= 3): |
|||
raise ValueError("invalid method") |
|||
|
|||
# g - Golden year - 1 |
|||
# c - Century |
|||
# h - (23 - Epact) mod 30 |
|||
# i - Number of days from March 21 to Paschal Full Moon |
|||
# j - Weekday for PFM (0=Sunday, etc) |
|||
# p - Number of days from March 21 to Sunday on or before PFM |
|||
# (-6 to 28 methods 1 & 3, to 56 for method 2) |
|||
# e - Extra days to add for method 2 (converting Julian |
|||
# date to Gregorian date) |
|||
|
|||
y = year |
|||
g = y % 19 |
|||
e = 0 |
|||
if method < 3: |
|||
# Old method |
|||
i = (19*g + 15) % 30 |
|||
j = (y + y//4 + i) % 7 |
|||
if method == 2: |
|||
# Extra dates to convert Julian to Gregorian date |
|||
e = 10 |
|||
if y > 1600: |
|||
e = e + y//100 - 16 - (y//100 - 16)//4 |
|||
else: |
|||
# New method |
|||
c = y//100 |
|||
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 |
|||
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) |
|||
j = (y + y//4 + i + 2 - c + c//4) % 7 |
|||
|
|||
# p can be from -6 to 56 corresponding to dates 22 March to 23 May |
|||
# (later dates apply to method 2, although 23 May never actually occurs) |
|||
p = i - j + e |
|||
d = 1 + (p + 27 + (p + 6)//40) % 31 |
|||
m = 3 + (p + 26)//30 |
|||
return datetime.date(int(y), int(m), int(d)) |
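# Illustrative sketch (not from the upstream file): Western (default) and
# Orthodox Easter for the same year; both results are Gregorian calendar dates.
from dateutil.easter import easter, EASTER_ORTHODOX

print(easter(2024))                    # 2024-03-31
print(easter(2024, EASTER_ORTHODOX))   # 2024-05-05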
@ -0,0 +1,61 @@ |
|||
# -*- coding: utf-8 -*- |
|||
from ._parser import parse, parser, parserinfo, ParserError |
|||
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER |
|||
from ._parser import UnknownTimezoneWarning |
|||
|
|||
from ._parser import __doc__ |
|||
|
|||
from .isoparser import isoparser, isoparse |
|||
|
|||
__all__ = ['parse', 'parser', 'parserinfo', |
|||
'isoparse', 'isoparser', |
|||
'ParserError', |
|||
'UnknownTimezoneWarning'] |
|||
|
|||
|
|||
### |
|||
# Deprecate portions of the private interface so that downstream code that |
|||
# is improperly relying on it is given *some* notice. |
|||
|
|||
|
|||
def __deprecated_private_func(f): |
|||
from functools import wraps |
|||
import warnings |
|||
|
|||
msg = ('{name} is a private function and may break without warning; ' |
|||
'it will be moved and/or renamed in future versions.') |
|||
msg = msg.format(name=f.__name__) |
|||
|
|||
@wraps(f) |
|||
def deprecated_func(*args, **kwargs): |
|||
warnings.warn(msg, DeprecationWarning) |
|||
return f(*args, **kwargs) |
|||
|
|||
return deprecated_func |
|||
|
|||
def __deprecate_private_class(c): |
|||
import warnings |
|||
|
|||
msg = ('{name} is a private class and may break without warning; ' |
|||
'it will be moved and/or renamed in future versions.') |
|||
msg = msg.format(name=c.__name__) |
|||
|
|||
class private_class(c): |
|||
__doc__ = c.__doc__ |
|||
|
|||
def __init__(self, *args, **kwargs): |
|||
warnings.warn(msg, DeprecationWarning) |
|||
super(private_class, self).__init__(*args, **kwargs) |
|||
|
|||
private_class.__name__ = c.__name__ |
|||
|
|||
return private_class |
|||
|
|||
|
|||
from ._parser import _timelex, _resultbase |
|||
from ._parser import _tzparser, _parsetz |
|||
|
|||
_timelex = __deprecate_private_class(_timelex) |
|||
_tzparser = __deprecate_private_class(_tzparser) |
|||
_resultbase = __deprecate_private_class(_resultbase) |
|||
_parsetz = __deprecated_private_func(_parsetz) |
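# Illustrative sketch (not from the upstream file): the two public entry points
# re-exported by this package, the fuzzy parse() and the strict isoparse().
from dateutil.parser import parse, isoparse

print(parse("Sat, 11 May 2024 14:30:00 +0200"))
print(isoparse("2024-05-11T14:30:00+02:00"))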
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@ -0,0 +1,416 @@ |
|||
# -*- coding: utf-8 -*- |
|||
""" |
|||
This module offers a parser for ISO-8601 strings |
|||
|
|||
It is intended to support all valid date, time and datetime formats per the |
|||
ISO-8601 specification. |
|||
|
|||
.. versionadded:: 2.7.0 |
|||
""" |
|||
from datetime import datetime, timedelta, time, date |
|||
import calendar |
|||
from dateutil import tz |
|||
|
|||
from functools import wraps |
|||
|
|||
import re |
|||
import six |
|||
|
|||
__all__ = ["isoparse", "isoparser"] |
|||
|
|||
|
|||
def _takes_ascii(f): |
|||
@wraps(f) |
|||
def func(self, str_in, *args, **kwargs): |
|||
# If it's a stream, read the whole thing |
|||
str_in = getattr(str_in, 'read', lambda: str_in)() |
|||
|
|||
# If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII |
|||
if isinstance(str_in, six.text_type): |
|||
# ASCII is the same in UTF-8 |
|||
try: |
|||
str_in = str_in.encode('ascii') |
|||
except UnicodeEncodeError as e: |
|||
msg = 'ISO-8601 strings should contain only ASCII characters' |
|||
six.raise_from(ValueError(msg), e) |
|||
|
|||
return f(self, str_in, *args, **kwargs) |
|||
|
|||
return func |
|||
|
|||
|
|||
class isoparser(object): |
|||
def __init__(self, sep=None): |
|||
""" |
|||
:param sep: |
|||
A single character that separates date and time portions. If |
|||
``None``, the parser will accept any single character. |
|||
For strict ISO-8601 adherence, pass ``'T'``. |
|||
""" |
|||
if sep is not None: |
|||
if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): |
|||
raise ValueError('Separator must be a single, non-numeric ' + |
|||
'ASCII character') |
|||
|
|||
sep = sep.encode('ascii') |
|||
|
|||
self._sep = sep |
|||
|
|||
@_takes_ascii |
|||
def isoparse(self, dt_str): |
|||
""" |
|||
Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. |
|||
|
|||
An ISO-8601 datetime string consists of a date portion, followed |
|||
optionally by a time portion - the date and time portions are separated |
|||
by a single character separator, which is ``T`` in the official |
|||
standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be |
|||
combined with a time portion. |
|||
|
|||
Supported date formats are: |
|||
|
|||
Common: |
|||
|
|||
- ``YYYY`` |
|||
- ``YYYY-MM`` or ``YYYYMM`` |
|||
- ``YYYY-MM-DD`` or ``YYYYMMDD`` |
|||
|
|||
Uncommon: |
|||
|
|||
- ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 1, i.e. Monday) |
|||
- ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day |
|||
|
|||
The ISO week and day numbering follows the same logic as |
|||
:func:`datetime.date.isocalendar`. |
|||
|
|||
Supported time formats are: |
|||
|
|||
- ``hh`` |
|||
- ``hh:mm`` or ``hhmm`` |
|||
- ``hh:mm:ss`` or ``hhmmss`` |
|||
- ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) |
|||
|
|||
Midnight is a special case for `hh`, as the standard supports both |
|||
00:00 and 24:00 as a representation. The decimal separator can be |
|||
either a dot or a comma. |
|||
|
|||
|
|||
.. caution:: |
|||
|
|||
Support for fractional components other than seconds is part of the |
|||
ISO-8601 standard, but is not currently implemented in this parser. |
|||
|
|||
Supported time zone offset formats are: |
|||
|
|||
- `Z` (UTC) |
|||
- `±HH:MM` |
|||
- `±HHMM` |
|||
- `±HH` |
|||
|
|||
Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, |
|||
with the exception of UTC, which will be represented as |
|||
:class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such |
|||
as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. |
|||
|
|||
:param dt_str: |
|||
A string or stream containing only an ISO-8601 datetime string |
|||
|
|||
:return: |
|||
Returns a :class:`datetime.datetime` representing the string. |
|||
Unspecified components default to their lowest value. |
|||
|
|||
.. warning:: |
|||
|
|||
As of version 2.7.0, the strictness of the parser should not be |
|||
considered a stable part of the contract. Any valid ISO-8601 string |
|||
that parses correctly with the default settings will continue to |
|||
parse correctly in future versions, but invalid strings that |
|||
currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not |
|||
guaranteed to continue failing in future versions if they encode |
|||
a valid date. |
|||
|
|||
.. versionadded:: 2.7.0 |
|||
""" |
|||
components, pos = self._parse_isodate(dt_str) |
|||
|
|||
if len(dt_str) > pos: |
|||
if self._sep is None or dt_str[pos:pos + 1] == self._sep: |
|||
components += self._parse_isotime(dt_str[pos + 1:]) |
|||
else: |
|||
raise ValueError('String contains unknown ISO components') |
|||
|
|||
if len(components) > 3 and components[3] == 24: |
|||
components[3] = 0 |
|||
return datetime(*components) + timedelta(days=1) |
|||
|
|||
return datetime(*components) |
|||
|
|||
@_takes_ascii |
|||
def parse_isodate(self, datestr): |
|||
""" |
|||
Parse the date portion of an ISO string. |
|||
|
|||
:param datestr: |
|||
The string portion of an ISO string, without a separator |
|||
|
|||
:return: |
|||
Returns a :class:`datetime.date` object |
|||
""" |
|||
components, pos = self._parse_isodate(datestr) |
|||
if pos < len(datestr): |
|||
raise ValueError('String contains unknown ISO ' + |
|||
'components: {!r}'.format(datestr.decode('ascii'))) |
|||
return date(*components) |
|||
|
|||
@_takes_ascii |
|||
def parse_isotime(self, timestr): |
|||
""" |
|||
Parse the time portion of an ISO string. |
|||
|
|||
:param timestr: |
|||
The time portion of an ISO string, without a separator |
|||
|
|||
:return: |
|||
Returns a :class:`datetime.time` object |
|||
""" |
|||
components = self._parse_isotime(timestr) |
|||
if components[0] == 24: |
|||
components[0] = 0 |
|||
return time(*components) |
|||
|
|||
@_takes_ascii |
|||
def parse_tzstr(self, tzstr, zero_as_utc=True): |
|||
""" |
|||
Parse a valid ISO time zone string. |
|||
|
|||
See :func:`isoparser.isoparse` for details on supported formats. |
|||
|
|||
:param tzstr: |
|||
A string representing an ISO time zone offset |
|||
|
|||
:param zero_as_utc: |
|||
Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones |
|||
|
|||
:return: |
|||
Returns :class:`dateutil.tz.tzoffset` for offsets and |
|||
:class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is |
|||
specified) offsets equivalent to UTC. |
|||
""" |
|||
return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) |
|||
|
|||
# Constants |
|||
_DATE_SEP = b'-' |
|||
_TIME_SEP = b':' |
|||
_FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') |
|||
|
|||
def _parse_isodate(self, dt_str): |
|||
try: |
|||
return self._parse_isodate_common(dt_str) |
|||
except ValueError: |
|||
return self._parse_isodate_uncommon(dt_str) |
|||
|
|||
def _parse_isodate_common(self, dt_str): |
|||
len_str = len(dt_str) |
|||
components = [1, 1, 1] |
|||
|
|||
if len_str < 4: |
|||
raise ValueError('ISO string too short') |
|||
|
|||
# Year |
|||
components[0] = int(dt_str[0:4]) |
|||
pos = 4 |
|||
if pos >= len_str: |
|||
return components, pos |
|||
|
|||
has_sep = dt_str[pos:pos + 1] == self._DATE_SEP |
|||
if has_sep: |
|||
pos += 1 |
|||
|
|||
# Month |
|||
if len_str - pos < 2: |
|||
raise ValueError('Invalid common month') |
|||
|
|||
components[1] = int(dt_str[pos:pos + 2]) |
|||
pos += 2 |
|||
|
|||
if pos >= len_str: |
|||
if has_sep: |
|||
return components, pos |
|||
else: |
|||
raise ValueError('Invalid ISO format') |
|||
|
|||
if has_sep: |
|||
if dt_str[pos:pos + 1] != self._DATE_SEP: |
|||
raise ValueError('Invalid separator in ISO string') |
|||
pos += 1 |
|||
|
|||
# Day |
|||
if len_str - pos < 2: |
|||
raise ValueError('Invalid common day') |
|||
components[2] = int(dt_str[pos:pos + 2]) |
|||
return components, pos + 2 |
|||
|
|||
def _parse_isodate_uncommon(self, dt_str): |
|||
if len(dt_str) < 4: |
|||
raise ValueError('ISO string too short') |
|||
|
|||
# All ISO formats start with the year |
|||
year = int(dt_str[0:4]) |
|||
|
|||
has_sep = dt_str[4:5] == self._DATE_SEP |
|||
|
|||
pos = 4 + has_sep # Skip '-' if it's there |
|||
if dt_str[pos:pos + 1] == b'W': |
|||
# YYYY-?Www-?D? |
|||
pos += 1 |
|||
weekno = int(dt_str[pos:pos + 2]) |
|||
pos += 2 |
|||
|
|||
dayno = 1 |
|||
if len(dt_str) > pos: |
|||
if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: |
|||
raise ValueError('Inconsistent use of dash separator') |
|||
|
|||
pos += has_sep |
|||
|
|||
dayno = int(dt_str[pos:pos + 1]) |
|||
pos += 1 |
|||
|
|||
base_date = self._calculate_weekdate(year, weekno, dayno) |
|||
else: |
|||
# YYYYDDD or YYYY-DDD |
|||
if len(dt_str) - pos < 3: |
|||
raise ValueError('Invalid ordinal day') |
|||
|
|||
ordinal_day = int(dt_str[pos:pos + 3]) |
|||
pos += 3 |
|||
|
|||
if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): |
|||
raise ValueError('Invalid ordinal day' + |
|||
' {} for year {}'.format(ordinal_day, year)) |
|||
|
|||
base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) |
|||
|
|||
components = [base_date.year, base_date.month, base_date.day] |
|||
return components, pos |
|||
|
|||
def _calculate_weekdate(self, year, week, day): |
|||
""" |
|||
Calculate the date corresponding to the ISO year-week-day calendar. |
|||
|
|||
This function is effectively the inverse of |
|||
:func:`datetime.date.isocalendar`. |
|||
|
|||
:param year: |
|||
The year in the ISO calendar |
|||
|
|||
:param week: |
|||
The week in the ISO calendar - range is [1, 53] |
|||
|
|||
:param day: |
|||
The day in the ISO calendar - range is [1 (MON), 7 (SUN)] |
|||
|
|||
:return: |
|||
Returns a :class:`datetime.date` |
|||
""" |
|||
if not 0 < week < 54: |
|||
raise ValueError('Invalid week: {}'.format(week)) |
|||
|
|||
if not 0 < day < 8: # Range is 1-7 |
|||
raise ValueError('Invalid weekday: {}'.format(day)) |
|||
|
|||
# Get week 1 for the specific year: |
|||
jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it |
|||
week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) |
|||
|
|||
# Now add the specific number of weeks and days to get what we want |
|||
week_offset = (week - 1) * 7 + (day - 1) |
|||
return week_1 + timedelta(days=week_offset) |
|||
|
|||
def _parse_isotime(self, timestr): |
|||
len_str = len(timestr) |
|||
components = [0, 0, 0, 0, None] |
|||
pos = 0 |
|||
comp = -1 |
|||
|
|||
if len_str < 2: |
|||
raise ValueError('ISO time too short') |
|||
|
|||
has_sep = False |
|||
|
|||
while pos < len_str and comp < 5: |
|||
comp += 1 |
|||
|
|||
if timestr[pos:pos + 1] in b'-+Zz': |
|||
# Detect time zone boundary |
|||
components[-1] = self._parse_tzstr(timestr[pos:]) |
|||
pos = len_str |
|||
break |
|||
|
|||
if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP: |
|||
has_sep = True |
|||
pos += 1 |
|||
elif comp == 2 and has_sep: |
|||
if timestr[pos:pos+1] != self._TIME_SEP: |
|||
raise ValueError('Inconsistent use of colon separator') |
|||
pos += 1 |
|||
|
|||
if comp < 3: |
|||
# Hour, minute, second |
|||
components[comp] = int(timestr[pos:pos + 2]) |
|||
pos += 2 |
|||
|
|||
if comp == 3: |
|||
# Fraction of a second |
|||
frac = self._FRACTION_REGEX.match(timestr[pos:]) |
|||
if not frac: |
|||
continue |
|||
|
|||
us_str = frac.group(1)[:6] # Truncate to microseconds |
|||
components[comp] = int(us_str) * 10**(6 - len(us_str)) |
|||
pos += len(frac.group()) |
|||
|
|||
if pos < len_str: |
|||
raise ValueError('Unused components in ISO string') |
|||
|
|||
if components[0] == 24: |
|||
# Standard supports 00:00 and 24:00 as representations of midnight |
|||
if any(component != 0 for component in components[1:4]): |
|||
raise ValueError('Hour may only be 24 at 24:00:00.000') |
|||
|
|||
return components |
|||
|
|||
def _parse_tzstr(self, tzstr, zero_as_utc=True): |
|||
if tzstr == b'Z' or tzstr == b'z': |
|||
return tz.UTC |
|||
|
|||
if len(tzstr) not in {3, 5, 6}: |
|||
raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') |
|||
|
|||
if tzstr[0:1] == b'-': |
|||
mult = -1 |
|||
elif tzstr[0:1] == b'+': |
|||
mult = 1 |
|||
else: |
|||
raise ValueError('Time zone offset requires sign') |
|||
|
|||
hours = int(tzstr[1:3]) |
|||
if len(tzstr) == 3: |
|||
minutes = 0 |
|||
else: |
|||
minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) |
|||
|
|||
if zero_as_utc and hours == 0 and minutes == 0: |
|||
return tz.UTC |
|||
else: |
|||
if minutes > 59: |
|||
raise ValueError('Invalid minutes in time zone offset') |
|||
|
|||
if hours > 23: |
|||
raise ValueError('Invalid hours in time zone offset') |
|||
|
|||
return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) |
|||
|
|||
|
|||
DEFAULT_ISOPARSER = isoparser() |
|||
isoparse = DEFAULT_ISOPARSER.isoparse |
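# Illustrative sketch (not from the upstream file): a few of the formats
# enumerated in the isoparse() docstring above.
from dateutil.parser import isoparse

print(isoparse("2024"))                     # 2024-01-01 00:00:00 (lowest defaults)
print(isoparse("2024-W19-6"))               # ISO week date -> 2024-05-11 00:00:00
print(isoparse("20240511T143000.250000Z"))  # compact form, fractional seconds, UTC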
@ -0,0 +1,599 @@ |
|||
# -*- coding: utf-8 -*- |
|||
import datetime |
|||
import calendar |
|||
|
|||
import operator |
|||
from math import copysign |
|||
|
|||
from six import integer_types |
|||
from warnings import warn |
|||
|
|||
from ._common import weekday |
|||
|
|||
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) |
|||
|
|||
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] |
|||
|
|||
|
|||
class relativedelta(object): |
|||
""" |
|||
The relativedelta type is designed to be applied to an existing datetime and |
|||
can replace specific components of that datetime, or represent an interval |
|||
of time. |
|||
|
|||
It is based on the specification of the excellent work done by M.-A. Lemburg |
|||
in his |
|||
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension. |
|||
However, notice that this type does *NOT* implement the same algorithm as |
|||
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. |
|||
|
|||
There are two different ways to build a relativedelta instance. The |
|||
first one is passing it two date/datetime classes:: |
|||
|
|||
relativedelta(datetime1, datetime2) |
|||
|
|||
The second one is passing it any number of the following keyword arguments:: |
|||
|
|||
relativedelta(arg1=x,arg2=y,arg3=z...) |
|||
|
|||
year, month, day, hour, minute, second, microsecond: |
|||
Absolute information (argument is singular); adding or subtracting a |
|||
relativedelta with absolute information does not perform an arithmetic |
|||
operation, but rather REPLACES the corresponding value in the |
|||
original datetime with the value(s) in relativedelta. |
|||
|
|||
years, months, weeks, days, hours, minutes, seconds, microseconds: |
|||
Relative information, may be negative (argument is plural); adding |
|||
or subtracting a relativedelta with relative information performs |
|||
the corresponding arithmetic operation on the original datetime value |
|||
with the information in the relativedelta. |
|||
|
|||
weekday: |
|||
One of the weekday instances (MO, TU, etc) available in the |
|||
relativedelta module. These instances may receive a parameter N, |
|||
specifying the Nth weekday, which could be positive or negative |
|||
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying |
|||
+1. You can also use an integer, where 0=MO. This argument is always |
|||
relative e.g. if the calculated date is already Monday, using MO(1) |
|||
or MO(-1) won't change the day. To effectively make it absolute, use |
|||
it in combination with the day argument (e.g. day=1, MO(1) for first |
|||
Monday of the month). |
|||
|
|||
leapdays: |
|||
Will add the given days to the date found, if the year is a leap |
|||
year and the date found is after 28 February. |
|||
|
|||
yearday, nlyearday: |
|||
Set the yearday or the non-leap year day (jump leap days). |
|||
These are converted to day/month/leapdays information. |
|||
|
|||
There are relative and absolute forms of the keyword |
|||
arguments. The plural is relative, and the singular is |
|||
absolute. For each argument in the order below, the absolute form |
|||
is applied first (by setting each attribute to that value) and |
|||
then the relative form (by adding the value to the attribute). |
|||
|
|||
The order of attributes considered when this relativedelta is |
|||
added to a datetime is: |
|||
|
|||
1. Year |
|||
2. Month |
|||
3. Day |
|||
4. Hours |
|||
5. Minutes |
|||
6. Seconds |
|||
7. Microseconds |
|||
|
|||
Finally, weekday is applied, using the rule described above. |
|||
|
|||
For example |
|||
|
|||
>>> from datetime import datetime |
|||
>>> from dateutil.relativedelta import relativedelta, MO |
|||
>>> dt = datetime(2018, 4, 9, 13, 37, 0) |
|||
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) |
|||
>>> dt + delta |
|||
datetime.datetime(2018, 4, 2, 14, 37) |
|||
|
|||
First, the day is set to 1 (the first of the month), then 25 hours |
|||
are added, to get to the 2nd day and 14th hour, finally the |
|||
weekday is applied, but since the 2nd is already a Monday there is |
|||
no effect. |
|||
|
|||
""" |
|||
|
|||
def __init__(self, dt1=None, dt2=None, |
|||
years=0, months=0, days=0, leapdays=0, weeks=0, |
|||
hours=0, minutes=0, seconds=0, microseconds=0, |
|||
year=None, month=None, day=None, weekday=None, |
|||
yearday=None, nlyearday=None, |
|||
hour=None, minute=None, second=None, microsecond=None): |
|||
|
|||
if dt1 and dt2: |
|||
# datetime is a subclass of date. So both must be date |
|||
if not (isinstance(dt1, datetime.date) and |
|||
isinstance(dt2, datetime.date)): |
|||
raise TypeError("relativedelta only diffs datetime/date") |
|||
|
|||
# We allow two dates, or two datetimes, so we coerce them to be |
|||
# of the same type |
|||
if (isinstance(dt1, datetime.datetime) != |
|||
isinstance(dt2, datetime.datetime)): |
|||
if not isinstance(dt1, datetime.datetime): |
|||
dt1 = datetime.datetime.fromordinal(dt1.toordinal()) |
|||
elif not isinstance(dt2, datetime.datetime): |
|||
dt2 = datetime.datetime.fromordinal(dt2.toordinal()) |
|||
|
|||
self.years = 0 |
|||
self.months = 0 |
|||
self.days = 0 |
|||
self.leapdays = 0 |
|||
self.hours = 0 |
|||
self.minutes = 0 |
|||
self.seconds = 0 |
|||
self.microseconds = 0 |
|||
self.year = None |
|||
self.month = None |
|||
self.day = None |
|||
self.weekday = None |
|||
self.hour = None |
|||
self.minute = None |
|||
self.second = None |
|||
self.microsecond = None |
|||
self._has_time = 0 |
|||
|
|||
# Get year / month delta between the two |
|||
months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) |
|||
self._set_months(months) |
|||
|
|||
# Remove the year/month delta so the timedelta is just well-defined |
|||
# time units (seconds, days and microseconds) |
|||
dtm = self.__radd__(dt2) |
|||
|
|||
# If we've overshot our target, make an adjustment |
|||
if dt1 < dt2: |
|||
compare = operator.gt |
|||
increment = 1 |
|||
else: |
|||
compare = operator.lt |
|||
increment = -1 |
|||
|
|||
while compare(dt1, dtm): |
|||
months += increment |
|||
self._set_months(months) |
|||
dtm = self.__radd__(dt2) |
|||
|
|||
# Get the timedelta between the "months-adjusted" date and dt1 |
|||
delta = dt1 - dtm |
|||
self.seconds = delta.seconds + delta.days * 86400 |
|||
self.microseconds = delta.microseconds |
|||
else: |
|||
# Check for non-integer values in integer-only quantities |
|||
if any(x is not None and x != int(x) for x in (years, months)): |
|||
raise ValueError("Non-integer years and months are " |
|||
"ambiguous and not currently supported.") |
|||
|
|||
# Relative information |
|||
self.years = int(years) |
|||
self.months = int(months) |
|||
self.days = days + weeks * 7 |
|||
self.leapdays = leapdays |
|||
self.hours = hours |
|||
self.minutes = minutes |
|||
self.seconds = seconds |
|||
self.microseconds = microseconds |
|||
|
|||
# Absolute information |
|||
self.year = year |
|||
self.month = month |
|||
self.day = day |
|||
self.hour = hour |
|||
self.minute = minute |
|||
self.second = second |
|||
self.microsecond = microsecond |
|||
|
|||
if any(x is not None and int(x) != x |
|||
for x in (year, month, day, hour, |
|||
minute, second, microsecond)): |
|||
# For now we'll deprecate floats - later it'll be an error. |
|||
warn("Non-integer value passed as absolute information. " + |
|||
"This is not a well-defined condition and will raise " + |
|||
"errors in future versions.", DeprecationWarning) |
|||
|
|||
if isinstance(weekday, integer_types): |
|||
self.weekday = weekdays[weekday] |
|||
else: |
|||
self.weekday = weekday |
|||
|
|||
yday = 0 |
|||
if nlyearday: |
|||
yday = nlyearday |
|||
elif yearday: |
|||
yday = yearday |
|||
if yearday > 59: |
|||
self.leapdays = -1 |
|||
if yday: |
|||
ydayidx = [31, 59, 90, 120, 151, 181, 212, |
|||
243, 273, 304, 334, 366] |
|||
for idx, ydays in enumerate(ydayidx): |
|||
if yday <= ydays: |
|||
self.month = idx+1 |
|||
if idx == 0: |
|||
self.day = yday |
|||
else: |
|||
self.day = yday-ydayidx[idx-1] |
|||
break |
|||
else: |
|||
raise ValueError("invalid year day (%d)" % yday) |
|||
|
|||
self._fix() |
|||
|
|||
def _fix(self): |
|||
if abs(self.microseconds) > 999999: |
|||
s = _sign(self.microseconds) |
|||
div, mod = divmod(self.microseconds * s, 1000000) |
|||
self.microseconds = mod * s |
|||
self.seconds += div * s |
|||
if abs(self.seconds) > 59: |
|||
s = _sign(self.seconds) |
|||
div, mod = divmod(self.seconds * s, 60) |
|||
self.seconds = mod * s |
|||
self.minutes += div * s |
|||
if abs(self.minutes) > 59: |
|||
s = _sign(self.minutes) |
|||
div, mod = divmod(self.minutes * s, 60) |
|||
self.minutes = mod * s |
|||
self.hours += div * s |
|||
if abs(self.hours) > 23: |
|||
s = _sign(self.hours) |
|||
div, mod = divmod(self.hours * s, 24) |
|||
self.hours = mod * s |
|||
self.days += div * s |
|||
if abs(self.months) > 11: |
|||
s = _sign(self.months) |
|||
div, mod = divmod(self.months * s, 12) |
|||
self.months = mod * s |
|||
self.years += div * s |
|||
if (self.hours or self.minutes or self.seconds or self.microseconds |
|||
or self.hour is not None or self.minute is not None or |
|||
self.second is not None or self.microsecond is not None): |
|||
self._has_time = 1 |
|||
else: |
|||
self._has_time = 0 |
|||
|
|||
@property |
|||
def weeks(self): |
|||
return int(self.days / 7.0) |
|||
|
|||
@weeks.setter |
|||
def weeks(self, value): |
|||
self.days = self.days - (self.weeks * 7) + value * 7 |
|||
|
|||
def _set_months(self, months): |
|||
self.months = months |
|||
if abs(self.months) > 11: |
|||
s = _sign(self.months) |
|||
div, mod = divmod(self.months * s, 12) |
|||
self.months = mod * s |
|||
self.years = div * s |
|||
else: |
|||
self.years = 0 |
|||
|
|||
def normalized(self): |
|||
""" |
|||
Return a version of this object represented entirely using integer |
|||
values for the relative attributes. |
|||
|
|||
>>> relativedelta(days=1.5, hours=2).normalized() |
|||
relativedelta(days=+1, hours=+14) |
|||
|
|||
:return: |
|||
Returns a :class:`dateutil.relativedelta.relativedelta` object. |
|||
""" |
|||
# Cascade remainders down (rounding each to roughly nearest microsecond) |
|||
days = int(self.days) |
|||
|
|||
hours_f = round(self.hours + 24 * (self.days - days), 11) |
|||
hours = int(hours_f) |
|||
|
|||
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) |
|||
minutes = int(minutes_f) |
|||
|
|||
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) |
|||
seconds = int(seconds_f) |
|||
|
|||
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) |
|||
|
|||
# Constructor carries overflow back up with call to _fix() |
|||
return self.__class__(years=self.years, months=self.months, |
|||
days=days, hours=hours, minutes=minutes, |
|||
seconds=seconds, microseconds=microseconds, |
|||
leapdays=self.leapdays, year=self.year, |
|||
month=self.month, day=self.day, |
|||
weekday=self.weekday, hour=self.hour, |
|||
minute=self.minute, second=self.second, |
|||
microsecond=self.microsecond) |
|||
|
|||
def __add__(self, other): |
|||
if isinstance(other, relativedelta): |
|||
return self.__class__(years=other.years + self.years, |
|||
months=other.months + self.months, |
|||
days=other.days + self.days, |
|||
hours=other.hours + self.hours, |
|||
minutes=other.minutes + self.minutes, |
|||
seconds=other.seconds + self.seconds, |
|||
microseconds=(other.microseconds + |
|||
self.microseconds), |
|||
leapdays=other.leapdays or self.leapdays, |
|||
year=(other.year if other.year is not None |
|||
else self.year), |
|||
month=(other.month if other.month is not None |
|||
else self.month), |
|||
day=(other.day if other.day is not None |
|||
else self.day), |
|||
weekday=(other.weekday if other.weekday is not None |
|||
else self.weekday), |
|||
hour=(other.hour if other.hour is not None |
|||
else self.hour), |
|||
minute=(other.minute if other.minute is not None |
|||
else self.minute), |
|||
second=(other.second if other.second is not None |
|||
else self.second), |
|||
microsecond=(other.microsecond if other.microsecond |
|||
is not None else |
|||
self.microsecond)) |
|||
if isinstance(other, datetime.timedelta): |
|||
return self.__class__(years=self.years, |
|||
months=self.months, |
|||
days=self.days + other.days, |
|||
hours=self.hours, |
|||
minutes=self.minutes, |
|||
seconds=self.seconds + other.seconds, |
|||
microseconds=self.microseconds + other.microseconds, |
|||
leapdays=self.leapdays, |
|||
year=self.year, |
|||
month=self.month, |
|||
day=self.day, |
|||
weekday=self.weekday, |
|||
hour=self.hour, |
|||
minute=self.minute, |
|||
second=self.second, |
|||
microsecond=self.microsecond) |
|||
if not isinstance(other, datetime.date): |
|||
return NotImplemented |
|||
elif self._has_time and not isinstance(other, datetime.datetime): |
|||
other = datetime.datetime.fromordinal(other.toordinal()) |
|||
year = (self.year or other.year)+self.years |
|||
month = self.month or other.month |
|||
if self.months: |
|||
assert 1 <= abs(self.months) <= 12 |
|||
month += self.months |
|||
if month > 12: |
|||
year += 1 |
|||
month -= 12 |
|||
elif month < 1: |
|||
year -= 1 |
|||
month += 12 |
|||
day = min(calendar.monthrange(year, month)[1], |
|||
self.day or other.day) |
|||
repl = {"year": year, "month": month, "day": day} |
|||
for attr in ["hour", "minute", "second", "microsecond"]: |
|||
value = getattr(self, attr) |
|||
if value is not None: |
|||
repl[attr] = value |
|||
days = self.days |
|||
if self.leapdays and month > 2 and calendar.isleap(year): |
|||
days += self.leapdays |
|||
ret = (other.replace(**repl) |
|||
+ datetime.timedelta(days=days, |
|||
hours=self.hours, |
|||
minutes=self.minutes, |
|||
seconds=self.seconds, |
|||
microseconds=self.microseconds)) |
|||
if self.weekday: |
|||
weekday, nth = self.weekday.weekday, self.weekday.n or 1 |
|||
jumpdays = (abs(nth) - 1) * 7 |
|||
if nth > 0: |
|||
jumpdays += (7 - ret.weekday() + weekday) % 7 |
|||
else: |
|||
jumpdays += (ret.weekday() - weekday) % 7 |
|||
jumpdays *= -1 |
|||
ret += datetime.timedelta(days=jumpdays) |
|||
return ret |
|||
|
|||
def __radd__(self, other): |
|||
return self.__add__(other) |
|||
|
|||
def __rsub__(self, other): |
|||
return self.__neg__().__radd__(other) |
|||
|
|||
def __sub__(self, other): |
|||
if not isinstance(other, relativedelta): |
|||
return NotImplemented # In case the other object defines __rsub__ |
|||
return self.__class__(years=self.years - other.years, |
|||
months=self.months - other.months, |
|||
days=self.days - other.days, |
|||
hours=self.hours - other.hours, |
|||
minutes=self.minutes - other.minutes, |
|||
seconds=self.seconds - other.seconds, |
|||
microseconds=self.microseconds - other.microseconds, |
|||
leapdays=self.leapdays or other.leapdays, |
|||
year=(self.year if self.year is not None |
|||
else other.year), |
|||
month=(self.month if self.month is not None else |
|||
other.month), |
|||
day=(self.day if self.day is not None else |
|||
other.day), |
|||
weekday=(self.weekday if self.weekday is not None else |
|||
other.weekday), |
|||
hour=(self.hour if self.hour is not None else |
|||
other.hour), |
|||
minute=(self.minute if self.minute is not None else |
|||
other.minute), |
|||
second=(self.second if self.second is not None else |
|||
other.second), |
|||
microsecond=(self.microsecond if self.microsecond |
|||
is not None else |
|||
other.microsecond)) |
|||
|
|||
def __abs__(self): |
|||
return self.__class__(years=abs(self.years), |
|||
months=abs(self.months), |
|||
days=abs(self.days), |
|||
hours=abs(self.hours), |
|||
minutes=abs(self.minutes), |
|||
seconds=abs(self.seconds), |
|||
microseconds=abs(self.microseconds), |
|||
leapdays=self.leapdays, |
|||
year=self.year, |
|||
month=self.month, |
|||
day=self.day, |
|||
weekday=self.weekday, |
|||
hour=self.hour, |
|||
minute=self.minute, |
|||
second=self.second, |
|||
microsecond=self.microsecond) |
|||
|
|||
def __neg__(self): |
|||
return self.__class__(years=-self.years, |
|||
months=-self.months, |
|||
days=-self.days, |
|||
hours=-self.hours, |
|||
minutes=-self.minutes, |
|||
seconds=-self.seconds, |
|||
microseconds=-self.microseconds, |
|||
leapdays=self.leapdays, |
|||
year=self.year, |
|||
month=self.month, |
|||
day=self.day, |
|||
weekday=self.weekday, |
|||
hour=self.hour, |
|||
minute=self.minute, |
|||
second=self.second, |
|||
microsecond=self.microsecond) |
|||
|
|||
def __bool__(self): |
|||
return not (not self.years and |
|||
not self.months and |
|||
not self.days and |
|||
not self.hours and |
|||
not self.minutes and |
|||
not self.seconds and |
|||
not self.microseconds and |
|||
not self.leapdays and |
|||
self.year is None and |
|||
self.month is None and |
|||
self.day is None and |
|||
self.weekday is None and |
|||
self.hour is None and |
|||
self.minute is None and |
|||
self.second is None and |
|||
self.microsecond is None) |
|||
# Compatibility with Python 2.x |
|||
__nonzero__ = __bool__ |
|||
|
|||
def __mul__(self, other): |
|||
try: |
|||
f = float(other) |
|||
except TypeError: |
|||
return NotImplemented |
|||
|
|||
return self.__class__(years=int(self.years * f), |
|||
months=int(self.months * f), |
|||
days=int(self.days * f), |
|||
hours=int(self.hours * f), |
|||
minutes=int(self.minutes * f), |
|||
seconds=int(self.seconds * f), |
|||
microseconds=int(self.microseconds * f), |
|||
leapdays=self.leapdays, |
|||
year=self.year, |
|||
month=self.month, |
|||
day=self.day, |
|||
weekday=self.weekday, |
|||
hour=self.hour, |
|||
minute=self.minute, |
|||
second=self.second, |
|||
microsecond=self.microsecond) |
|||
|
|||
__rmul__ = __mul__ |
|||
|
|||
def __eq__(self, other): |
|||
if not isinstance(other, relativedelta): |
|||
return NotImplemented |
|||
if self.weekday or other.weekday: |
|||
if not self.weekday or not other.weekday: |
|||
return False |
|||
if self.weekday.weekday != other.weekday.weekday: |
|||
return False |
|||
n1, n2 = self.weekday.n, other.weekday.n |
|||
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): |
|||
return False |
|||
return (self.years == other.years and |
|||
self.months == other.months and |
|||
self.days == other.days and |
|||
self.hours == other.hours and |
|||
self.minutes == other.minutes and |
|||
self.seconds == other.seconds and |
|||
self.microseconds == other.microseconds and |
|||
self.leapdays == other.leapdays and |
|||
self.year == other.year and |
|||
self.month == other.month and |
|||
self.day == other.day and |
|||
self.hour == other.hour and |
|||
self.minute == other.minute and |
|||
self.second == other.second and |
|||
self.microsecond == other.microsecond) |
|||
|
|||
def __hash__(self): |
|||
return hash(( |
|||
self.weekday, |
|||
self.years, |
|||
self.months, |
|||
self.days, |
|||
self.hours, |
|||
self.minutes, |
|||
self.seconds, |
|||
self.microseconds, |
|||
self.leapdays, |
|||
self.year, |
|||
self.month, |
|||
self.day, |
|||
self.hour, |
|||
self.minute, |
|||
self.second, |
|||
self.microsecond, |
|||
)) |
|||
|
|||
def __ne__(self, other): |
|||
return not self.__eq__(other) |
|||
|
|||
def __div__(self, other): |
|||
try: |
|||
reciprocal = 1 / float(other) |
|||
except TypeError: |
|||
return NotImplemented |
|||
|
|||
return self.__mul__(reciprocal) |
|||
|
|||
__truediv__ = __div__ |
|||
|
|||
def __repr__(self): |
|||
l = [] |
|||
for attr in ["years", "months", "days", "leapdays", |
|||
"hours", "minutes", "seconds", "microseconds"]: |
|||
value = getattr(self, attr) |
|||
if value: |
|||
l.append("{attr}={value:+g}".format(attr=attr, value=value)) |
|||
for attr in ["year", "month", "day", "weekday", |
|||
"hour", "minute", "second", "microsecond"]: |
|||
value = getattr(self, attr) |
|||
if value is not None: |
|||
l.append("{attr}={value}".format(attr=attr, value=repr(value))) |
|||
return "{classname}({attrs})".format(classname=self.__class__.__name__, |
|||
attrs=", ".join(l)) |
|||
|
|||
|
|||
def _sign(x): |
|||
return int(copysign(1, x)) |
|||
|
|||
# vim:ts=4:sw=4:et |
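# Illustrative sketch (not from the upstream file): the two construction styles
# described in the class docstring, diffing two datetimes and combining absolute
# with relative keyword arguments.
from datetime import datetime

from dateutil.relativedelta import relativedelta, FR

print(relativedelta(datetime(2024, 5, 11), datetime(2023, 2, 1)))
# relativedelta(years=+1, months=+3, days=+10)

print(datetime(2024, 5, 1) + relativedelta(day=31, weekday=FR(-1)))
# last Friday of May 2024 -> 2024-05-31 00:00:00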
File diff suppressed because it is too large
@ -0,0 +1,12 @@ |
|||
# -*- coding: utf-8 -*- |
|||
from .tz import * |
|||
from .tz import __doc__ |
|||
|
|||
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", |
|||
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", |
|||
"enfold", "datetime_ambiguous", "datetime_exists", |
|||
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] |
|||
|
|||
|
|||
class DeprecatedTzFormatWarning(Warning): |
|||
"""Warning raised when time zones are parsed from deprecated formats.""" |
Binary file not shown.
Some files were not shown because too many files changed in this diff