Compare commits
51 Commits
SHA1
---
eaac3d8e0d
7478e0fc78
939a72c21e
7bfca82f33
58b4c6e116
61e8c85bbe
dac83ccbf9
4d2b58b744
6db74b04f5
060c235e3f
ea14cde427
fec0969d7f
af36c44e66
b913b2cb01
37cbae2683
14d3064309
fd50a8628c
d4165b0582
69338434ac
c0a7704db8
1aa5d6f2cc
0b864dfee9
a758f26285
f421a8abd6
e3d5c7c7a7
1d52a1dabb
1fdfb491cf
0c76dc11d7
965ede729c
432e73375a
6eccf95cf8
3607e2ef69
b655926202
0d63af7bbc
b94e080c96
31915dc7d6
dceb2d7730
504013d890
65eb1436d6
e2676beee0
3963f98baa
c01833f411
f7110aa125
1dce636192
2a998611bf
0882dd06f3
ec60c85695
278d79f6d0
c04c88e2af
c9aee44812
69da4cda15
@@ -24,4 +24,13 @@ CC-BY-SA Olli Graf
|17 | Database|
|18 | Generators and list comprehensions|
|19 | Web pages (Flask)|
|20 | Virtual environments|
|21 | Interrupts & signals|
|22 | NumPy|
|23 | MatPlotLib|
|24 | match|
|25 | Regular expressions|
|26 | lambda functions|
|27 | logging.config|
|28 | Decorators|
argparse/action.py (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#! /usr/bin/python3

import argparse

def do_test():
    print('do_test()')

parser = argparse.ArgumentParser(prog='action', description='Demonstration der Action', epilog='Ende der Hilfe')

parser.add_argument('--update', help='update Hilfe')
parser.add_argument('--dry-run', help='dry-run Hilfe')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--true', action='store_true')
parser.add_argument('-append', action='append', nargs=1)
parser.add_argument('--false', action='store_false')
parser.add_argument('--store', action='store')
parser.add_argument('--const', action='store_const', const=19)

parser.add_argument('--delete', action=argparse.BooleanOptionalAction)
#parser.add_argument('--no-delete', action = argparse.BooleanOptionalAction)

args = parser.parse_args()
print(f'args={args}')
print(f'verbose={args.verbose}')
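To see what each action stores without leaving Python, parse_args() can also be fed an explicit argument list instead of reading sys.argv. A minimal sketch against the parser defined above; the expected values in the comments assume Python 3.9+ (required for BooleanOptionalAction):

# Hypothetical smoke test: parse an explicit argument list.
args = parser.parse_args(['--verbose', '-vv', '--true', '--const', '--delete'])
print(args.verbose)  # 3    -- action='count' counts every occurrence of -v/--verbose
print(args.true)     # True -- store_true flips the default False
print(args.const)    # 19   -- store_const stores the constant; the flag takes no value
print(args.delete)   # True -- BooleanOptionalAction also creates --no-delete, which stores False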
argparse/help.py (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#! /usr/bin/python3

import argparse

parser = argparse.ArgumentParser(prog='HILFE', description='Demonstration der Hilfeseite', epilog='Ende der Hilfe im Epilog')

parser.add_argument('--update', help='update Hilfe')
parser.add_argument('--dry-run', help='dry-run Hilfe')

args = parser.parse_args()
camera/.gitignore (new vendored file, 3 lines)
@@ -0,0 +1,3 @@
bin
lib
*.jpg
camera/pistill.py (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#! /usr/bin/python

from picamera2 import Picamera2
from libcamera import controls
from time import sleep
import sys

to_file = ''
picam = Picamera2()

def take_still(filename):
    config = picam.create_preview_configuration()
    picam.configure(config)

    picam.start()
    picam.set_controls({"AfMode": controls.AfModeEnum.Continuous})
    sleep(5)
    picam.capture_file(filename)
    picam.close()

def parseArgs(argv):
    global to_file
    count = 0
    for arg in argv:
        if arg == '-o':
            to_file = argv[count+1]

        count += 1

if __name__ == '__main__':
    parseArgs(sys.argv)
    print(f'to_file={to_file}')
    take_still(to_file)
camera/test-python.jpg (new binary file, 66 KiB; not shown)
camera/testcam.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from picamera2 import Picamera2
from libcamera import controls
from time import sleep

picam = Picamera2()

config = picam.create_preview_configuration()
picam.configure(config)

picam.start()
picam.set_controls({"AfMode": controls.AfModeEnum.Continuous})
sleep(2)
picam.capture_file("/tank/cameratests/terri/test-python.jpg")

picam.close()
date_diff.py (new executable file, 52 lines)
@@ -0,0 +1,52 @@
#! /usr/bin/python
import sys
from datetime import datetime

def date_diff_in_days(date1, date2):
    try:
        # Convert the given date strings into datetime objects.
        if date1 == '$today':
            print('date1 ist heutiges Datum')
            date1_obj = datetime.today()
            date1_obj = date1_obj.replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            print(f'konvertiere erstes Datum {date1}')
            date1_obj = datetime.strptime(date1, "%d.%m.%Y")

        if date2 == '$today':
            print('date2 ist heutiges Datum')
            date2_obj = datetime.today()
            date2_obj = date2_obj.replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            print(f'konvertiere zweites Datum {date2}')
            date2_obj = datetime.strptime(date2, "%d.%m.%Y")

        # Compute the difference between the two dates.
        print(f'erstes Datum: {date1_obj}, zweites Datum: {date2_obj}')
        diff = abs(date1_obj - date2_obj).days
        return diff
    except ValueError as e:
        print("Fehler beim Parsen der Datumsangaben:", e)
        return None

if __name__ == "__main__":
    # Check that exactly two dates were passed as parameters.
    print(f'Params: {sys.argv}')
    print(f'Anzahl Param: {len(sys.argv)}')
    if len(sys.argv) != 3:
        print("Bitte geben Sie zwei Datumsangaben im Format DD.MM.YYYY als Kommandozeilenparameter ein.")
    else:
        date1 = sys.argv[1]
        date2 = sys.argv[2]

        # Compute the difference in days between the two dates.
        difference = date_diff_in_days(date1, date2)
        if difference is not None:
            if sys.argv[1] == '$today':
                date1 = 'heutigen Tag'
            if sys.argv[2] == '$today':
                date2 = 'heutigen Tag'
            print(f"Zwischen dem {date1} und dem {date2} liegen {difference} Tage.")
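Because the script accepts either a concrete date or the literal $today, both paths are worth seeing side by side. A minimal sketch of hypothetical direct calls, assuming date_diff_in_days() has been imported from date_diff.py:

# Hypothetical calls illustrating the two input forms:
print(date_diff_in_days('01.01.2024', '31.01.2024'))  # 30
print(date_diff_in_days('31.01.2024', '$today'))      # days between 31.01.2024 and today
print(date_diff_in_days('2024-01-01', '31.01.2024'))  # None -- wrong format, the ValueError is caught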
docker/Dockerfile (new file, 22 lines)
@@ -0,0 +1,22 @@
# Python base image
FROM python:3.11-alpine

# Working directory inside the container
WORKDIR /app

# Copy the application dependencies into the container image
COPY requirements.txt .

# Install the dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Port of the Flask application
EXPOSE 8085

# Copy our own code into the container image
COPY fib/*.py /app/
COPY fib/static /app/static
COPY fib/templates /app/templates

# Command that starts the application
CMD ["python", "/app/app.py"]
docker/docker-compose.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
version: "2.1"
services:
  fibserver:
    image: hans:5000/fibserver:1
    container_name: fibserver
    ports:
      - 8085:8085
    restart: unless-stopped
docker/fib/app.py (new file, 45 lines)
@@ -0,0 +1,45 @@
from flask import Flask, render_template, request
from fib_gen import fib_generator
import logging

logging.basicConfig(format='%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s', level=logging.DEBUG)
app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/fib', methods=['GET', 'POST'])
def fib():
    fibg = fib_generator()
    result = ''
    formel = ''  # initialise the calculation formula; also needed for GET requests
    logging.debug(f'request.method={request.method}')
    if request.method == 'POST':
        element = request.form['element']

        logging.debug(f'berechne Element:{element}')

        n = int(element)
        #formel= str(n-2) + '+' + str(n-1)
        if element is not None:
            for i in range(n+1):
                fib = next(fibg)
                if i == n-2:  # add fib(n-2) to the calculation formula
                    formel += str(fib)
                if i == n-1:  # append fib(n-1) with a + sign
                    formel += '+' + str(fib)

            logging.debug(f'Fibonacci-Zahl: {fib}')
            logging.debug(f'formel={formel}')
            result = str(fib)
    elif request.method == 'GET':
        logging.debug('GET-Request')
    return render_template('index.html', result=result, formel=formel)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8085, debug=True)
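A concrete trace makes the formula logic in the POST branch easier to follow. Assuming fib(0)=0 and fib(1)=1, a request with element=7 walks the generator like this:

# Hypothetical trace of the /fib loop for element = 7:
# i:    0  1  2  3  4  5  6  7
# fib:  0  1  1  2  3  5  8  13
# i == n-2 == 5  ->  formel = '5'
# i == n-1 == 6  ->  formel = '5+8'
# result = '13'; the page renders "Formel: 5+8" and "Fibonacci-Zahl: 13"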
docker/fib/fib_gen.py (new file, 12 lines)
@@ -0,0 +1,12 @@
def fib_generator():
    a, b = 0, 1

    while True:
        yield a
        a, b = b, a + b
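The generator is infinite, so the caller decides how much of the sequence to take. A minimal sketch, assuming fib_gen.py is on the import path:

from itertools import islice
from fib_gen import fib_generator

# Take the first ten Fibonacci numbers from the infinite generator.
print(list(islice(fib_generator(), 10)))  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]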
docker/fib/static/css/normalize.css (new vendored file, 427 lines)
/*! normalize.css v3.0.2 | MIT License | git.io/normalize */
Vendored stylesheet; contents not shown.
docker/fib/static/css/skeleton.css (new vendored file, 418 lines)
/* Skeleton V2.0.4 | Copyright 2014, Dave Gamache | www.getskeleton.com | MIT License | 12/29/2014 */
Vendored stylesheet; contents not shown.
docker/fib/templates/index.html (new file, 26 lines)
@@ -0,0 +1,26 @@
<!DOCTYPE html>

<html lang='en'>
<head>
  <meta charset='utf-8' />
  <link href='static/css/skeleton.css' rel='stylesheet' type='text/css' />
  <title>Fibonacci Zahlen berechnen</title>
</head>

<body>
  <h2>Fibonacci Generator</h2>
  <!-- FORM -->
  <form action='fib' method='post'>
    <label>Folgen Element</label>
    <input
      type='text'
      placeholder='Fibonacci Element'
      name='element'
      value='{{request.form.element}}'
    /><br />
    <button type='submit'>Berechnen</button>
    <p>Formel: {{formel}}</p>
    <p>Fibonacci-Zahl: {{result}}</p>
  </form>
</body>
</html>
docker/pyvenv.cfg (new file, 8 lines)
@@ -0,0 +1,8 @@
home = /usr/bin
implementation = CPython
version_info = 3.11.2.final.0
virtualenv = 20.17.1+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3
docker/requirements.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
blinker==1.7.0
click==8.1.7
Flask==3.0.1
itsdangerous==2.1.2
Jinja2==3.1.3
MarkupSafe==2.1.3
Werkzeug==3.0.1
getattr/.gitignore (new vendored file, 1 line)
@@ -0,0 +1 @@
__pycache__
getattr/Kreis.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from math import pi

class Kreis:
    def __init__(self, radius):
        self.radius = radius

    def durchmesser(self):
        return self.radius * 2

    def umfang(self):
        return self.durchmesser() * pi

    def flaeche(self):
        return self.radius ** 2 * pi
getattr/Kreis_getattr.py (new file, 19 lines)
@@ -0,0 +1,19 @@
from math import pi

class Kreis_getattr:
    def __init__(self, radius):
        self.radius = radius
        self.operators = {
            'durchmesser': lambda x: self.radius * 2,
            # no parentheses: self.durchmesser is itself resolved via __getattr__
            'umfang': lambda x: self.durchmesser * pi,
            'flaeche': lambda x: self.radius ** 2 * pi
        }

    def __getattr__(self, name):
        if name not in self.operators:
            raise TypeError(f'unbekannte Operation {name}')

        return self.operators[name](0)
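__getattr__ is only consulted when normal attribute lookup fails, which is exactly what makes this class work: radius is found directly on the instance, while durchmesser falls through to the operators dict. A minimal sketch:

k = Kreis_getattr(1)
print(k.radius)       # 1 -- real instance attribute, __getattr__ is never called
print(k.durchmesser)  # 2 -- missing attribute, resolved via __getattr__
print(k.umfang)       # 6.283185307179586 -- the lambda reads self.durchmesser,
                      #   which itself goes through __getattr__ again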
getattr/name.py (new file, 17 lines)
@@ -0,0 +1,17 @@
class Name:
    def __init__(self, vorname, name):
        self.vorname = vorname
        self.name = name

    def __getattr__(self, attr):
        if attr == 'fullname':
            return self.vorname + ' ' + self.name
        elif attr == 'sortname':
            return self.name + ',' + self.vorname
        else:
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attr}'")
getattr/testKreis.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import unittest
import Kreis
import Kreis_getattr

class TestKreis(unittest.TestCase):

    def setUp(self):
        self.kreis = Kreis.Kreis(1)
        self.kreis_getattr = Kreis_getattr.Kreis_getattr(1)

    def test_Umfang(self):
        umfang = self.kreis.umfang()
        umfang_getattr = self.kreis_getattr.umfang
        print(f'kreis Umfang {umfang}')
        print(f'kreis_getattr Umfang {umfang_getattr}')
        self.assertEqual(umfang, umfang_getattr)

    def test_Flaeche(self):
        flaeche = self.kreis.flaeche()
        flaeche_getattr = self.kreis_getattr.flaeche
        print(f'kreis Fläche {flaeche}')
        print(f'kreis_getattr Fläche {flaeche_getattr}')
        self.assertEqual(flaeche, flaeche_getattr)

    def test_Durchmesser(self):
        durchmesser = self.kreis.durchmesser()
        durchmesser_getattr = self.kreis_getattr.durchmesser
        print(f'kreis Durchmesser {durchmesser}')
        print(f'kreis_getattr Durchmesser {durchmesser_getattr}')
        self.assertEqual(durchmesser, durchmesser_getattr)

if __name__ == '__main__':
    unittest.main()
getattr/testname.py (new file, 28 lines)
@@ -0,0 +1,28 @@
import unittest
import logging
from name import Name

__LOGLEVEL__ = logging.DEBUG

logging.basicConfig(format='%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s', level=__LOGLEVEL__)

class TestName(unittest.TestCase):

    def setUp(self):
        logging.debug('setting up test')
        self.name = Name('Homer', 'Simpson')

    def test_getattr(self):
        fullname = self.name.fullname
        sortname = self.name.sortname

        self.assertEqual('Homer Simpson', fullname)
        self.assertEqual('Simpson,Homer', sortname)

if __name__ == '__main__':
    logging.debug('Test startet.')
    unittest.main()
rest_chat_server/.gitignore (new vendored file, 3 lines)
@@ -0,0 +1,3 @@
__pycache__
.venv
*.log
rest_chat_server/.python-version (new file, 1 line)
@@ -0,0 +1 @@
3.9
rest_chat_server/README.md (new empty file)
rest_chat_server/chat_server.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import falcon
import json
import logging
from logging import config
from datetime import datetime

class Message:

    def __init__(self, user, text):
        self.user = user
        self.text = text
        self.created = datetime.now()

    def __str__(self):
        return self.text

class ChatServer:

    def __init__(self):
        self.msglist = {}

        initmsg = Message('system', 'Chat Server bereit')

        self.addMsg(initmsg)

    def addMsg(self, msg):
        created = datetime.now()
        self.msglist[created.strftime('%d.%m.%y %H:%M.%f')] = msg

    def on_get(self, req, resp):
        logging.debug('on_get()')

        logging.debug(f'msglist= {self.msglist}')
        json_msgs = {}

        for key in self.msglist:
            text = self.msglist[key].text
            json_msgs[key] = {
                'key': key,
                'user': self.msglist[key].user,
                'text': text
            }

        resp.status = falcon.HTTP_OK
        resp.media = json_msgs

    def on_post(self, req, resp):
        logging.debug('on_post()')

        json_string = req.media

        logging.debug(f'json_string={json_string}')

        msg = Message(json_string['user'], json_string['text'])
        self.addMsg(msg)

        resp.status = falcon.HTTP_OK

    def on_delete(self, req, resp):
        logging.debug('on_delete()')

def create_chat_resource():
    chat_endpoint = ChatServer()

    logging.debug(f'endpoint={chat_endpoint}')

    return chat_endpoint

application = falcon.App()

with open('log_config.json') as file_config:
    config.dictConfig(json.load(file_config))

resource = create_chat_resource()
application.add_route('/msg', resource)
logging.debug('Chat REST Service bereit')
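Run behind gunicorn (which the project's dependencies suggest), the /msg route can be exercised with the standard library alone. A minimal sketch, assuming the server listens on gunicorn's default localhost:8000:

import json
import urllib.request

BASE = 'http://localhost:8000/msg'  # assumed host and port

# POST a new chat message as JSON.
req = urllib.request.Request(
    BASE,
    data=json.dumps({'user': 'homer', 'text': 'Hallo'}).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
    method='POST',
)
urllib.request.urlopen(req)

# GET the full message list back.
with urllib.request.urlopen(BASE) as resp:
    print(json.load(resp))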
rest_chat_server/hello.py (new file, 6 lines)
@@ -0,0 +1,6 @@
def main():
    print("Hello from rest-chat-server!")

if __name__ == "__main__":
    main()
rest_chat_server/log_config.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
    "version": 1,
    "formatters": {
        "std_out": {
            "format": "%(asctime)s : %(levelname)s : %(module)s : %(funcName)s : %(lineno)d : (Process Details : (%(process)d, %(processName)s), Thread Details : (%(thread)d, %(threadName)s))\nLog : %(message)s",
            "datefmt": "%d-%m-%Y %I:%M:%S"
        }
    },
    "handlers": {
        "console": {
            "formatter": "std_out",
            "class": "logging.StreamHandler",
            "level": "DEBUG"
        },
        "file": {
            "formatter": "std_out",
            "class": "logging.FileHandler",
            "level": "DEBUG",
            "filename": "chat_server.log"
        }
    },
    "root": {
        "handlers": ["console", "file"],
        "level": "DEBUG"
    }
}
rest_chat_server/pyproject.toml (new file, 10 lines)
@@ -0,0 +1,10 @@
[project]
name = "rest-chat-server"
version = "0.0.1"
description = "Chat Server mit REST"
readme = "README.md"
requires-python = ">=3.9"
dependencies = [
    "falcon>=4.0.2",
    "gunicorn>=23.0.0",
]
rest_chat_server/uv.lock (new generated file, 87 lines)
Generated lockfile pinning falcon 4.0.2, gunicorn 23.0.0, and packaging 25.0; wheel hashes not shown.
teil13/.gitignore (vendored, 2 lines changed)
@@ -1,2 +1,2 @@
-./__pycache__
+__pycache__
 ./network/__pycache__/*
teil16/.gitignore (vendored, 2 lines changed)
@@ -1,2 +1,2 @@
-./__pycache__
+__pycache__
 ./network/__pycache__/*
teil16/plztest.py (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
# coding: utf-8
|
||||
import unittest
|
||||
import logging
|
||||
import re #regular expressions
|
||||
|
||||
logging.basicConfig( format='%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s', level=logging.DEBUG)
|
||||
|
||||
class TestPLZ(unittest.TestCase):
|
||||
|
||||
# Korrektes Format der PLZ prüfen. Falsche Version
|
||||
|
||||
def checkPLZ_falsch(self,plz):
|
||||
try:
|
||||
if len(pl) == 5 and int(plz) > 0:
|
||||
return True
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
def checkPLZ_korrekt(self,plz):
|
||||
#regulären Audruck aufbauen, genau 5 Ziffern.
|
||||
pattern = re.compile('\d\d\d\d\d')
|
||||
#Ausdruck auf String anwenden.
|
||||
return pattern.match(plz)
|
||||
|
||||
|
||||
# Die setUp() Methode wird zu Beginn jedes Testcases aufgrufen
|
||||
def setUp(self):
|
||||
logging.debug('setting up test')
|
||||
self.testdaten = ['42287','42289','42119','42277','44139','-1111']
|
||||
|
||||
def test_false_positive(self):
|
||||
|
||||
for plz in self.testdaten:
|
||||
self.assertTrue(self.checkPLZ_falsch(plz))
|
||||
|
||||
def test_plz(self):
|
||||
|
||||
logging.debug('starte test_plz()')
|
||||
for plz in self.testdaten:
|
||||
logging.debug(f'teste ${plz}')
|
||||
self.assertTrue(self.checkPLZ_korrekt(plz))
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
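One caveat about `checkPLZ_korrekt()`: `pattern.match()` anchors only at the start of the string, so a six-digit input still matches the five-digit pattern. A small standalone sketch (not part of the course file) contrasting it with `fullmatch()`:

```python
import re

pattern = re.compile(r'\d\d\d\d\d')

print(bool(pattern.match('123456')))      # True:  match() only anchors at the start
print(bool(pattern.fullmatch('123456')))  # False: fullmatch() must consume the whole string
print(bool(pattern.fullmatch('42287')))   # True:  exactly five digits
```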
@@ -11,5 +11,5 @@ def fib_generator():
 fibo = fib_generator()


-for _ in range(int(sys.argv[1])+1):
+for _ in range(int(sys.argv[1])):
     print(next(fibo))
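The hunk above only shows the call site; for context, a minimal Fibonacci generator matching the `fib_generator()` name in the hunk header could look like this (a sketch assuming the usual definition, not the file's actual body):

```python
import sys


def fib_generator():
    # Yield Fibonacci numbers indefinitely; the caller decides how many to take.
    a, b = 0, 1
    while True:
        yield a
        a, b = b, a + b


fibo = fib_generator()

for _ in range(int(sys.argv[1])):
    print(next(fibo))
```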
teil20/bin/activate (Normal file, 83 lines)
@@ -0,0 +1,83 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly

if [ "${BASH_SOURCE-}" = "$0" ]; then
    echo "You must source this script: \$ source $0" >&2
    exit 33
fi

deactivate () {
    unset -f pydoc >/dev/null 2>&1 || true

    # reset old environment variables
    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
    if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
        PATH="$_OLD_VIRTUAL_PATH"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # The hash command must be called to get it to forget past
    # commands. Without forgetting past commands the $PATH changes
    # we made may not be respected
    hash -r 2>/dev/null

    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
        PS1="$_OLD_VIRTUAL_PS1"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "${1-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV='/home/pi/git/pythonkurs/teil20'
if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
    VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
fi
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1-}"
    if [ "x" != x ] ; then
        PS1="() ${PS1-}"
    else
        PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}"
    fi
    export PS1
fi

# Make sure to unalias pydoc if it's already there
alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true

pydoc () {
    python -m pydoc "$@"
}

# The hash command must be called to get it to forget past
# commands. Without forgetting past commands the $PATH changes
# we made may not be respected
hash -r 2>/dev/null
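The activation scripts in this commit belong to a virtual environment that was committed under `teil20/`. As a hedged aside (not how this particular environment was built, which used virtualenv judging by `activate_this.py` below), the standard library can create a comparable environment programmatically:

```python
import venv

# Create a virtual environment with pip available, comparable to teil20/.
# The directory name is taken from the scripts above; everything else is
# a stdlib default.
venv.create('teil20', with_pip=True)
```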
teil20/bin/activate.csh (Normal file, 55 lines)
@@ -0,0 +1,55 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.

set newline='\
'

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV '/home/pi/git/pythonkurs/teil20'

set _OLD_VIRTUAL_PATH="$PATH:q"
setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"



if ('' != "") then
    set env_name = '() '
else
    set env_name = '('"$VIRTUAL_ENV:t:q"') '
endif

if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then
    if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then
        set do_prompt = "1"
    else
        set do_prompt = "0"
    endif
else
    set do_prompt = "1"
endif

if ( $do_prompt == "1" ) then
    # Could be in a non-interactive environment,
    # in which case, $prompt is undefined and we wouldn't
    # care about the prompt anyway.
    if ( $?prompt ) then
        set _OLD_VIRTUAL_PROMPT="$prompt:q"
        if ( "$prompt:q" =~ *"$newline:q"* ) then
            :
        else
            set prompt = "$env_name:q$prompt:q"
        endif
    endif
endif

unset env_name
unset do_prompt

alias pydoc python -m pydoc

rehash
teil20/bin/activate.fish (Normal file, 100 lines)
@@ -0,0 +1,100 @@
# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
# Do not run it directly.

function _bashify_path -d "Converts a fish path to something bash can recognize"
    set fishy_path $argv
    set bashy_path $fishy_path[1]
    for path_part in $fishy_path[2..-1]
        set bashy_path "$bashy_path:$path_part"
    end
    echo $bashy_path
end

function _fishify_path -d "Converts a bash path to something fish can recognize"
    echo $argv | tr ':' '\n'
end

function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
        if test (echo $FISH_VERSION | head -c 1) -lt 3
            set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
        else
            set -gx PATH $_OLD_VIRTUAL_PATH
        end
        set -e _OLD_VIRTUAL_PATH
    end

    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        and functions -q _old_fish_prompt
        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
        set -l fish_function_path

        # Erase virtualenv's `fish_prompt` and restore the original.
        functions -e fish_prompt
        functions -c _old_fish_prompt fish_prompt
        functions -e _old_fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
    end

    set -e VIRTUAL_ENV

    if test "$argv[1]" != 'nondestructive'
        # Self-destruct!
        functions -e pydoc
        functions -e deactivate
        functions -e _bashify_path
        functions -e _fishify_path
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV '/home/pi/git/pythonkurs/teil20'

# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
if test (echo $FISH_VERSION | head -c 1) -lt 3
    set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
else
    set -gx _OLD_VIRTUAL_PATH $PATH
end
set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH

# Unset `$PYTHONHOME` if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

function pydoc
    python -m pydoc $argv
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
    functions -c fish_prompt _old_fish_prompt

    function fish_prompt
        # Run the user's prompt first; it might depend on (pipe)status.
        set -l prompt (_old_fish_prompt)

        # Prompt override provided?
        # If not, just prepend the environment name.
        if test -n ''
            printf '(%s) ' ''
        else
            printf '(%s) ' (basename "$VIRTUAL_ENV")
        end

        string join -- \n $prompt # handle multi-line prompts
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
teil20/bin/activate.nu (Normal file, 117 lines)
@@ -0,0 +1,117 @@
# virtualenv activation module
# Activate with `overlay use activate.nu`
# Deactivate with `deactivate`, as usual
#
# To customize the overlay name, you can call `overlay use activate.nu as foo`,
# but then simply `deactivate` won't work because it is just an alias to hide
# the "activate" overlay. You'd need to call `overlay hide foo` manually.

export-env {
    def is-string [x] {
        ($x | describe) == 'string'
    }

    def has-env [name: string] {
        $name in (env).name
    }

    # Emulates a `test -z`, but better as it handles e.g. 'false'
    def is-env-true [name: string] {
        if (has-env $name) {
            # Try to parse 'true', '0', '1', and fail if not convertible
            let parsed = do -i { $env | get $name | into bool }
            if ($parsed | describe) == 'bool' {
                $parsed
            } else {
                not ($env | get $name | is-empty)
            }
        } else {
            false
        }
    }

    let is_windows = ($nu.os-info.name | str downcase) == 'windows'
    let virtual_env = '/home/pi/git/pythonkurs/teil20'
    let bin = 'bin'
    let path_sep = (char esep)
    let path_name = if $is_windows {
        if (has-env 'Path') {
            'Path'
        } else {
            'PATH'
        }
    } else {
        'PATH'
    }

    let old_path = (
        if $is_windows {
            if (has-env 'Path') {
                $env.Path
            } else {
                $env.PATH
            }
        } else {
            $env.PATH
        } | if (is-string $in) {
            # if Path/PATH is a string, make it a list
            $in | split row $path_sep | path expand
        } else {
            $in
        }
    )

    let venv_path = ([$virtual_env $bin] | path join)
    let new_path = ($old_path | prepend $venv_path | str collect $path_sep)

    let new_env = {
        $path_name : $new_path
        VIRTUAL_ENV : $virtual_env
    }

    let new_env = if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') {
        $new_env
    } else {
        # Creating the new prompt for the session
        let virtual_prompt = if ('' == '') {
            $'(char lparen)($virtual_env | path basename)(char rparen) '
        } else {
            '() '
        }

        # Back up the old prompt builder
        let old_prompt_command = if (has-env 'VIRTUAL_ENV') and (has-env '_OLD_PROMPT_COMMAND') {
            $env._OLD_PROMPT_COMMAND
        } else {
            if (has-env 'PROMPT_COMMAND') {
                $env.PROMPT_COMMAND
            } else {
                ''
            }
        }

        # If there is no default prompt, then only the env is printed in the prompt
        let new_prompt = if (has-env 'PROMPT_COMMAND') {
            if ($old_prompt_command | describe) == 'block' {
                { $'($virtual_prompt)(do $old_prompt_command)' }
            } else {
                { $'($virtual_prompt)($old_prompt_command)' }
            }
        } else {
            { $'($virtual_prompt)' }
        }

        $new_env | merge {
            _OLD_VIRTUAL_PATH : ($old_path | str collect $path_sep)
            _OLD_PROMPT_COMMAND : $old_prompt_command
            PROMPT_COMMAND : $new_prompt
            VIRTUAL_PROMPT : $virtual_prompt
        }
    }

    # Environment variables that will be loaded as the virtual env
    load-env $new_env
}

export alias pydoc = python -m pydoc
export alias deactivate = overlay hide activate
teil20/bin/activate.ps1 (Normal file, 60 lines)
@@ -0,0 +1,60 @@
$script:THIS_PATH = $myinvocation.mycommand.path
$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent

function global:deactivate([switch] $NonDestructive) {
    if (Test-Path variable:_OLD_VIRTUAL_PATH) {
        $env:PATH = $variable:_OLD_VIRTUAL_PATH
        Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
    }

    if (Test-Path function:_old_virtual_prompt) {
        $function:prompt = $function:_old_virtual_prompt
        Remove-Item function:\_old_virtual_prompt
    }

    if ($env:VIRTUAL_ENV) {
        Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
    }

    if (!$NonDestructive) {
        # Self destruct!
        Remove-Item function:deactivate
        Remove-Item function:pydoc
    }
}

function global:pydoc {
    python -m pydoc $args
}

# unset irrelevant variables
deactivate -nondestructive

$VIRTUAL_ENV = $BASE_DIR
$env:VIRTUAL_ENV = $VIRTUAL_ENV

New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH

$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
    function global:_old_virtual_prompt {
        ""
    }
    $function:_old_virtual_prompt = $function:prompt

    if ("" -ne "") {
        function global:prompt {
            # Add the custom prefix to the existing prompt
            $previous_prompt_value = & $function:_old_virtual_prompt
            ("() " + $previous_prompt_value)
        }
    }
    else {
        function global:prompt {
            # Add a prefix to the current prompt, but don't discard it.
            $previous_prompt_value = & $function:_old_virtual_prompt
            $new_prompt_value = "($( Split-Path $env:VIRTUAL_ENV -Leaf )) "
            ($new_prompt_value + $previous_prompt_value)
        }
    }
}
teil20/bin/activate_this.py (Normal file, 31 lines)
@@ -0,0 +1,31 @@
"""Activate virtualenv for current interpreter:

Use exec(open(this_file).read(), {'__file__': this_file}).

This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
"""
import os
import site
import sys

try:
    abs_file = os.path.abspath(__file__)
except NameError:
    raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))")

bin_dir = os.path.dirname(abs_file)
base = bin_dir[: -len("bin") - 1]  # strip away the bin part from the __file__, plus the path separator

# prepend bin to PATH (this file is inside the bin directory)
os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
os.environ["VIRTUAL_ENV"] = base  # virtual env is right above bin directory

# add the virtual environments libraries to the host python import mechanism
prev_length = len(sys.path)
for lib in "../lib/python3.11/site-packages".split(os.pathsep):
    path = os.path.realpath(os.path.join(bin_dir, lib))
    site.addsitedir(path.decode("utf-8") if "" else path)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]

sys.real_prefix = sys.prefix
sys.prefix = base
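As the docstring above prescribes, this file is meant to be exec'd by an interpreter running outside the venv. A usage sketch (the path matches this repository's layout; everything else is stdlib):

```python
# Activate teil20's virtualenv from an already-running interpreter.
activate_this = '/home/pi/git/pythonkurs/teil20/bin/activate_this.py'
with open(activate_this) as f:
    exec(f.read(), {'__file__': activate_this})
```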
teil20/bin/f2py (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/f2py3 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/f2py3.11 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/ninja (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from ninja import ninja
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(ninja())

teil20/bin/normalizer (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli.normalizer import cli_detect
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())

teil20/bin/pip (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/pip-3.11 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/pip3 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/pip3.11 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/python (Symbolic link)
@@ -0,0 +1 @@
/usr/bin/python3

teil20/bin/python3 (Symbolic link)
@@ -0,0 +1 @@
python

teil20/bin/python3.11 (Symbolic link)
@@ -0,0 +1 @@
python

teil20/bin/wheel (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/wheel-3.11 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/wheel3 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

teil20/bin/wheel3.11 (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/home/pi/git/pythonkurs/teil20/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
Binary file not shown.
Binary file not shown.
teil20/lib/python3.11/site-packages/_distutils_hack/__init__.py (Normal file, 222 lines)
@@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os


is_pypy = '__pypy__' in sys.builtin_module_names


def warn_distutils_present():
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings

    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )


def clear_distutils():
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    mods = [
        name
        for name in sys.modules
        if name == "distutils" or name.startswith("distutils.")
    ]
    for name in mods:
        del sys.modules[name]


def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    return which == 'local'


def ensure_local_distutils():
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()


class _TrivialRe:
    def __init__(self, *patterns):
        self._patterns = patterns

    def match(self, string):
        return all(pat in string for pat in self._patterns)


class DistutilsMetaFinder:
    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )


for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )


DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        remove_shim()


def insert_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass
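DistutilsMetaFinder works because Python consults every object on `sys.meta_path` before falling back to the normal import machinery. A minimal standalone illustration of that mechanism (the module name here is made up for the example, and this is a sketch, not setuptools' actual hook):

```python
import sys
import types
import importlib.abc
import importlib.util


class HelloFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    """Serve a synthetic module for the made-up name 'hello_demo'."""

    def find_spec(self, fullname, path, target=None):
        if fullname != 'hello_demo':
            return None  # let the regular import machinery handle everything else
        return importlib.util.spec_from_loader(fullname, self)

    def create_module(self, spec):
        mod = types.ModuleType(spec.name)
        mod.greet = lambda: 'hello from a meta path finder'
        return mod

    def exec_module(self, module):
        pass  # module was fully populated in create_module


sys.meta_path.insert(0, HelloFinder())

import hello_demo
print(hello_demo.greet())
```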
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
__import__('_distutils_hack').do_override()
teil20/lib/python3.11/site-packages/_virtualenv.pth (Normal file, 1 line)
@@ -0,0 +1 @@
import _virtualenv
teil20/lib/python3.11/site-packages/_virtualenv.py (Normal file, 130 lines)
@@ -0,0 +1,130 @@
"""Patches that are applied at runtime to the virtual environment"""
# -*- coding: utf-8 -*-

import os
import sys

VIRTUALENV_PATCH_FILE = os.path.join(__file__)


def patch_dist(dist):
    """
    Distutils allows user to configure some arguments via a configuration file:
    https://docs.python.org/3/install/index.html#distutils-configuration-files

    Some of these arguments, though, don't make sense in the context of the virtual environment files; let's fix them up.
    """
    # we cannot allow some install config as that would get packages installed outside of the virtual environment
    old_parse_config_files = dist.Distribution.parse_config_files

    def parse_config_files(self, *args, **kwargs):
        result = old_parse_config_files(self, *args, **kwargs)
        install = self.get_option_dict("install")

        if "prefix" in install:  # the prefix governs where to install the libraries
            install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
        for base in ("purelib", "platlib", "headers", "scripts", "data"):
            key = "install_{}".format(base)
            if key in install:  # do not allow global configs to hijack venv paths
                install.pop(key, None)
        return result

    dist.Distribution.parse_config_files = parse_config_files


# Import hook that patches some modules to ignore configuration values that break package installation in case
# of virtual environments.
_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
if sys.version_info > (3, 4):
    # https://docs.python.org/3/library/importlib.html#setting-up-an-importer

    class _Finder:
        """A meta path finder that allows patching the imported distutils modules"""

        fullname = None

        # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
        # because there are gevent-based applications that need to be first to import threading by themselves.
        # See https://github.com/pypa/virtualenv/issues/1895 for details.
        lock = []

        def find_spec(self, fullname, path, target=None):  # noqa: U100
            if fullname in _DISTUTILS_PATCH and self.fullname is None:
                # initialize lock[0] lazily
                if len(self.lock) == 0:
                    import threading

                    lock = threading.Lock()
                    # there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
                    # observing .lock as empty, and further going into hereby initialization. However due to the GIL,
                    # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
                    # - that every thread will use - into .lock[0].
                    # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
                    self.lock.append(lock)

                from functools import partial
                from importlib.util import find_spec

                with self.lock[0]:
                    self.fullname = fullname
                    try:
                        spec = find_spec(fullname, path)
                        if spec is not None:
                            # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
                            is_new_api = hasattr(spec.loader, "exec_module")
                            func_name = "exec_module" if is_new_api else "load_module"
                            old = getattr(spec.loader, func_name)
                            func = self.exec_module if is_new_api else self.load_module
                            if old is not func:
                                try:
                                    setattr(spec.loader, func_name, partial(func, old))
                                except AttributeError:
                                    pass  # C-Extension loaders are r/o such as zipimporter with <python 3.7
                            return spec
                    finally:
                        self.fullname = None

        @staticmethod
        def exec_module(old, module):
            old(module)
            if module.__name__ in _DISTUTILS_PATCH:
                patch_dist(module)

        @staticmethod
        def load_module(old, name):
            module = old(name)
            if module.__name__ in _DISTUTILS_PATCH:
                patch_dist(module)
            return module

    sys.meta_path.insert(0, _Finder())
else:
    # https://www.python.org/dev/peps/pep-0302/
    from imp import find_module
    from pkgutil import ImpImporter, ImpLoader

    class _VirtualenvImporter(object, ImpImporter):
        def __init__(self, path=None):
            object.__init__(self)
            ImpImporter.__init__(self, path)

        def find_module(self, fullname, path=None):
            if fullname in _DISTUTILS_PATCH:
                try:
                    return _VirtualenvLoader(fullname, *find_module(fullname.split(".")[-1], path))
                except ImportError:
                    pass
            return None

    class _VirtualenvLoader(object, ImpLoader):
        def __init__(self, fullname, file, filename, etc):
            object.__init__(self)
            ImpLoader.__init__(self, fullname, file, filename, etc)

        def load_module(self, fullname):
            module = super(_VirtualenvLoader, self).load_module(fullname)
            patch_dist(module)
            module.__loader__ = None  # distlib fallback
            return module

    sys.meta_path.append(_VirtualenvImporter())
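The comment inside `_Finder.find_spec` explains a trick worth isolating: because `list.append` is atomic under the GIL, a shared list can back a lazily created lock without needing another lock to guard its creation. A minimal sketch of the same pattern (names are illustrative, not from the file above):

```python
import threading

_lock_holder = []  # stays empty until the first caller needs the lock


def get_lazy_lock():
    # Several threads may race here and each may append a Lock, but every
    # thread then agrees on _lock_holder[0], so a single winner is shared.
    if not _lock_holder:
        _lock_holder.append(threading.Lock())
    return _lock_holder[0]


with get_lazy_lock():
    pass  # critical section
```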
@@ -0,0 +1 @@
pip
@@ -0,0 +1,21 @@
This package contains a modified version of ca-bundle.crt:

ca-bundle.crt -- Bundle of CA Root Certificates

Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#

***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
@@ -0,0 +1,66 @@
Metadata-Version: 2.1
Name: certifi
Version: 2023.7.22
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.6
License-File: LICENSE

Certifi: Python SSL Certificates
================================

Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed certificate authority (CA) bundle, you can use the
built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'

Or from the command line::

    $ python -m certifi
    /usr/local/lib/python3.7/site-packages/certifi/cacert.pem

Enjoy!

.. _`Requests`: https://requests.readthedocs.io/en/master/

Addition/Removal of Certificates
--------------------------------

Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
@@ -0,0 +1,14 @@
certifi-2023.7.22.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2023.7.22.dist-info/LICENSE,sha256=oC9sY4-fuE0G93ZMOrCF2K9-2luTwWbaVDEkeQd8b7A,1052
certifi-2023.7.22.dist-info/METADATA,sha256=RgdzxZw4VOIL_B8Rnp13_JJcWJyeRNQo_N39WoaO6y0,2171
certifi-2023.7.22.dist-info/RECORD,,
certifi-2023.7.22.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
certifi-2023.7.22.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=L_j-d0kYuA_MzA2_2hraF1ovf6KT6DTquRdV3paQwOk,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-311.pyc,,
certifi/__pycache__/__main__.cpython-311.pyc,,
certifi/__pycache__/core.cpython-311.pyc,,
certifi/cacert.pem,sha256=eU0Dn_3yd8BH4m8sfVj4Glhl2KDrcCSg-sEWT-pNJ88,281617
certifi/core.py,sha256=lhewz0zFb2b4ULsQurElmloYwQoecjWzPqY67P8T7iM,4219
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: true
Tag: py3-none-any
@@ -0,0 +1 @@
certifi
teil20/lib/python3.11/site-packages/certifi/__init__.py (Normal file, 4 lines)
@@ -0,0 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2023.07.22"
teil20/lib/python3.11/site-packages/certifi/__main__.py (Normal file, 12 lines)
@@ -0,0 +1,12 @@
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
Binary file not shown.
Binary file not shown.
Binary file not shown.
teil20/lib/python3.11/site-packages/certifi/cacert.pem (Normal file, 4635 lines)
File diff suppressed because it is too large
teil20/lib/python3.11/site-packages/certifi/core.py (Normal file, 108 lines)
@@ -0,0 +1,108 @@
"""
certifi.py
~~~~~~~~~~

This module returns the installation location of cacert.pem or its contents.
"""
import sys


if sys.version_info >= (3, 11):

    from importlib.resources import as_file, files

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")

elif sys.version_info >= (3, 7):

    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the
        # file in cases where we're inside of a zipimport situation until
        # someone actually calls where(), but we don't want to re-extract
        # the file on every call of where(), so we'll do it once then store
        # it in a global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you
            # to manage the cleanup of this file, so it doesn't actually
            # return a path, it returns a context manager that will give
            # you the path when you enter it and will do any cleanup when
            # you leave it. In the common case of not needing a temporary
            # file, it will just return the file system location and the
            # __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")

else:
    import os
    import types
    from typing import Union

    Package = Union[types.ModuleType, str]
    Resource = Union[str, "os.PathLike"]

    # This fallback will work for Python versions prior to 3.7 that lack the
    # importlib.resources module but relies on the existing `where` function
    # so won't address issues with environments like PyOxidizer that don't set
    # __file__ on modules.
    def read_text(
        package: Package,
        resource: Resource,
        encoding: str = 'utf-8',
        errors: str = 'strict'
    ) -> str:
        with open(where(), encoding=encoding) as data:
            return data.read()

    # If we don't have importlib.resources, then we will just do the old logic
    # of assuming we're on the filesystem and munge the path directly.
    def where() -> str:
        f = os.path.dirname(__file__)

        return os.path.join(f, "cacert.pem")

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")
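A short usage sketch for the module above, showing how `where()` typically feeds a TLS context (stdlib `ssl` and `urllib` only; example.com is a placeholder host):

```python
import ssl
import urllib.request

import certifi

# Build a TLS context that trusts exactly the Mozilla bundle shipped with certifi.
context = ssl.create_default_context(cafile=certifi.where())

with urllib.request.urlopen('https://example.com', context=context) as resp:
    print(resp.status)
```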
@@ -0,0 +1 @@
pip
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 TAHRI Ahmed R.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,628 @@
Metadata-Version: 2.1
Name: charset-normalizer
Version: 3.2.0
Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
Home-page: https://github.com/Ousret/charset_normalizer
Author: Ahmed TAHRI
Author-email: ahmed.tahri@cloudnursery.dev
License: MIT
Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Text Processing :: Linguistic
Classifier: Topic :: Utilities
Classifier: Typing :: Typed
Requires-Python: >=3.7.0
Description-Content-Type: text/markdown
License-File: LICENSE
Provides-Extra: unicode_backport

<h1 align="center">Charset Detection, for Everyone 👋</h1>

<p align="center">
  <sup>The Real First Universal Charset Detector</sup><br>
  <a href="https://pypi.org/project/charset-normalizer">
    <img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
  </a>
  <a href="https://pepy.tech/project/charset-normalizer/">
    <img alt="Download Count Total" src="https://pepy.tech/badge/charset-normalizer/month" />
  </a>
  <a href="https://bestpractices.coreinfrastructure.org/projects/7297">
    <img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
  </a>
</p>

> A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
> I'm trying to resolve the issue by taking a new approach.
> All IANA character set names for which the Python core library provides codecs are supported.

<p align="center">
  >>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
</p>

This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.

| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
|--------------------------------------------------|:---------------------------------------------:|:------------------:|:-----------------------------------------------:|
| `Fast` | ❌<br> | ✅<br> | ✅ <br> |
| `Universal**` | ❌ | ✅ | ❌ |
| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
| `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
| `Native Python` | ✅ | ✅ | ❌ |
| `Detect spoken language` | ❌ | ✅ | N/A |
| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
| `Whl Size` | 193.6 kB | 40 kB | ~200 kB |
| `Supported Encoding` | 33 | 🎉 [90](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |

<p align="center">
<img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>

*\*\* : They are clearly using specific code for a specific encoding even if covering most of the used ones*<br>
Did you get here because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)

## ⚡ Performance

This package offers better performance than its counterpart Chardet. Here are some numbers.

| Package | Accuracy | Mean per file (ms) | File per sec (est) |
|-----------------------------------------------|:--------:|:------------------:|:------------------:|
| [chardet](https://github.com/chardet/chardet) | 86 % | 200 ms | 5 file/sec |
| charset-normalizer | **98 %** | **10 ms** | 100 file/sec |

| Package | 99th percentile | 95th percentile | 50th percentile |
|-----------------------------------------------|:---------------:|:---------------:|:---------------:|
| [chardet](https://github.com/chardet/chardet) | 1200 ms | 287 ms | 23 ms |
| charset-normalizer | 100 ms | 50 ms | 5 ms |

Chardet's performance on larger files (1MB+) is very poor. Expect a huge difference on large payloads.

> Stats are generated using 400+ files using default parameters. For more details on the files used, see the GHA workflows.
> And yes, these results might change at any time. The dataset can be updated to include more files.
> The actual delays depend heavily on your CPU capabilities. The factors should remain the same.
> Keep in mind that the stats are generous and that Chardet accuracy vs ours is measured using Chardet's initial capability
> (e.g. supported encodings). Challenge them if you want.

## ✨ Installation

Using pip:

```sh
pip install charset-normalizer -U
```

## 🚀 Basic Usage

### CLI
This package comes with a CLI.

```
usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
                  file [file ...]

The Real First Universal Charset Detector. Discover originating encoding used
on text file. Normalize text to unicode.

positional arguments:
  files                 File(s) to be analysed

optional arguments:
  -h, --help            show this help message and exit
  -v, --verbose         Display complementary information about file if any.
                        Stdout will contain logs about the detection process.
  -a, --with-alternative
                        Output complementary possibilities if any. Top-level
                        JSON WILL be a list.
  -n, --normalize       Permit to normalize input file. If not set, program
                        does not write anything.
  -m, --minimal         Only output the charset detected to STDOUT. Disabling
                        JSON output.
  -r, --replace         Replace file when trying to normalize it instead of
                        creating a new one.
  -f, --force           Replace file without asking if you are sure, use this
                        flag with caution.
  -t THRESHOLD, --threshold THRESHOLD
                        Define a custom maximum amount of chaos allowed in
                        decoded content. 0. <= chaos <= 1.
  --version             Show version information and exit.
```

```bash
normalizer ./data/sample.1.fr.srt
```

🎉 Since version 1.4.0 the CLI produces an easily usable stdout result in JSON format.

```json
{
    "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
    "encoding": "cp1252",
    "encoding_aliases": [
        "1252",
        "windows_1252"
    ],
    "alternative_encodings": [
        "cp1254",
        "cp1256",
        "cp1258",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
        "mbcs"
    ],
    "language": "French",
    "alphabets": [
        "Basic Latin",
        "Latin-1 Supplement"
    ],
    "has_sig_or_bom": false,
    "chaos": 0.149,
    "coherence": 97.152,
    "unicode_path": null,
    "is_preferred": true
}
```

### Python
*Just print out normalized text*
```python
from charset_normalizer import from_path

results = from_path('./my_subtitle.srt')

print(str(results.best()))
```

*Upgrade your code without effort*
```python
from charset_normalizer import detect
```

The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
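The `detect` helper imported above returns a chardet-style dict. A short hedged sketch (the byte string is an arbitrary example, not taken from the docs):

```python
from charset_normalizer import detect

# chardet-compatible entry point: a dict with encoding, language and confidence.
result = detect('Käse und Brötchen'.encode('cp1252'))
print(result['encoding'], result['confidence'])
```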
||||
|
||||
See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
|
||||
|
||||
## 😇 Why
|
||||
|
||||
When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
|
||||
reliable alternative using a completely different method. Also! I never back down on a good challenge!
|
||||
|
||||
I **don't care** about the **originating charset** encoding, because **two different tables** can
|
||||
produce **two identical rendered string.**
|
||||
What I want is to get readable text, the best I can.
|
||||
|
||||
In a way, **I'm brute forcing text decoding.** How cool is that ? 😎
|
||||
|
||||
Don't confuse package **ftfy** with charset-normalizer or chardet. ftfy goal is to repair unicode string whereas charset-normalizer to convert raw file in unknown encoding to unicode.
|
||||
|
||||
## 🍰 How
|
||||
|
||||
- Discard all charset encoding table that could not fit the binary content.
|
||||
- Measure noise, or the mess once opened (by chunks) with a corresponding charset encoding.
|
||||
- Extract matches with the lowest mess detected.
|
||||
- Additionally, we measure coherence / probe for a language.
|
||||
|
||||
**Wait a minute**, what is noise/mess and coherence according to **YOU ?**
|
||||
|
||||
*Noise :* I opened hundred of text files, **written by humans**, with the wrong encoding table. **I observed**, then
|
||||
**I established** some ground rules about **what is obvious** when **it seems like** a mess.
|
||||
I know that my interpretation of what is noise is probably incomplete, feel free to contribute in order to
|
||||
improve or rewrite it.
|
||||
|
||||
*Coherence :* For each language there is on earth, we have computed ranked letter appearance occurrences (the best we can). So I thought
|
||||
that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
|
||||
|
||||
## ⚡ Known limitations
|
||||
|
||||
- Language detection is unreliable when text contains two or more languages sharing identical letters. (eg. HTML (english tags) + Turkish content (Sharing Latin characters))
|
||||
- Every charset detector heavily depends on sufficient content. In common cases, do not bother run detection on very tiny content.
|
||||
|
||||
## ⚠️ About Python EOLs
|
||||
|
||||
**If you are running:**
|
||||
|
||||
- Python >=2.7,<3.5: Unsupported
|
||||
- Python 3.5: charset-normalizer < 2.1
|
||||
- Python 3.6: charset-normalizer < 3.1
|
||||
|
||||
Upgrade your Python interpreter as soon as possible.
|
||||
|
||||
## 👤 Contributing

Contributions, issues and feature requests are very much welcome.<br />
Feel free to check the [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.

## 📝 License

Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.

Character frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)

## 💼 For Enterprise

Professional support for charset-normalizer is available as part of the [Tidelift
Subscription][1]. Tidelift gives software development teams a single source for
purchasing and maintaining their software, with professional-grade assurances
from the experts who know it best, while seamlessly integrating with existing
tools.

[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme

# Changelog

All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)

### Changed
- Typehint for function `from_path` no longer enforces `PathLike` as its first argument
- Minor improvement to the global detection reliability

### Added
- Introduce function `is_binary`, which relies on the main capabilities and is optimized to detect binaries
- Propagate the `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp`, allowing deeper control over the detection (default True)
- Explicit support for Python 3.12

### Fixed
- Edge-case detection failure where a file would contain a 'very-long' camel-cased word (Issue #289)

## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)

### Added
- Argument `should_rename_legacy` for the legacy function `detect`; any new arguments are now disregarded without errors (PR #262)

### Removed
- Support for Python 3.6 (PR #260)

### Changed
- Optional speedup provided by mypy/c 1.0.1

## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)

### Fixed
- Multi-byte cutter/chunk generator did not always cut correctly (PR #233)

### Changed
- Speedup provided by mypy/c 0.990 on Python >= 3.7

## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)

### Added
- Extend the capability of explain=True when cp_isolation contains at most two entries (min. one): details of the mess-detector results are now logged
- Support for an alternative language frequency set in charset_normalizer.assets.FREQUENCIES
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
- `normalizer --version` now specifies whether the current version provides the extra speedup (meaning a mypyc-compiled wheel)

### Changed
- Build with static metadata using the 'build' frontend
- Make the language detection stricter
- Optional: Module `md.py` can be compiled using mypyc to provide an extra speedup, up to 4x faster than v2.1

### Fixed
- CLI with the --normalize option failed when using a full path for files
- TooManyAccentuatedPlugin induced false positives on the mess detection when too few alpha characters had been fed to it
- Sphinx warnings when generating the documentation

### Removed
- Coherence detector no longer returns 'Simple English'; it returns 'English' instead
- Coherence detector no longer returns 'Classical Chinese'; it returns 'Chinese' instead
- Breaking: Methods `first()` and `best()` from CharsetMatch
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (it is unreliable and conflicts with ASCII)
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
- Breaking: Top-level function `normalize`
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
- Support for the backport `unicodedata2`

## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)

### Added
- Extend the capability of explain=True when cp_isolation contains at most two entries (min. one): details of the mess-detector results are now logged
- Support for an alternative language frequency set in charset_normalizer.assets.FREQUENCIES
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio

### Changed
- Build with static metadata using the 'build' frontend
- Make the language detection stricter

### Fixed
- CLI with the --normalize option failed when using a full path for files
- TooManyAccentuatedPlugin induced false positives on the mess detection when too few alpha characters had been fed to it

### Removed
- Coherence detector no longer returns 'Simple English'; it returns 'English' instead
- Coherence detector no longer returns 'Classical Chinese'; it returns 'Chinese' instead

## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)

### Added
- `normalizer --version` now specifies whether the current version provides the extra speedup (meaning a mypyc-compiled wheel)

### Removed
- Breaking: Methods `first()` and `best()` from CharsetMatch
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (it is unreliable and conflicts with ASCII)

### Fixed
- Sphinx warnings when generating the documentation

## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)

### Changed
- Optional: Module `md.py` can be compiled using mypyc to provide an extra speedup, up to 4x faster than v2.1

### Removed
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
- Breaking: Top-level function `normalize`
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
- Support for the backport `unicodedata2`

## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)

### Deprecated
- Function `normalize` scheduled for removal in 3.0

### Changed
- Removed a useless call to decode in fn is_unprintable (#206)

### Fixed
- Third-party library (i18n xgettext) crashing by not recognizing utf_8 (PEP 263) with an underscore, from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)

## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)

### Added
- Output the Unicode table version when running the CLI with `--version` (PR #194)

### Changed
- Re-use the decoded buffer for single-byte character sets, from [@nijel](https://github.com/nijel) (PR #175)
- Fixing some performance bottlenecks, from [@deedy5](https://github.com/deedy5) (PR #183)

### Fixed
- Workaround for a potential bug in CPython: Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1, not acknowledged as a space (PR #175)
- CLI default threshold aligned with the API threshold, from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)

### Removed
- Support for Python 3.5 (PR #192)

### Deprecated
- Use of the backport unicodedata from `unicodedata2`, as Python is quickly catching up; scheduled for removal in 3.0 (PR #194)

## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)

### Fixed
- ASCII mis-detection in rare cases (PR #170)

## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)

### Added
- Explicit support for Python 3.11 (PR #164)

### Changed
- The logging behavior has been completely reviewed, now using only the TRACE and DEBUG levels (PR #163 #165)

## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)

### Fixed
- Fallback match entries might lead to UnicodeDecodeError for large byte sequences (PR #154)

### Changed
- Skipping the language detection (CD) on ASCII (PR #155)

## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)

### Changed
- Moderating the logging impact (since 2.0.8) for specific environments (PR #147)

### Fixed
- Wrong logging level applied when setting the kwarg `explain` to True (PR #146)

## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)

### Changed
- Improvement to Vietnamese detection (PR #126)
- MD improvement on trailing data and long foreign (non-pure-Latin) data (PR #124)
- Efficiency improvements in cd/alphabet_languages, from [@adbar](https://github.com/adbar) (PR #122)
- Call sum() without an intermediary list, following PEP 289 recommendations, from [@adbar](https://github.com/adbar) (PR #129)
- Code style as refactored by Sourcery-AI (PR #131)
- Minor adjustment to the MD around European words (PR #133)
- Remove and replace SRTs from assets / tests (PR #139)
- Initialize the library logger with a `NullHandler` by default, from [@nmaynes](https://github.com/nmaynes) (PR #135)
- Setting the kwarg `explain` to True will provisionally add (bounded to the function's lifespan) a specific stream handler (PR #135)

### Fixed
- Fix large (misleading) sequences giving UnicodeDecodeError (PR #137)
- Avoid using too-insignificant chunks (PR #137)

### Added
- Add and expose function `set_logging_handler` to configure a specific StreamHandler, from [@nmaynes](https://github.com/nmaynes) (PR #135)
- Add `CHANGELOG.md` entries; the format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)

## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)

### Added
- Add support for Kazakh (Cyrillic) language detection (PR #109)

### Changed
- Further improve inferring the language from a given single-byte code page (PR #112)
- Vainly trying to leverage PEP 263 when PEP 3120 is not supported (PR #116)
- Refactoring for potential performance improvements in loops, from [@adbar](https://github.com/adbar) (PR #113)
- Various detection improvements (MD+CD) (PR #117)

### Removed
- Remove a redundant logging entry about detected language(s) (PR #115)

### Fixed
- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)

## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)

### Fixed
- Unforeseen regression with the loss of backward compatibility with some older minor releases of Python 3.5.x (PR #100)
- Fix a CLI crash when using --minimal output in certain cases (PR #103)

### Changed
- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)

## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)

### Changed
- The project now complies with flake8, mypy, isort and black to ensure better overall quality (PR #81)
- The BC support with v1.x was improved; the old staticmethods are restored (PR #82)
- The Unicode detection is slightly improved (PR #93)
- Add syntactic sugar \_\_bool\_\_ for the CharsetMatches results list-container (PR #91)

### Removed
- The project no longer raises a warning on tiny content given for detection; it is simply logged as a warning instead (PR #92)

### Fixed
- In some rare cases, the chunk extractor could cut in the middle of a multi-byte character and mislead the mess detection (PR #95)
- Some rare 'space' characters could trip up the UnprintablePlugin/mess detection (PR #96)
- The MANIFEST.in was not exhaustive (PR #78)

## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)

### Fixed
- The CLI no longer raises an unexpected exception when no encoding has been found (PR #70)
- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
- Submatch factoring could be wrong in rare edge cases (PR #72)
- Multiple files given to the CLI were ignored when publishing results to STDOUT (after the first path) (PR #72)
- Fix line endings from CRLF to LF for certain project files (PR #67)

### Changed
- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
- Allow fallback on a specified encoding, if any (PR #71)

## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)

### Changed
- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results, especially for ASCII (PR #63)
- In accordance with community wishes, the detection will fall back on ASCII or UTF-8 as a last resort (PR #64)

## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)

### Fixed
- Empty/too-small JSON payload mis-detection fixed; report from [@tseaver](https://github.com/tseaver) (PR #59)

### Changed
- Don't inject unicodedata2 into sys.modules, from [@akx](https://github.com/akx) (PR #57)

## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)

### Fixed
- Make it work where no filesystem is available, dropping the assets frequencies.json; report from [@sethmlarson](https://github.com/sethmlarson) (PR #55)
- Using explain=False permanently disabled the verbose output in the current runtime (PR #47)
- One log entry (language target preemptive) was not shown in the logs when using explain=True (PR #47)
- Fix an undesired exception (ValueError) on getitem of a CharsetMatches instance (PR #52)

### Changed
- The public function normalize's default argument values were not aligned with from_bytes (PR #53)

### Added
- You may now use charset aliases in the cp_isolation and cp_exclusion arguments (PR #47)

## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)

### Changed
- 4x to 5x faster than the previous 1.4.0 release. At least 2x faster than Chardet.
- Emphasis has been put on UTF-8 detection; it should perform nearly instantaneously.
- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
- The detection mechanism has been slightly improved; Turkish content is now detected correctly (most of the time).
- The program has been rewritten to ease readability and maintainability (now using static typing).
- utf_7 detection has been reinstated.

### Removed
- This package no longer requires anything when used with Python 3.5 (dropped cached_property).
- Removed support for these languages: Catalan, Esperanto, Kazakh, Basque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbo-Croatian.
- The exception hook on UnicodeDecodeError has been removed.

### Deprecated
- The methods coherence_non_latin, w_counter and chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0

### Fixed
- The CLI output used the relative path of the file(s); it should be absolute.

## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)

### Fixed
- Logger configuration/usage no longer conflicts with others (PR #44)

## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)

### Removed
- Using standard logging instead of the loguru package.
- Dropping the nose test framework in favor of the maintained pytest.
- Chose not to use the dragonmapper package to help with gibberish Chinese/CJK text.
- Require cached_property only for Python 3.5 due to a constraint; dropping it for every other interpreter version.
- Stop supporting UTF-7 that does not contain a SIG.
- Dropping PrettyTable, replaced with pure JSON output in the CLI.

### Fixed
- The BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present, due to the sub-match factoring process.
- Not searching properly for the BOM when trying the utf32/16 parent codec.

### Changed
- Improving the final package size by compressing frequencies.json.
- Huge improvement on the largest payloads.

### Added
- The CLI now produces JSON-consumable output.
- Return ASCII if the given sequences fit, given reasonable confidence.

## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)

### Fixed
- In some very rare cases, you could end up getting encode/decode errors due to a bad bytes payload (PR #40)

## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)

### Fixed
- An empty payload given for detection could cause an exception when accessing the `alphabets` property (PR #39)

## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)

### Fixed
- The legacy detect function should return UTF-8-SIG if a SIG is present in the payload (PR #38)

## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)

### Changed
- Amend the previous release to allow prettytable 2.0 (PR #35)

## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)

### Fixed
- Fix an error when using the package with a Python pre-release interpreter (PR #33)

### Changed
- Dependencies refactoring, constraints revised.

### Added
- Add Python 3.9 and 3.10 to the supported interpreters

MIT License

Copyright (c) 2019 TAHRI Ahmed R.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,33 @@
../../../bin/normalizer,sha256=sAlg_Asa5t12YXHUzxhlqemzKLkYUqg_bYTzWenW7hY,269
charset_normalizer-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
charset_normalizer-3.2.0.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070
charset_normalizer-3.2.0.dist-info/METADATA,sha256=K2QHhX9fQ7jFxO7y4IQk7TqYZSH7iTyxgTJQxA65EH0,31284
charset_normalizer-3.2.0.dist-info/RECORD,,
charset_normalizer-3.2.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
charset_normalizer-3.2.0.dist-info/entry_points.txt,sha256=uYo8aIGLWv8YgWfSna5HnfY_En4pkF1w4bgawNAXzP0,76
charset_normalizer-3.2.0.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
charset_normalizer/__init__.py,sha256=UzI3xC8PhmcLRMzSgPb6minTmRq0kWznnCBJ8ZCc2XI,1577
charset_normalizer/__pycache__/__init__.cpython-311.pyc,,
charset_normalizer/__pycache__/api.cpython-311.pyc,,
charset_normalizer/__pycache__/cd.cpython-311.pyc,,
charset_normalizer/__pycache__/constant.cpython-311.pyc,,
charset_normalizer/__pycache__/legacy.cpython-311.pyc,,
charset_normalizer/__pycache__/md.cpython-311.pyc,,
charset_normalizer/__pycache__/models.cpython-311.pyc,,
charset_normalizer/__pycache__/utils.cpython-311.pyc,,
charset_normalizer/__pycache__/version.cpython-311.pyc,,
charset_normalizer/api.py,sha256=WOlWjy6wT8SeMYFpaGbXZFN1TMXa-s8vZYfkL4G29iQ,21097
charset_normalizer/assets/__init__.py,sha256=wpRfujN7GJuEE5wHHo3wEDVoJ5ovzRIxsImyimCBfGU,20069
charset_normalizer/assets/__pycache__/__init__.cpython-311.pyc,,
charset_normalizer/cd.py,sha256=mZuiTSKq4XpweSDD2H4T4R3Axtaa-QS0tpEWdpMuAzQ,12554
charset_normalizer/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
charset_normalizer/cli/__pycache__/__init__.cpython-311.pyc,,
charset_normalizer/cli/__pycache__/normalizer.cpython-311.pyc,,
charset_normalizer/cli/normalizer.py,sha256=2F-xURZJzo063Ye-2RLJ2wcmURpbKeAzKwpiws65dAs,9744
charset_normalizer/constant.py,sha256=PmCeoKXqq3ZbCtCUpKHwwFBIv9DXMT_an1yd24q28mA,19101
charset_normalizer/legacy.py,sha256=T-QuVMsMeDiQEk8WSszMrzVJg_14AMeSkmHdRYhdl1k,2071
charset_normalizer/md.py,sha256=gEWM354DqBsiSoNkKzFrIW4KRFQjQLbqYnbHAdBwj74,18682
charset_normalizer/models.py,sha256=mC11wo84l00u2o03TRNX7M5ItBAbPUKKXgJSFxA35GY,11492
charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
charset_normalizer/utils.py,sha256=HdwmBy9vRqcRVGHKZqYWtpiS5wA35cLjnlVQCm0Bq9s,11578
charset_normalizer/version.py,sha256=LbH8odlzMnwR4xZF9wCsnGXQA19axDO7HZ-J9hegIX0,79
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1,2 @@
[console_scripts]
normalizer = charset_normalizer.cli.normalizer:cli_detect
@@ -0,0 +1 @@
charset_normalizer
@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.

Basic usage:
   >>> from charset_normalizer import from_bytes
   >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
   >>> best_guess = results.best()
   >>> str(best_guess)
   'Bсеки човек има право на образование. Oбразованието!'

Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging

from .api import from_bytes, from_fp, from_path, is_binary
from .legacy import detect
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__

__all__ = (
    "from_fp",
    "from_path",
    "from_bytes",
    "is_binary",
    "detect",
    "CharsetMatch",
    "CharsetMatches",
    "__version__",
    "VERSION",
    "set_logging_handler",
)

# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library

logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
teil20/lib/python3.11/site-packages/charset_normalizer/api.py (new file, 626 lines)
@@ -0,0 +1,626 @@
|
||||
import logging
|
||||
from os import PathLike
|
||||
from typing import BinaryIO, List, Optional, Set, Union
|
||||
|
||||
from .cd import (
|
||||
coherence_ratio,
|
||||
encoding_languages,
|
||||
mb_encoding_languages,
|
||||
merge_coherence_ratios,
|
||||
)
|
||||
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
|
||||
from .md import mess_ratio
|
||||
from .models import CharsetMatch, CharsetMatches
|
||||
from .utils import (
|
||||
any_specified_encoding,
|
||||
cut_sequence_chunks,
|
||||
iana_name,
|
||||
identify_sig_or_bom,
|
||||
is_cp_similar,
|
||||
is_multi_byte_encoding,
|
||||
should_strip_sig_or_bom,
|
||||
)
|
||||
|
||||
# Will most likely be controversial
|
||||
# logging.addLevelName(TRACE, "TRACE")
|
||||
logger = logging.getLogger("charset_normalizer")
|
||||
explain_handler = logging.StreamHandler()
|
||||
explain_handler.setFormatter(
|
||||
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
|
||||
)
|
||||
|
||||
|
||||
def from_bytes(
|
||||
sequences: Union[bytes, bytearray],
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.2,
|
||||
cp_isolation: Optional[List[str]] = None,
|
||||
cp_exclusion: Optional[List[str]] = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Given a raw bytes sequence, return the best possibles charset usable to render str objects.
|
||||
If there is no results, it is a strong indicator that the source is binary/not text.
|
||||
By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
|
||||
And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
|
||||
|
||||
The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
|
||||
but never take it for granted. Can improve the performance.
|
||||
|
||||
You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
|
||||
purpose.
|
||||
|
||||
This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
|
||||
By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
|
||||
toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
|
||||
Custom logging format and handler can be set manually.
|
||||
"""
|
||||
|
||||
if not isinstance(sequences, (bytearray, bytes)):
|
||||
raise TypeError(
|
||||
"Expected object of type bytes or bytearray, got: {0}".format(
|
||||
type(sequences)
|
||||
)
|
||||
)
|
||||
|
||||
if explain:
|
||||
previous_logger_level: int = logger.level
|
||||
logger.addHandler(explain_handler)
|
||||
logger.setLevel(TRACE)
|
||||
|
||||
length: int = len(sequences)
|
||||
|
||||
if length == 0:
|
||||
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level or logging.WARNING)
|
||||
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
|
||||
|
||||
if cp_isolation is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_isolation is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding allowed : %s.",
|
||||
", ".join(cp_isolation),
|
||||
)
|
||||
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
|
||||
else:
|
||||
cp_isolation = []
|
||||
|
||||
if cp_exclusion is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_exclusion is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding excluded : %s.",
|
||||
", ".join(cp_exclusion),
|
||||
)
|
||||
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
|
||||
else:
|
||||
cp_exclusion = []
|
||||
|
||||
if length <= (chunk_size * steps):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
|
||||
steps,
|
||||
chunk_size,
|
||||
length,
|
||||
)
|
||||
steps = 1
|
||||
chunk_size = length
|
||||
|
||||
if steps > 1 and length / steps < chunk_size:
|
||||
chunk_size = int(length / steps)
|
||||
|
||||
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
|
||||
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
|
||||
|
||||
if is_too_small_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
elif is_too_large_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
|
||||
prioritized_encodings: List[str] = []
|
||||
|
||||
specified_encoding: Optional[str] = (
|
||||
any_specified_encoding(sequences) if preemptive_behaviour else None
|
||||
)
|
||||
|
||||
if specified_encoding is not None:
|
||||
prioritized_encodings.append(specified_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected declarative mark in sequence. Priority +1 given for %s.",
|
||||
specified_encoding,
|
||||
)
|
||||
|
||||
tested: Set[str] = set()
|
||||
tested_but_hard_failure: List[str] = []
|
||||
tested_but_soft_failure: List[str] = []
|
||||
|
||||
fallback_ascii: Optional[CharsetMatch] = None
|
||||
fallback_u8: Optional[CharsetMatch] = None
|
||||
fallback_specified: Optional[CharsetMatch] = None
|
||||
|
||||
results: CharsetMatches = CharsetMatches()
|
||||
|
||||
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
|
||||
|
||||
if sig_encoding is not None:
|
||||
prioritized_encodings.append(sig_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
|
||||
len(sig_payload),
|
||||
sig_encoding,
|
||||
)
|
||||
|
||||
prioritized_encodings.append("ascii")
|
||||
|
||||
if "utf_8" not in prioritized_encodings:
|
||||
prioritized_encodings.append("utf_8")
|
||||
|
||||
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
|
||||
if cp_isolation and encoding_iana not in cp_isolation:
|
||||
continue
|
||||
|
||||
if cp_exclusion and encoding_iana in cp_exclusion:
|
||||
continue
|
||||
|
||||
if encoding_iana in tested:
|
||||
continue
|
||||
|
||||
tested.add(encoding_iana)
|
||||
|
||||
decoded_payload: Optional[str] = None
|
||||
bom_or_sig_available: bool = sig_encoding == encoding_iana
|
||||
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
|
||||
encoding_iana
|
||||
)
|
||||
|
||||
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
if encoding_iana in {"utf_7"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s does not provide an IncrementalDecoder",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
if is_too_large_sequence and is_multi_byte_decoder is False:
|
||||
str(
|
||||
sequences[: int(50e4)]
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) : int(50e4)],
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
else:
|
||||
decoded_payload = str(
|
||||
sequences
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) :],
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
except (UnicodeDecodeError, LookupError) as e:
|
||||
if not isinstance(e, LookupError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
similar_soft_failure_test: bool = False
|
||||
|
||||
for encoding_soft_failed in tested_but_soft_failure:
|
||||
if is_cp_similar(encoding_iana, encoding_soft_failed):
|
||||
similar_soft_failure_test = True
|
||||
break
|
||||
|
||||
if similar_soft_failure_test:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
|
||||
encoding_iana,
|
||||
encoding_soft_failed,
|
||||
)
|
||||
continue
|
||||
|
||||
r_ = range(
|
||||
0 if not bom_or_sig_available else len(sig_payload),
|
||||
length,
|
||||
int(length / steps),
|
||||
)
|
||||
|
||||
multi_byte_bonus: bool = (
|
||||
is_multi_byte_decoder
|
||||
and decoded_payload is not None
|
||||
and len(decoded_payload) < length
|
||||
)
|
||||
|
||||
if multi_byte_bonus:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s is a multi byte encoding table and it appear that at least one character "
|
||||
"was encoded using n-bytes.",
|
||||
encoding_iana,
|
||||
)
|
||||
|
||||
max_chunk_gave_up: int = int(len(r_) / 4)
|
||||
|
||||
max_chunk_gave_up = max(max_chunk_gave_up, 2)
|
||||
early_stop_count: int = 0
|
||||
lazy_str_hard_failure = False
|
||||
|
||||
md_chunks: List[str] = []
|
||||
md_ratios = []
|
||||
|
||||
try:
|
||||
for chunk in cut_sequence_chunks(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
r_,
|
||||
chunk_size,
|
||||
bom_or_sig_available,
|
||||
strip_sig_or_bom,
|
||||
sig_payload,
|
||||
is_multi_byte_decoder,
|
||||
decoded_payload,
|
||||
):
|
||||
md_chunks.append(chunk)
|
||||
|
||||
md_ratios.append(
|
||||
mess_ratio(
|
||||
chunk,
|
||||
threshold,
|
||||
explain is True and 1 <= len(cp_isolation) <= 2,
|
||||
)
|
||||
)
|
||||
|
||||
if md_ratios[-1] >= threshold:
|
||||
early_stop_count += 1
|
||||
|
||||
if (early_stop_count >= max_chunk_gave_up) or (
|
||||
bom_or_sig_available and strip_sig_or_bom is False
|
||||
):
|
||||
break
|
||||
except (
|
||||
UnicodeDecodeError
|
||||
) as e: # Lazy str loading may have missed something there
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
early_stop_count = max_chunk_gave_up
|
||||
lazy_str_hard_failure = True
|
||||
|
||||
# We might want to check the sequence again with the whole content
|
||||
# Only if initial MD tests passes
|
||||
if (
|
||||
not lazy_str_hard_failure
|
||||
and is_too_large_sequence
|
||||
and not is_multi_byte_decoder
|
||||
):
|
||||
try:
|
||||
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
|
||||
except UnicodeDecodeError as e:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
|
||||
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
|
||||
tested_but_soft_failure.append(encoding_iana)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
|
||||
"Computed mean chaos is %f %%.",
|
||||
encoding_iana,
|
||||
early_stop_count,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
# Preparing those fallbacks in case we got nothing.
|
||||
if (
|
||||
enable_fallback
|
||||
and encoding_iana in ["ascii", "utf_8", specified_encoding]
|
||||
and not lazy_str_hard_failure
|
||||
):
|
||||
fallback_entry = CharsetMatch(
|
||||
sequences, encoding_iana, threshold, False, [], decoded_payload
|
||||
)
|
||||
if encoding_iana == specified_encoding:
|
||||
fallback_specified = fallback_entry
|
||||
elif encoding_iana == "ascii":
|
||||
fallback_ascii = fallback_entry
|
||||
else:
|
||||
fallback_u8 = fallback_entry
|
||||
continue
|
||||
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s passed initial chaos probing. Mean measured chaos is %f %%",
|
||||
encoding_iana,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
|
||||
if not is_multi_byte_decoder:
|
||||
target_languages: List[str] = encoding_languages(encoding_iana)
|
||||
else:
|
||||
target_languages = mb_encoding_languages(encoding_iana)
|
||||
|
||||
if target_languages:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"{} should target any language(s) of {}".format(
|
||||
encoding_iana, str(target_languages)
|
||||
),
|
||||
)
|
||||
|
||||
cd_ratios = []
|
||||
|
||||
# We shall skip the CD when it's about ASCII.
# Most of the time it's not relevant to run "language detection" on it.
|
||||
if encoding_iana != "ascii":
|
||||
for chunk in md_chunks:
|
||||
chunk_languages = coherence_ratio(
|
||||
chunk,
|
||||
language_threshold,
|
||||
",".join(target_languages) if target_languages else None,
|
||||
)
|
||||
|
||||
cd_ratios.append(chunk_languages)
|
||||
|
||||
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
|
||||
|
||||
if cd_ratios_merged:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"We detected language {} using {}".format(
|
||||
cd_ratios_merged, encoding_iana
|
||||
),
|
||||
)
|
||||
|
||||
results.append(
|
||||
CharsetMatch(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
mean_mess_ratio,
|
||||
bom_or_sig_available,
|
||||
cd_ratios_merged,
|
||||
decoded_payload,
|
||||
)
|
||||
)
|
||||
|
||||
if (
|
||||
encoding_iana in [specified_encoding, "ascii", "utf_8"]
|
||||
and mean_mess_ratio < 0.1
|
||||
):
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one.", encoding_iana
|
||||
)
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([results[encoding_iana]])
|
||||
|
||||
if encoding_iana == sig_encoding:
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
|
||||
"the beginning of the sequence.",
|
||||
encoding_iana,
|
||||
)
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([results[encoding_iana]])
|
||||
|
||||
if len(results) == 0:
|
||||
if fallback_u8 or fallback_ascii or fallback_specified:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
|
||||
)
|
||||
|
||||
if fallback_specified:
|
||||
logger.debug(
|
||||
"Encoding detection: %s will be used as a fallback match",
|
||||
fallback_specified.encoding,
|
||||
)
|
||||
results.append(fallback_specified)
|
||||
elif (
|
||||
(fallback_u8 and fallback_ascii is None)
|
||||
or (
|
||||
fallback_u8
|
||||
and fallback_ascii
|
||||
and fallback_u8.fingerprint != fallback_ascii.fingerprint
|
||||
)
|
||||
or (fallback_u8 is not None)
|
||||
):
|
||||
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
|
||||
results.append(fallback_u8)
|
||||
elif fallback_ascii:
|
||||
logger.debug("Encoding detection: ascii will be used as a fallback match")
|
||||
results.append(fallback_ascii)
|
||||
|
||||
if results:
|
||||
logger.debug(
|
||||
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
|
||||
results.best().encoding, # type: ignore
|
||||
len(results) - 1,
|
||||
)
|
||||
else:
|
||||
logger.debug("Encoding detection: Unable to determine any suitable charset.")
|
||||
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def from_fp(
|
||||
fp: BinaryIO,
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: Optional[List[str]] = None,
|
||||
cp_exclusion: Optional[List[str]] = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same thing than the function from_bytes but using a file pointer that is already ready.
|
||||
Will not close the file pointer.
|
||||
"""
|
||||
return from_bytes(
|
||||
fp.read(),
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def from_path(
|
||||
path: Union[str, bytes, PathLike], # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: Optional[List[str]] = None,
|
||||
cp_exclusion: Optional[List[str]] = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
|
||||
Can raise IOError.
|
||||
"""
|
||||
with open(path, "rb") as fp:
|
||||
return from_fp(
|
||||
fp,
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def is_binary(
|
||||
fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: Optional[List[str]] = None,
|
||||
cp_exclusion: Optional[List[str]] = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = False,
|
||||
) -> bool:
|
||||
"""
|
||||
Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
|
||||
Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
|
||||
are disabled to be stricter around ASCII-compatible but unlikely to be a string.
|
||||
"""
|
||||
if isinstance(fp_or_path_or_payload, (str, PathLike)):
|
||||
guesses = from_path(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
elif isinstance(
|
||||
fp_or_path_or_payload,
|
||||
(
|
||||
bytes,
|
||||
bytearray,
|
||||
),
|
||||
):
|
||||
guesses = from_bytes(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
else:
|
||||
guesses = from_fp(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
|
||||
return not guesses
|
File diff suppressed because it is too large
Binary file not shown.
teil20/lib/python3.11/site-packages/charset_normalizer/cd.py (new file, 390 lines)
@@ -0,0 +1,390 @@
|
||||
import importlib
|
||||
from codecs import IncrementalDecoder
|
||||
from collections import Counter
|
||||
from functools import lru_cache
|
||||
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
|
||||
|
||||
from .assets import FREQUENCIES
|
||||
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
|
||||
from .md import is_suspiciously_successive_range
|
||||
from .models import CoherenceMatches
|
||||
from .utils import (
|
||||
is_accentuated,
|
||||
is_latin,
|
||||
is_multi_byte_encoding,
|
||||
is_unicode_range_secondary,
|
||||
unicode_range,
|
||||
)
|
||||
|
||||
|
||||
def encoding_unicode_range(iana_name: str) -> List[str]:
|
||||
"""
|
||||
Return associated unicode ranges in a single byte code page.
|
||||
"""
|
||||
if is_multi_byte_encoding(iana_name):
|
||||
raise IOError("Function not supported on multi-byte code page")
|
||||
|
||||
decoder = importlib.import_module(
|
||||
"encodings.{}".format(iana_name)
|
||||
).IncrementalDecoder
|
||||
|
||||
p: IncrementalDecoder = decoder(errors="ignore")
|
||||
seen_ranges: Dict[str, int] = {}
|
||||
character_count: int = 0
|
||||
|
||||
for i in range(0x40, 0xFF):
|
||||
chunk: str = p.decode(bytes([i]))
|
||||
|
||||
if chunk:
|
||||
character_range: Optional[str] = unicode_range(chunk)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
if is_unicode_range_secondary(character_range) is False:
|
||||
if character_range not in seen_ranges:
|
||||
seen_ranges[character_range] = 0
|
||||
seen_ranges[character_range] += 1
|
||||
character_count += 1
|
||||
|
||||
return sorted(
|
||||
[
|
||||
character_range
|
||||
for character_range in seen_ranges
|
||||
if seen_ranges[character_range] / character_count >= 0.15
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def unicode_range_languages(primary_range: str) -> List[str]:
|
||||
"""
|
||||
Return inferred languages used with a unicode range.
|
||||
"""
|
||||
languages: List[str] = []
|
||||
|
||||
for language, characters in FREQUENCIES.items():
|
||||
for character in characters:
|
||||
if unicode_range(character) == primary_range:
|
||||
languages.append(language)
|
||||
break
|
||||
|
||||
return languages
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def encoding_languages(iana_name: str) -> List[str]:
|
||||
"""
|
||||
Single-byte encoding language association. Some code page are heavily linked to particular language(s).
|
||||
This function does the correspondence.
|
||||
"""
|
||||
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
|
||||
primary_range: Optional[str] = None
|
||||
|
||||
for specified_range in unicode_ranges:
|
||||
if "Latin" not in specified_range:
|
||||
primary_range = specified_range
|
||||
break
|
||||
|
||||
if primary_range is None:
|
||||
return ["Latin Based"]
|
||||
|
||||
return unicode_range_languages(primary_range)
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def mb_encoding_languages(iana_name: str) -> List[str]:
|
||||
"""
|
||||
Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
|
||||
This function does the correspondence.
|
||||
"""
|
||||
if (
|
||||
iana_name.startswith("shift_")
|
||||
or iana_name.startswith("iso2022_jp")
|
||||
or iana_name.startswith("euc_j")
|
||||
or iana_name == "cp932"
|
||||
):
|
||||
return ["Japanese"]
|
||||
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
|
||||
return ["Chinese"]
|
||||
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
|
||||
return ["Korean"]
|
||||
|
||||
return []
|
||||
|
||||
|
||||
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
|
||||
def get_target_features(language: str) -> Tuple[bool, bool]:
|
||||
"""
|
||||
Determine main aspects from a supported language if it contains accents and if is pure Latin.
|
||||
"""
|
||||
target_have_accents: bool = False
|
||||
target_pure_latin: bool = True
|
||||
|
||||
for character in FREQUENCIES[language]:
|
||||
if not target_have_accents and is_accentuated(character):
|
||||
target_have_accents = True
|
||||
if target_pure_latin and is_latin(character) is False:
|
||||
target_pure_latin = False
|
||||
|
||||
return target_have_accents, target_pure_latin
|
||||
|
||||
|
||||
def alphabet_languages(
|
||||
characters: List[str], ignore_non_latin: bool = False
|
||||
) -> List[str]:
|
||||
"""
|
||||
Return associated languages associated to given characters.
|
||||
"""
|
||||
languages: List[Tuple[str, float]] = []
|
||||
|
||||
source_have_accents = any(is_accentuated(character) for character in characters)
|
||||
|
||||
for language, language_characters in FREQUENCIES.items():
|
||||
target_have_accents, target_pure_latin = get_target_features(language)
|
||||
|
||||
if ignore_non_latin and target_pure_latin is False:
|
||||
continue
|
||||
|
||||
if target_have_accents is False and source_have_accents:
|
||||
continue
|
||||
|
||||
character_count: int = len(language_characters)
|
||||
|
||||
character_match_count: int = len(
|
||||
[c for c in language_characters if c in characters]
|
||||
)
|
||||
|
||||
ratio: float = character_match_count / character_count
|
||||
|
||||
if ratio >= 0.2:
|
||||
languages.append((language, ratio))
|
||||
|
||||
languages = sorted(languages, key=lambda x: x[1], reverse=True)
|
||||
|
||||
return [compatible_language[0] for compatible_language in languages]
|
||||
|
||||
|
||||
def characters_popularity_compare(
|
||||
language: str, ordered_characters: List[str]
|
||||
) -> float:
|
||||
"""
|
||||
Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
|
||||
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
|
||||
Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
|
||||
"""
|
||||
if language not in FREQUENCIES:
|
||||
raise ValueError("{} not available".format(language))
|
||||
|
||||
character_approved_count: int = 0
|
||||
FREQUENCIES_language_set = set(FREQUENCIES[language])
|
||||
|
||||
ordered_characters_count: int = len(ordered_characters)
|
||||
target_language_characters_count: int = len(FREQUENCIES[language])
|
||||
|
||||
large_alphabet: bool = target_language_characters_count > 26
|
||||
|
||||
for character, character_rank in zip(
|
||||
ordered_characters, range(0, ordered_characters_count)
|
||||
):
|
||||
if character not in FREQUENCIES_language_set:
|
||||
continue
|
||||
|
||||
character_rank_in_language: int = FREQUENCIES[language].index(character)
|
||||
expected_projection_ratio: float = (
|
||||
target_language_characters_count / ordered_characters_count
|
||||
)
|
||||
character_rank_projection: int = int(character_rank * expected_projection_ratio)
|
||||
|
||||
if (
|
||||
large_alphabet is False
|
||||
and abs(character_rank_projection - character_rank_in_language) > 4
|
||||
):
|
||||
continue
|
||||
|
||||
if (
|
||||
large_alphabet is True
|
||||
and abs(character_rank_projection - character_rank_in_language)
|
||||
< target_language_characters_count / 3
|
||||
):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
characters_before_source: List[str] = FREQUENCIES[language][
|
||||
0:character_rank_in_language
|
||||
]
|
||||
characters_after_source: List[str] = FREQUENCIES[language][
|
||||
character_rank_in_language:
|
||||
]
|
||||
characters_before: List[str] = ordered_characters[0:character_rank]
|
||||
characters_after: List[str] = ordered_characters[character_rank:]
|
||||
|
||||
before_match_count: int = len(
|
||||
set(characters_before) & set(characters_before_source)
|
||||
)
|
||||
|
||||
after_match_count: int = len(
|
||||
set(characters_after) & set(characters_after_source)
|
||||
)
|
||||
|
||||
if len(characters_before_source) == 0 and before_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if len(characters_after_source) == 0 and after_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if (
|
||||
before_match_count / len(characters_before_source) >= 0.4
|
||||
or after_match_count / len(characters_after_source) >= 0.4
|
||||
):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
return character_approved_count / len(ordered_characters)
|
||||
|
||||
|
||||
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
|
||||
"""
|
||||
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
|
||||
Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
|
||||
One containing the latin letters and the other hebrew.
|
||||
"""
|
||||
layers: Dict[str, str] = {}
|
||||
|
||||
for character in decoded_sequence:
|
||||
if character.isalpha() is False:
|
||||
continue
|
||||
|
||||
character_range: Optional[str] = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
layer_target_range: Optional[str] = None
|
||||
|
||||
for discovered_range in layers:
|
||||
if (
|
||||
is_suspiciously_successive_range(discovered_range, character_range)
|
||||
is False
|
||||
):
|
||||
layer_target_range = discovered_range
|
||||
break
|
||||
|
||||
if layer_target_range is None:
|
||||
layer_target_range = character_range
|
||||
|
||||
if layer_target_range not in layers:
|
||||
layers[layer_target_range] = character.lower()
|
||||
continue
|
||||
|
||||
layers[layer_target_range] += character.lower()
|
||||
|
||||
return list(layers.values())
|
||||
|
||||
|
||||
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
|
||||
"""
|
||||
This function merge results previously given by the function coherence_ratio.
|
||||
The return type is the same as coherence_ratio.
|
||||
"""
|
||||
per_language_ratios: Dict[str, List[float]] = {}
|
||||
for result in results:
|
||||
for sub_result in result:
|
||||
language, ratio = sub_result
|
||||
if language not in per_language_ratios:
|
||||
per_language_ratios[language] = [ratio]
|
||||
continue
|
||||
per_language_ratios[language].append(ratio)
|
||||
|
||||
merge = [
|
||||
(
|
||||
language,
|
||||
round(
|
||||
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
|
||||
4,
|
||||
),
|
||||
)
|
||||
for language in per_language_ratios
|
||||
]
|
||||
|
||||
return sorted(merge, key=lambda x: x[1], reverse=True)
|
||||
|
||||
|
||||
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
|
||||
"""
|
||||
We shall NOT return "English—" in CoherenceMatches because it is an alternative
|
||||
of "English". This function only keeps the best match and remove the em-dash in it.
|
||||
"""
|
||||
index_results: Dict[str, List[float]] = dict()
|
||||
|
||||
for result in results:
|
||||
language, ratio = result
|
||||
no_em_name: str = language.replace("—", "")
|
||||
|
||||
if no_em_name not in index_results:
|
||||
index_results[no_em_name] = []
|
||||
|
||||
index_results[no_em_name].append(ratio)
|
||||
|
||||
if any(len(index_results[e]) > 1 for e in index_results):
|
||||
filtered_results: CoherenceMatches = []
|
||||
|
||||
for language in index_results:
|
||||
filtered_results.append((language, max(index_results[language])))
|
||||
|
||||
return filtered_results
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@lru_cache(maxsize=2048)
|
||||
def coherence_ratio(
|
||||
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
|
||||
) -> CoherenceMatches:
|
||||
"""
|
||||
Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
|
||||
A layer = Character extraction by alphabets/ranges.
|
||||
"""
|
||||
|
||||
results: List[Tuple[str, float]] = []
|
||||
ignore_non_latin: bool = False
|
||||
|
||||
sufficient_match_count: int = 0
|
||||
|
||||
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
|
||||
if "Latin Based" in lg_inclusion_list:
|
||||
ignore_non_latin = True
|
||||
lg_inclusion_list.remove("Latin Based")
|
||||
|
||||
for layer in alpha_unicode_split(decoded_sequence):
|
||||
sequence_frequencies: TypeCounter[str] = Counter(layer)
|
||||
most_common = sequence_frequencies.most_common()
|
||||
|
||||
character_count: int = sum(o for c, o in most_common)
|
||||
|
||||
if character_count <= TOO_SMALL_SEQUENCE:
|
||||
continue
|
||||
|
||||
popular_character_ordered: List[str] = [c for c, o in most_common]
|
||||
|
||||
for language in lg_inclusion_list or alphabet_languages(
|
||||
popular_character_ordered, ignore_non_latin
|
||||
):
|
||||
ratio: float = characters_popularity_compare(
|
||||
language, popular_character_ordered
|
||||
)
|
||||
|
||||
if ratio < threshold:
|
||||
continue
|
||||
elif ratio >= 0.8:
|
||||
sufficient_match_count += 1
|
||||
|
||||
results.append((language, round(ratio, 4)))
|
||||
|
||||
if sufficient_match_count >= 3:
|
||||
break
|
||||
|
||||
return sorted(
|
||||
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
|
||||
)
|
Some files were not shown because too many files have changed in this diff.