Compare commits

..

1 Commits
v1.5.2 ... vcr

Author SHA1 Message Date
ed
ff8313d0fb add mistake 2021-07-01 21:49:44 +02:00
209 changed files with 8137 additions and 40110 deletions

View File

@@ -1,40 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: '9001'
---
NOTE:
all of the below are optional, consider them as inspiration, delete and rewrite at will, thx md
**Describe the bug**
a description of what the bug is
**To Reproduce**
List of steps to reproduce the issue, or, if it's hard to reproduce, then at least a detailed explanation of what you did to run into it
**Expected behavior**
a description of what you expected to happen
**Screenshots**
if applicable, add screenshots to help explain your problem, such as the kickass crashpage :^)
**Server details**
if the issue is possibly on the server-side, then mention some of the following:
* server OS / version:
* python version:
* copyparty arguments:
* filesystem (`lsblk -f` on linux):
**Client details**
if the issue is possibly on the client-side, then mention some of the following:
* the device type and model:
* OS version:
* browser version:
**Additional context**
any other context about the problem here

View File

@@ -1,22 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: '9001'
---
all of the below are optional, consider them as inspiration, delete and rewrite at will
**is your feature request related to a problem? Please describe.**
a description of what the problem is, for example, `I'm always frustrated when [...]` or `Why is it not possible to [...]`
**Describe the idea / solution you'd like**
a description of what you want to happen
**Describe any alternatives you've considered**
a description of any alternative solutions or features you've considered
**Additional context**
add any other context or screenshots about the feature request here

View File

@@ -1,10 +0,0 @@
---
name: Something else
about: "┐(゚∀゚)┌"
title: ''
labels: ''
assignees: ''
---

View File

@@ -1,7 +0,0 @@
modernize your local checkout of the repo like so,
```sh
git branch -m master hovudstraum
git fetch origin
git branch -u origin/hovudstraum hovudstraum
git remote set-head origin -a
```

17
.gitignore vendored
View File

@@ -5,16 +5,12 @@ __pycache__/
MANIFEST.in MANIFEST.in
MANIFEST MANIFEST
copyparty.egg-info/ copyparty.egg-info/
buildenv/
build/
dist/
sfx/
.venv/ .venv/
/buildenv/
/build/
/dist/
/py2/
/sfx*
/unt/
/log/
# ide # ide
*.sublime-workspace *.sublime-workspace
@@ -22,10 +18,5 @@ copyparty.egg-info/
*.bak *.bak
# derived # derived
copyparty/res/COPYING.txt
copyparty/web/deps/ copyparty/web/deps/
srv/ srv/
# state/logs
up.*.txt
.hist/

2
.vscode/launch.json vendored
View File

@@ -17,7 +17,7 @@
"-mtp", "-mtp",
".bpm=f,bin/mtag/audio-bpm.py", ".bpm=f,bin/mtag/audio-bpm.py",
"-aed:wark", "-aed:wark",
"-vsrv::r:rw,ed:c,dupe", "-vsrv::r:aed:cnodupe",
"-vdist:dist:r" "-vdist:dist:r"
] ]
}, },

8
.vscode/launch.py vendored Executable file → Normal file
View File

@@ -1,5 +1,3 @@
#!/usr/bin/env python3
# takes arguments from launch.json # takes arguments from launch.json
# is used by no_dbg in tasks.json # is used by no_dbg in tasks.json
# launches 10x faster than mspython debugpy # launches 10x faster than mspython debugpy
@@ -11,15 +9,15 @@ import sys
print(sys.executable) print(sys.executable)
import json5
import shlex import shlex
import jstyleson
import subprocess as sp import subprocess as sp
with open(".vscode/launch.json", "r", encoding="utf-8") as f: with open(".vscode/launch.json", "r", encoding="utf-8") as f:
tj = f.read() tj = f.read()
oj = json5.loads(tj) oj = jstyleson.loads(tj)
argv = oj["configurations"][0]["args"] argv = oj["configurations"][0]["args"]
try: try:
@@ -30,8 +28,6 @@ except:
argv = [os.path.expanduser(x) if x.startswith("~") else x for x in argv] argv = [os.path.expanduser(x) if x.startswith("~") else x for x in argv]
argv += sys.argv[1:]
if re.search(" -j ?[0-9]", " ".join(argv)): if re.search(" -j ?[0-9]", " ".join(argv)):
argv = [sys.executable, "-m", "copyparty"] + argv argv = [sys.executable, "-m", "copyparty"] + argv
sp.check_call(argv) sp.check_call(argv)

26
.vscode/settings.json vendored
View File

@@ -23,6 +23,7 @@
"terminal.ansiBrightWhite": "#ffffff", "terminal.ansiBrightWhite": "#ffffff",
}, },
"python.testing.pytestEnabled": false, "python.testing.pytestEnabled": false,
"python.testing.nosetestsEnabled": false,
"python.testing.unittestEnabled": true, "python.testing.unittestEnabled": true,
"python.testing.unittestArgs": [ "python.testing.unittestArgs": [
"-v", "-v",
@@ -34,40 +35,18 @@
"python.linting.pylintEnabled": true, "python.linting.pylintEnabled": true,
"python.linting.flake8Enabled": true, "python.linting.flake8Enabled": true,
"python.linting.banditEnabled": true, "python.linting.banditEnabled": true,
"python.linting.mypyEnabled": true,
"python.linting.mypyArgs": [
"--ignore-missing-imports",
"--follow-imports=silent",
"--show-column-numbers",
"--strict"
],
"python.linting.flake8Args": [ "python.linting.flake8Args": [
"--max-line-length=120", "--max-line-length=120",
"--ignore=E722,F405,E203,W503,W293,E402,E501,E128", "--ignore=E722,F405,E203,W503,W293,E402",
], ],
"python.linting.banditArgs": [ "python.linting.banditArgs": [
"--ignore=B104" "--ignore=B104"
], ],
"python.linting.pylintArgs": [
"--disable=missing-module-docstring",
"--disable=missing-class-docstring",
"--disable=missing-function-docstring",
"--disable=wrong-import-position",
"--disable=raise-missing-from",
"--disable=bare-except",
"--disable=invalid-name",
"--disable=line-too-long",
"--disable=consider-using-f-string"
],
// python3 -m isort --py=27 --profile=black copyparty/
"python.formatting.provider": "black", "python.formatting.provider": "black",
"editor.formatOnSave": true, "editor.formatOnSave": true,
"[html]": { "[html]": {
"editor.formatOnSave": false, "editor.formatOnSave": false,
}, },
"[css]": {
"editor.formatOnSave": false,
},
"files.associations": { "files.associations": {
"*.makefile": "makefile" "*.makefile": "makefile"
}, },
@@ -76,5 +55,4 @@
"py27" "py27"
], ],
"python.linting.enabled": true, "python.linting.enabled": true,
"python.pythonPath": "/usr/bin/python3"
} }

5
.vscode/tasks.json vendored
View File

@@ -9,10 +9,7 @@
{ {
"label": "no_dbg", "label": "no_dbg",
"type": "shell", "type": "shell",
"command": "${config:python.pythonPath}", "command": "${config:python.pythonPath} .vscode/launch.py"
"args": [
".vscode/launch.py"
]
} }
] ]
} }

View File

@@ -1,24 +0,0 @@
in the words of Abraham Lincoln:
> Be excellent to each other... and... PARTY ON, DUDES!
more specifically I'll paraphrase some examples from a German automotive corporation as they cover all the bases without being too wordy
## Examples of unacceptable behavior
* intimidation, harassment, trolling
* insulting, derogatory, harmful or prejudicial comments
* posting private information without permission
* political or personal attacks
## Examples of expected behavior
* being nice, friendly, welcoming, inclusive, mindful and empathetic
* acting considerate, modest, respectful
* using polite and inclusive language
* criticize constructively and accept constructive criticism
* respect different points of view
## finally and even more specifically,
* parse opinions and feedback objectively without prejudice
* it's the message that matters, not who said it
aaand that's how you say `be nice` in a way that fills half a floppy w

View File

@@ -1,3 +0,0 @@
* do something cool
really tho, send a PR or an issue or whatever, all appreciated, anything goes, just behave aight

1358
README.md

File diff suppressed because it is too large Load Diff

View File

@@ -1,18 +1,4 @@
# [`up2k.py`](up2k.py) # [`copyparty-fuse.py`](copyparty-fuse.py)
* command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
* file uploads, file-search, autoresume of aborted/broken uploads
* sync local folder to server
* generally faster than browsers
* if something breaks just restart it
# [`partyjournal.py`](partyjournal.py)
produces a chronological list of all uploads by collecting info from up2k databases and the filesystem
* outputs a standalone html file
* optional mapping from IP-addresses to nicknames
# [`partyfuse.py`](partyfuse.py)
* mount a copyparty server as a local filesystem (read-only) * mount a copyparty server as a local filesystem (read-only)
* **supports Windows!** -- expect `194 MiB/s` sequential read * **supports Windows!** -- expect `194 MiB/s` sequential read
* **supports Linux** -- expect `117 MiB/s` sequential read * **supports Linux** -- expect `117 MiB/s` sequential read
@@ -31,19 +17,19 @@ also consider using [../docs/rclone.md](../docs/rclone.md) instead for 5x perfor
* install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/) * install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/)
* [x] add python 3.x to PATH (it asks during install) * [x] add python 3.x to PATH (it asks during install)
* `python -m pip install --user fusepy` * `python -m pip install --user fusepy`
* `python ./partyfuse.py n: http://192.168.1.69:3923/` * `python ./copyparty-fuse.py n: http://192.168.1.69:3923/`
10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled: 10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled:
* `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}` * `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}`
* `/mingw64/bin/python3 -m pip install --user fusepy` * `/mingw64/bin/python3 -m pip install --user fusepy`
* `/mingw64/bin/python3 ./partyfuse.py [...]` * `/mingw64/bin/python3 ./copyparty-fuse.py [...]`
you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE) you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE)
(winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine) (winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine)
# [`partyfuse2.py`](partyfuse2.py) # [`copyparty-fuse🅱️.py`](copyparty-fuseb.py)
* mount a copyparty server as a local filesystem (read-only) * mount a copyparty server as a local filesystem (read-only)
* does the same thing except more correct, `samba` approves * does the same thing except more correct, `samba` approves
* **supports Linux** -- expect `18 MiB/s` (wait what) * **supports Linux** -- expect `18 MiB/s` (wait what)
@@ -51,7 +37,7 @@ you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releas
# [`partyfuse-streaming.py`](partyfuse-streaming.py) # [`copyparty-fuse-streaming.py`](copyparty-fuse-streaming.py)
* pretend this doesn't exist * pretend this doesn't exist
@@ -61,7 +47,6 @@ you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releas
* copyparty can Popen programs like these during file indexing to collect additional metadata * copyparty can Popen programs like these during file indexing to collect additional metadata
# [`dbtool.py`](dbtool.py) # [`dbtool.py`](dbtool.py)
upgrade utility which can show db info and help transfer data between databases, for example when a new version of copyparty is incompatible with the old DB and automatically rebuilds the DB from scratch, but you have some really expensive `-mtp` parsers and want to copy over the tags from the old db upgrade utility which can show db info and help transfer data between databases, for example when a new version of copyparty is incompatible with the old DB and automatically rebuilds the DB from scratch, but you have some really expensive `-mtp` parsers and want to copy over the tags from the old db
@@ -76,9 +61,3 @@ cd /mnt/nas/music/.hist
~/src/copyparty/bin/dbtool.py -src up2k.*.v3 up2k.db -rm-mtp-flag -copy key ~/src/copyparty/bin/dbtool.py -src up2k.*.v3 up2k.db -rm-mtp-flag -copy key
~/src/copyparty/bin/dbtool.py -src up2k.*.v3 up2k.db -rm-mtp-flag -copy .bpm -vac ~/src/copyparty/bin/dbtool.py -src up2k.*.v3 up2k.db -rm-mtp-flag -copy .bpm -vac
``` ```
# [`prisonparty.sh`](prisonparty.sh)
* run copyparty in a chroot, preventing any accidental file access
* creates bindmounts for /bin, /lib, and so on, see `sysdirs=`

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
"""partyfuse-streaming: remote copyparty as a local filesystem""" """copyparty-fuse-streaming: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>" __author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020 __copyright__ = 2020
__license__ = "MIT" __license__ = "MIT"
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
mount a copyparty server (local or remote) as a filesystem mount a copyparty server (local or remote) as a filesystem
usage: usage:
python partyfuse-streaming.py http://192.168.1.69:3923/ ./music python copyparty-fuse-streaming.py http://192.168.1.69:3923/ ./music
dependencies: dependencies:
python3 -m pip install --user fusepy python3 -m pip install --user fusepy
@@ -21,7 +21,7 @@ dependencies:
+ on Windows: https://github.com/billziss-gh/winfsp/releases/latest + on Windows: https://github.com/billziss-gh/winfsp/releases/latest
this was a mistake: this was a mistake:
fork of partyfuse.py with a streaming cache rather than readahead, fork of copyparty-fuse.py with a streaming cache rather than readahead,
thought this was gonna be way faster (and it kind of is) thought this was gonna be way faster (and it kind of is)
except the overhead of reopening connections on trunc totally kills it except the overhead of reopening connections on trunc totally kills it
""" """
@@ -42,7 +42,6 @@ import threading
import traceback import traceback
import http.client # py2: httplib import http.client # py2: httplib
import urllib.parse import urllib.parse
import calendar
from datetime import datetime from datetime import datetime
from urllib.parse import quote_from_bytes as quote from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote from urllib.parse import unquote_to_bytes as unquote
@@ -62,12 +61,12 @@ except:
else: else:
libfuse = "apt install libfuse\n modprobe fuse" libfuse = "apt install libfuse\n modprobe fuse"
m = """\033[33m print(
could not import fuse; these may help: "\n could not import fuse; these may help:"
{} -m pip install --user fusepy + "\n python3 -m pip install --user fusepy\n "
{} + libfuse
\033[0m""" + "\n"
print(m.format(sys.executable, libfuse)) )
raise raise
@@ -154,7 +153,7 @@ def dewin(txt):
class RecentLog(object): class RecentLog(object):
def __init__(self): def __init__(self):
self.mtx = threading.Lock() self.mtx = threading.Lock()
self.f = None # open("partyfuse.log", "wb") self.f = None # open("copyparty-fuse.log", "wb")
self.q = [] self.q = []
thr = threading.Thread(target=self.printer) thr = threading.Thread(target=self.printer)
@@ -185,9 +184,9 @@ class RecentLog(object):
print("".join(q), end="") print("".join(q), end="")
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/ # [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/ # [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/ # [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
# #
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done # [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done # [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
@@ -346,7 +345,7 @@ class Gateway(object):
except: except:
pass pass
def sendreq(self, meth, path, headers, **kwargs): def sendreq(self, *args, headers={}, **kwargs):
if self.password: if self.password:
headers["Cookie"] = "=".join(["cppwd", self.password]) headers["Cookie"] = "=".join(["cppwd", self.password])
@@ -355,21 +354,21 @@ class Gateway(object):
if c.rx_path: if c.rx_path:
raise Exception() raise Exception()
c.request(meth, path, headers=headers, **kwargs) c.request(*list(args), headers=headers, **kwargs)
c.rx = c.getresponse() c.rx = c.getresponse()
return c return c
except: except:
tid = threading.current_thread().ident tid = threading.current_thread().ident
dbg( dbg(
"\033[1;37;44mbad conn {:x}\n {} {}\n {}\033[0m".format( "\033[1;37;44mbad conn {:x}\n {}\n {}\033[0m".format(
tid, meth, path, c.rx_path if c else "(null)" tid, " ".join(str(x) for x in args), c.rx_path if c else "(null)"
) )
) )
self.closeconn(c) self.closeconn(c)
c = self.getconn() c = self.getconn()
try: try:
c.request(meth, path, headers=headers, **kwargs) c.request(*list(args), headers=headers, **kwargs)
c.rx = c.getresponse() c.rx = c.getresponse()
return c return c
except: except:
@@ -387,7 +386,7 @@ class Gateway(object):
path = dewin(path) path = dewin(path)
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots" web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
c = self.sendreq("GET", web_path, {}) c = self.sendreq("GET", web_path)
if c.rx.status != 200: if c.rx.status != 200:
self.closeconn(c) self.closeconn(c)
log( log(
@@ -441,7 +440,7 @@ class Gateway(object):
) )
) )
c = self.sendreq("GET", web_path, {"Range": hdr_range}) c = self.sendreq("GET", web_path, headers={"Range": hdr_range})
if c.rx.status != http.client.PARTIAL_CONTENT: if c.rx.status != http.client.PARTIAL_CONTENT:
self.closeconn(c) self.closeconn(c)
raise Exception( raise Exception(
@@ -496,7 +495,7 @@ class Gateway(object):
ts = 60 * 60 * 24 * 2 ts = 60 * 60 * 24 * 2
try: try:
sz = int(fsize) sz = int(fsize)
ts = calendar.timegm(time.strptime(fdate, "%Y-%m-%d %H:%M:%S")) ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
except: except:
info("bad HTML or OS [{}] [{}]".format(fdate, fsize)) info("bad HTML or OS [{}] [{}]".format(fdate, fsize))
# python cannot strptime(1959-01-01) on windows # python cannot strptime(1959-01-01) on windows

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
"""partyfuse: remote copyparty as a local filesystem""" """copyparty-fuse: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>" __author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2019 __copyright__ = 2019
__license__ = "MIT" __license__ = "MIT"
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
mount a copyparty server (local or remote) as a filesystem mount a copyparty server (local or remote) as a filesystem
usage: usage:
python partyfuse.py http://192.168.1.69:3923/ ./music python copyparty-fuse.py http://192.168.1.69:3923/ ./music
dependencies: dependencies:
python3 -m pip install --user fusepy python3 -m pip install --user fusepy
@@ -22,7 +22,7 @@ dependencies:
note: note:
you probably want to run this on windows clients: you probably want to run this on windows clients:
https://github.com/9001/copyparty/blob/hovudstraum/contrib/explorer-nothumbs-nofoldertypes.reg https://github.com/9001/copyparty/blob/master/contrib/explorer-nothumbs-nofoldertypes.reg
get server cert: get server cert:
awk '/-BEGIN CERTIFICATE-/ {a=1} a; /-END CERTIFICATE-/{exit}' <(openssl s_client -connect 127.0.0.1:3923 </dev/null 2>/dev/null) >cert.pem awk '/-BEGIN CERTIFICATE-/ {a=1} a; /-END CERTIFICATE-/{exit}' <(openssl s_client -connect 127.0.0.1:3923 </dev/null 2>/dev/null) >cert.pem
@@ -45,7 +45,6 @@ import threading
import traceback import traceback
import http.client # py2: httplib import http.client # py2: httplib
import urllib.parse import urllib.parse
import calendar
from datetime import datetime from datetime import datetime
from urllib.parse import quote_from_bytes as quote from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote from urllib.parse import unquote_to_bytes as unquote
@@ -55,13 +54,10 @@ MACOS = platform.system() == "Darwin"
info = log = dbg = None info = log = dbg = None
print( print("{} v{} @ {}".format(
"{} v{} @ {}".format( platform.python_implementation(),
platform.python_implementation(), ".".join([str(x) for x in sys.version_info]),
".".join([str(x) for x in sys.version_info]), sys.executable))
sys.executable,
)
)
try: try:
@@ -72,14 +68,14 @@ except:
elif MACOS: elif MACOS:
libfuse = "install https://osxfuse.github.io/" libfuse = "install https://osxfuse.github.io/"
else: else:
libfuse = "apt install libfuse3-3\n modprobe fuse" libfuse = "apt install libfuse\n modprobe fuse"
m = """\033[33m print(
could not import fuse; these may help: "\n could not import fuse; these may help:"
{} -m pip install --user fusepy + "\n python3 -m pip install --user fusepy\n "
{} + libfuse
\033[0m""" + "\n"
print(m.format(sys.executable, libfuse)) )
raise raise
@@ -166,7 +162,7 @@ def dewin(txt):
class RecentLog(object): class RecentLog(object):
def __init__(self): def __init__(self):
self.mtx = threading.Lock() self.mtx = threading.Lock()
self.f = None # open("partyfuse.log", "wb") self.f = None # open("copyparty-fuse.log", "wb")
self.q = [] self.q = []
thr = threading.Thread(target=self.printer) thr = threading.Thread(target=self.printer)
@@ -197,9 +193,9 @@ class RecentLog(object):
print("".join(q), end="") print("".join(q), end="")
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/ # [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/ # [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/ # [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
# #
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done # [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done # [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
@@ -303,14 +299,14 @@ class Gateway(object):
except: except:
pass pass
def sendreq(self, meth, path, headers, **kwargs): def sendreq(self, *args, headers={}, **kwargs):
tid = get_tid() tid = get_tid()
if self.password: if self.password:
headers["Cookie"] = "=".join(["cppwd", self.password]) headers["Cookie"] = "=".join(["cppwd", self.password])
try: try:
c = self.getconn(tid) c = self.getconn(tid)
c.request(meth, path, headers=headers, **kwargs) c.request(*list(args), headers=headers, **kwargs)
return c.getresponse() return c.getresponse()
except: except:
dbg("bad conn") dbg("bad conn")
@@ -318,7 +314,7 @@ class Gateway(object):
self.closeconn(tid) self.closeconn(tid)
try: try:
c = self.getconn(tid) c = self.getconn(tid)
c.request(meth, path, headers=headers, **kwargs) c.request(*list(args), headers=headers, **kwargs)
return c.getresponse() return c.getresponse()
except: except:
info("http connection failed:\n" + traceback.format_exc()) info("http connection failed:\n" + traceback.format_exc())
@@ -335,7 +331,7 @@ class Gateway(object):
path = dewin(path) path = dewin(path)
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots&ls" web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots&ls"
r = self.sendreq("GET", web_path, {}) r = self.sendreq("GET", web_path)
if r.status != 200: if r.status != 200:
self.closeconn() self.closeconn()
log( log(
@@ -372,7 +368,7 @@ class Gateway(object):
) )
) )
r = self.sendreq("GET", web_path, {"Range": hdr_range}) r = self.sendreq("GET", web_path, headers={"Range": hdr_range})
if r.status != http.client.PARTIAL_CONTENT: if r.status != http.client.PARTIAL_CONTENT:
self.closeconn() self.closeconn()
raise Exception( raise Exception(
@@ -394,16 +390,15 @@ class Gateway(object):
rsp = json.loads(rsp.decode("utf-8")) rsp = json.loads(rsp.decode("utf-8"))
ret = [] ret = []
for statfun, nodes in [ for is_dir, nodes in [[True, rsp["dirs"]], [False, rsp["files"]]]:
[self.stat_dir, rsp["dirs"]],
[self.stat_file, rsp["files"]],
]:
for n in nodes: for n in nodes:
fname = unquote(n["href"].split("?")[0]).rstrip(b"/").decode("wtf-8") fname = unquote(n["href"]).rstrip(b"/")
fname = fname.decode("wtf-8")
if bad_good: if bad_good:
fname = enwin(fname) fname = enwin(fname)
ret.append([fname, statfun(n["ts"], n["sz"]), 0]) fun = self.stat_dir if is_dir else self.stat_file
ret.append([fname, fun(n["ts"], n["sz"]), 0])
return ret return ret
@@ -444,7 +439,7 @@ class Gateway(object):
ts = 60 * 60 * 24 * 2 ts = 60 * 60 * 24 * 2
try: try:
sz = int(fsize) sz = int(fsize)
ts = calendar.timegm(time.strptime(fdate, "%Y-%m-%d %H:%M:%S")) ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
except: except:
info("bad HTML or OS [{}] [{}]".format(fdate, fsize)) info("bad HTML or OS [{}] [{}]".format(fdate, fsize))
# python cannot strptime(1959-01-01) on windows # python cannot strptime(1959-01-01) on windows

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
"""partyfuse2: remote copyparty as a local filesystem""" """copyparty-fuseb: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>" __author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020 __copyright__ = 2020
__license__ = "MIT" __license__ = "MIT"
@@ -11,18 +11,14 @@ import re
import os import os
import sys import sys
import time import time
import json
import stat import stat
import errno import errno
import struct import struct
import codecs
import platform
import threading import threading
import http.client # py2: httplib import http.client # py2: httplib
import urllib.parse import urllib.parse
from datetime import datetime from datetime import datetime
from urllib.parse import quote_from_bytes as quote from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote
try: try:
import fuse import fuse
@@ -32,19 +28,9 @@ try:
if not hasattr(fuse, "__version__"): if not hasattr(fuse, "__version__"):
raise Exception("your fuse-python is way old") raise Exception("your fuse-python is way old")
except: except:
if WINDOWS: print(
libfuse = "install https://github.com/billziss-gh/winfsp/releases/latest" "\n could not import fuse; these may help:\n python3 -m pip install --user fuse-python\n apt install libfuse\n modprobe fuse\n"
elif MACOS: )
libfuse = "install https://osxfuse.github.io/"
else:
libfuse = "apt install libfuse\n modprobe fuse"
m = """\033[33m
could not import fuse; these may help:
{} -m pip install --user fuse-python
{}
\033[0m"""
print(m.format(sys.executable, libfuse))
raise raise
@@ -52,22 +38,18 @@ except:
mount a copyparty server (local or remote) as a filesystem mount a copyparty server (local or remote) as a filesystem
usage: usage:
python ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas python ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas
dependencies: dependencies:
sudo apk add fuse-dev python3-dev sudo apk add fuse-dev python3-dev
python3 -m pip install --user fuse-python python3 -m pip install --user fuse-python
fork of partyfuse.py based on fuse-python which fork of copyparty-fuse.py based on fuse-python which
appears to be more compliant than fusepy? since this works with samba appears to be more compliant than fusepy? since this works with samba
(probably just my garbage code tbh) (probably just my garbage code tbh)
""" """
WINDOWS = sys.platform == "win32"
MACOS = platform.system() == "Darwin"
def threadless_log(msg): def threadless_log(msg):
print(msg + "\n", end="") print(msg + "\n", end="")
@@ -111,41 +93,6 @@ def html_dec(txt):
) )
def register_wtf8():
def wtf8_enc(text):
return str(text).encode("utf-8", "surrogateescape"), len(text)
def wtf8_dec(binary):
return bytes(binary).decode("utf-8", "surrogateescape"), len(binary)
def wtf8_search(encoding_name):
return codecs.CodecInfo(wtf8_enc, wtf8_dec, name="wtf-8")
codecs.register(wtf8_search)
bad_good = {}
good_bad = {}
def enwin(txt):
return "".join([bad_good.get(x, x) for x in txt])
for bad, good in bad_good.items():
txt = txt.replace(bad, good)
return txt
def dewin(txt):
return "".join([good_bad.get(x, x) for x in txt])
for bad, good in bad_good.items():
txt = txt.replace(good, bad)
return txt
class CacheNode(object): class CacheNode(object):
def __init__(self, tag, data): def __init__(self, tag, data):
self.tag = tag self.tag = tag
@@ -168,9 +115,8 @@ class Stat(fuse.Stat):
class Gateway(object): class Gateway(object):
def __init__(self, base_url, pw): def __init__(self, base_url):
self.base_url = base_url self.base_url = base_url
self.pw = pw
ui = urllib.parse.urlparse(base_url) ui = urllib.parse.urlparse(base_url)
self.web_root = ui.path.strip("/") self.web_root = ui.path.strip("/")
@@ -189,7 +135,8 @@ class Gateway(object):
self.conns = {} self.conns = {}
def quotep(self, path): def quotep(self, path):
path = path.encode("wtf-8") # TODO: mojibake support
path = path.encode("utf-8", "ignore")
return quote(path, safe="/") return quote(path, safe="/")
def getconn(self, tid=None): def getconn(self, tid=None):
@@ -212,29 +159,20 @@ class Gateway(object):
except: except:
pass pass
def sendreq(self, *args, **ka): def sendreq(self, *args, **kwargs):
tid = get_tid() tid = get_tid()
if self.pw:
ck = "cppwd=" + self.pw
try:
ka["headers"]["Cookie"] = ck
except:
ka["headers"] = {"Cookie": ck}
try: try:
c = self.getconn(tid) c = self.getconn(tid)
c.request(*list(args), **ka) c.request(*list(args), **kwargs)
return c.getresponse() return c.getresponse()
except: except:
self.closeconn(tid) self.closeconn(tid)
c = self.getconn(tid) c = self.getconn(tid)
c.request(*list(args), **ka) c.request(*list(args), **kwargs)
return c.getresponse() return c.getresponse()
def listdir(self, path): def listdir(self, path):
if bad_good: web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
path = dewin(path)
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots&ls"
r = self.sendreq("GET", web_path) r = self.sendreq("GET", web_path)
if r.status != 200: if r.status != 200:
self.closeconn() self.closeconn()
@@ -244,12 +182,9 @@ class Gateway(object):
) )
) )
return self.parse_jls(r) return self.parse_html(r)
def download_file_range(self, path, ofs1, ofs2): def download_file_range(self, path, ofs1, ofs2):
if bad_good:
path = dewin(path)
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?raw" web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?raw"
hdr_range = "bytes={}-{}".format(ofs1, ofs2 - 1) hdr_range = "bytes={}-{}".format(ofs1, ofs2 - 1)
log("downloading {}".format(hdr_range)) log("downloading {}".format(hdr_range))
@@ -265,27 +200,40 @@ class Gateway(object):
return r.read() return r.read()
def parse_jls(self, datasrc): def parse_html(self, datasrc):
rsp = b"" ret = []
remainder = b""
ptn = re.compile(
r"^<tr><td>(-|DIR)</td><td><a [^>]+>([^<]+)</a></td><td>([^<]+)</td><td>([^<]+)</td></tr>$"
)
while True: while True:
buf = datasrc.read(1024 * 32) buf = remainder + datasrc.read(4096)
# print('[{}]'.format(buf.decode('utf-8')))
if not buf: if not buf:
break break
rsp += buf remainder = b""
endpos = buf.rfind(b"\n")
if endpos >= 0:
remainder = buf[endpos + 1 :]
buf = buf[:endpos]
rsp = json.loads(rsp.decode("utf-8")) lines = buf.decode("utf-8").split("\n")
ret = [] for line in lines:
for statfun, nodes in [ m = ptn.match(line)
[self.stat_dir, rsp["dirs"]], if not m:
[self.stat_file, rsp["files"]], # print(line)
]: continue
for n in nodes:
fname = unquote(n["href"].split("?")[0]).rstrip(b"/").decode("wtf-8")
if bad_good:
fname = enwin(fname)
ret.append([fname, statfun(n["ts"], n["sz"]), 0]) ftype, fname, fsize, fdate = m.groups()
fname = html_dec(fname)
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
sz = int(fsize)
if ftype == "-":
ret.append([fname, self.stat_file(ts, sz), 0])
else:
ret.append([fname, self.stat_dir(ts, sz), 0])
return ret return ret
@@ -314,7 +262,6 @@ class CPPF(Fuse):
Fuse.__init__(self, *args, **kwargs) Fuse.__init__(self, *args, **kwargs)
self.url = None self.url = None
self.pw = None
self.dircache = [] self.dircache = []
self.dircache_mtx = threading.Lock() self.dircache_mtx = threading.Lock()
@@ -324,7 +271,7 @@ class CPPF(Fuse):
def init2(self): def init2(self):
# TODO figure out how python-fuse wanted this to go # TODO figure out how python-fuse wanted this to go
self.gw = Gateway(self.url, self.pw) # .decode('utf-8')) self.gw = Gateway(self.url) # .decode('utf-8'))
info("up") info("up")
def clean_dircache(self): def clean_dircache(self):
@@ -589,8 +536,6 @@ class CPPF(Fuse):
def getattr(self, path): def getattr(self, path):
log("getattr [{}]".format(path)) log("getattr [{}]".format(path))
if WINDOWS:
path = enwin(path) # windows occasionally decodes f0xx to xx
path = path.strip("/") path = path.strip("/")
try: try:
@@ -623,25 +568,9 @@ class CPPF(Fuse):
def main(): def main():
time.strptime("19970815", "%Y%m%d") # python#7980 time.strptime("19970815", "%Y%m%d") # python#7980
register_wtf8()
if WINDOWS:
os.system("rem")
for ch in '<>:"\\|?*':
# microsoft maps illegal characters to f0xx
# (e000 to f8ff is basic-plane private-use)
bad_good[ch] = chr(ord(ch) + 0xF000)
for n in range(0, 0x100):
# map surrogateescape to another private-use area
bad_good[chr(n + 0xDC00)] = chr(n + 0xF100)
for k, v in bad_good.items():
good_bad[v] = k
server = CPPF() server = CPPF()
server.parser.add_option(mountopt="url", metavar="BASE_URL", default=None) server.parser.add_option(mountopt="url", metavar="BASE_URL", default=None)
server.parser.add_option(mountopt="pw", metavar="PASSWORD", default=None)
server.parse(values=server, errex=1) server.parse(values=server, errex=1)
if not server.url or not str(server.url).startswith("http"): if not server.url or not str(server.url).startswith("http"):
print("\nerror:") print("\nerror:")
@@ -649,7 +578,7 @@ def main():
print(" need argument: mount-path") print(" need argument: mount-path")
print("example:") print("example:")
print( print(
" ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas" " ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas"
) )
sys.exit(1) sys.exit(1)

View File

@@ -8,10 +8,7 @@ import sqlite3
import argparse import argparse
DB_VER1 = 3 DB_VER1 = 3
DB_VER2 = 5 DB_VER2 = 4
BY_PATH = None
NC = None
def die(msg): def die(msg):
@@ -60,13 +57,8 @@ def compare(n1, d1, n2, d2, verbose):
if rd.split("/", 1)[0] == ".hist": if rd.split("/", 1)[0] == ".hist":
continue continue
if BY_PATH: q = "select w from up where rd = ? and fn = ?"
q = "select w from up where rd = ? and fn = ?" hit = d2.execute(q, (rd, fn)).fetchone()
hit = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
hit = d2.execute(q, (w1[:16], w1)).fetchone()
if not hit: if not hit:
miss += 1 miss += 1
if verbose: if verbose:
@@ -78,32 +70,27 @@ def compare(n1, d1, n2, d2, verbose):
n = 0 n = 0
miss = {} miss = {}
nmiss = 0 nmiss = 0
for w1s, k, v in d1.execute("select * from mt"): for w1, k, v in d1.execute("select * from mt"):
n += 1 n += 1
if n % 100_000 == 0: if n % 100_000 == 0:
m = f"\033[36mchecked {n:,} of {nt:,} tags in {n1} against {n2}, so far {nmiss} missing tags\033[0m" m = f"\033[36mchecked {n:,} of {nt:,} tags in {n1} against {n2}, so far {nmiss} missing tags\033[0m"
print(m) print(m)
q = "select w, rd, fn from up where substr(w,1,16) = ?" q = "select rd, fn from up where substr(w,1,16) = ?"
w1, rd, fn = d1.execute(q, (w1s,)).fetchone() rd, fn = d1.execute(q, (w1,)).fetchone()
if rd.split("/", 1)[0] == ".hist": if rd.split("/", 1)[0] == ".hist":
continue continue
if BY_PATH: q = "select substr(w,1,16) from up where rd = ? and fn = ?"
q = "select w from up where rd = ? and fn = ?" w2 = d2.execute(q, (rd, fn)).fetchone()
w2 = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
w2 = d2.execute(q, (w1s, w1)).fetchone()
if w2: if w2:
w2 = w2[0] w2 = w2[0]
v2 = None v2 = None
if w2: if w2:
v2 = d2.execute( v2 = d2.execute(
"select v from mt where w = ? and +k = ?", (w2[:16], k) "select v from mt where w = ? and +k = ?", (w2, k)
).fetchone() ).fetchone()
if v2: if v2:
v2 = v2[0] v2 = v2[0]
@@ -137,7 +124,7 @@ def compare(n1, d1, n2, d2, verbose):
for k, v in sorted(miss.items()): for k, v in sorted(miss.items()):
if v: if v:
print(f"{n1} has {v:7} more {k:<7} tags than {n2}") print(f"{n1} has {v:6} more {k:<6} tags than {n2}")
print(f"in total, {nmiss} missing tags in {n2}\n") print(f"in total, {nmiss} missing tags in {n2}\n")
@@ -145,75 +132,47 @@ def compare(n1, d1, n2, d2, verbose):
def copy_mtp(d1, d2, tag, rm): def copy_mtp(d1, d2, tag, rm):
nt = next(d1.execute("select count(w) from mt where k = ?", (tag,)))[0] nt = next(d1.execute("select count(w) from mt where k = ?", (tag,)))[0]
n = 0 n = 0
ncopy = 0 ndone = 0
nskip = 0 for w1, k, v in d1.execute("select * from mt where k = ?", (tag,)):
for w1s, k, v in d1.execute("select * from mt where k = ?", (tag,)):
n += 1 n += 1
if n % 25_000 == 0: if n % 25_000 == 0:
m = f"\033[36m{n:,} of {nt:,} tags checked, so far {ncopy} copied, {nskip} skipped\033[0m" m = f"\033[36m{n:,} of {nt:,} tags checked, so far {ndone} copied\033[0m"
print(m) print(m)
q = "select w, rd, fn from up where substr(w,1,16) = ?" q = "select rd, fn from up where substr(w,1,16) = ?"
w1, rd, fn = d1.execute(q, (w1s,)).fetchone() rd, fn = d1.execute(q, (w1,)).fetchone()
if rd.split("/", 1)[0] == ".hist": if rd.split("/", 1)[0] == ".hist":
continue continue
if BY_PATH: q = "select substr(w,1,16) from up where rd = ? and fn = ?"
q = "select w from up where rd = ? and fn = ?" w2 = d2.execute(q, (rd, fn)).fetchone()
w2 = d2.execute(q, (rd, fn)).fetchone()
else:
q = "select w from up where substr(w,1,16) = ? and +w = ?"
w2 = d2.execute(q, (w1s, w1)).fetchone()
if not w2: if not w2:
continue continue
w2s = w2[0][:16] w2 = w2[0]
hit = d2.execute("select v from mt where w = ? and +k = ?", (w2s, k)).fetchone() hit = d2.execute("select v from mt where w = ? and +k = ?", (w2, k)).fetchone()
if hit: if hit:
hit = hit[0] hit = hit[0]
if hit != v: if hit != v:
if NC and hit is not None: ndone += 1
nskip += 1
continue
ncopy += 1
if hit is not None: if hit is not None:
d2.execute("delete from mt where w = ? and +k = ?", (w2s, k)) d2.execute("delete from mt where w = ? and +k = ?", (w2, k))
d2.execute("insert into mt values (?,?,?)", (w2s, k, v)) d2.execute("insert into mt values (?,?,?)", (w2, k, v))
if rm: if rm:
d2.execute("delete from mt where w = ? and +k = 't:mtp'", (w2s,)) d2.execute("delete from mt where w = ? and +k = 't:mtp'", (w2,))
d2.commit() d2.commit()
print(f"copied {ncopy} {tag} tags over, skipped {nskip}") print(f"copied {ndone} {tag} tags over")
def examples():
print(
"""
# clearing the journal
./dbtool.py up2k.db
# copy tags ".bpm" and "key" from old.db to up2k.db, and remove the mtp flag from matching files (so copyparty won't run any mtps on it)
./dbtool.py -ls up2k.db
./dbtool.py -src old.db up2k.db -cmp
./dbtool.py -src old.v3 up2k.db -rm-mtp-flag -copy key
./dbtool.py -src old.v3 up2k.db -rm-mtp-flag -copy .bpm -vac
"""
)
def main(): def main():
global NC, BY_PATH
os.system("") os.system("")
print() print()
ap = argparse.ArgumentParser() ap = argparse.ArgumentParser()
ap.add_argument("db", help="database to work on") ap.add_argument("db", help="database to work on")
ap.add_argument("-h2", action="store_true", help="show examples")
ap.add_argument("-src", metavar="DB", type=str, help="database to copy from") ap.add_argument("-src", metavar="DB", type=str, help="database to copy from")
ap2 = ap.add_argument_group("informational / read-only stuff") ap2 = ap.add_argument_group("informational / read-only stuff")
@@ -226,29 +185,11 @@ def main():
ap2.add_argument( ap2.add_argument(
"-rm-mtp-flag", "-rm-mtp-flag",
action="store_true", action="store_true",
help="when an mtp tag is copied over, also mark that file as done, so copyparty won't run any mtps on those files", help="when an mtp tag is copied over, also mark that as done, so copyparty won't run mtp on it",
) )
ap2.add_argument("-vac", action="store_true", help="optimize DB") ap2.add_argument("-vac", action="store_true", help="optimize DB")
ap2 = ap.add_argument_group("behavior modifiers")
ap2.add_argument(
"-nc",
action="store_true",
help="no-clobber; don't replace/overwrite existing tags",
)
ap2.add_argument(
"-by-path",
action="store_true",
help="match files based on location rather than warks (content-hash), use this if the databases have different wark salts",
)
ar = ap.parse_args() ar = ap.parse_args()
if ar.h2:
examples()
return
NC = ar.nc
BY_PATH = ar.by_path
for v in [ar.db, ar.src]: for v in [ar.db, ar.src]:
if v and not os.path.exists(v): if v and not os.path.exists(v):

View File

@@ -1,23 +1,10 @@
standalone programs which take an audio file as argument standalone programs which take an audio file as argument
**NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen`
some of these rely on libraries which are not MIT-compatible some of these rely on libraries which are not MIT-compatible
* [audio-bpm.py](./audio-bpm.py) detects the BPM of music using the BeatRoot Vamp Plugin; imports GPL2 * [audio-bpm.py](./audio-bpm.py) detects the BPM of music using the BeatRoot Vamp Plugin; imports GPL2
* [audio-key.py](./audio-key.py) detects the melodic key of music using the Mixxx fork of keyfinder; imports GPL3 * [audio-key.py](./audio-key.py) detects the melodic key of music using the Mixxx fork of keyfinder; imports GPL3
these invoke standalone programs which are GPL or similar, so is legally fine for most purposes:
* [media-hash.py](./media-hash.py) generates checksums for audio and video streams; uses FFmpeg (LGPL or GPL)
* [image-noexif.py](./image-noexif.py) removes exif tags from images; uses exiftool (GPLv1 or artistic-license)
these do not have any problematic dependencies at all:
* [cksum.py](./cksum.py) computes various checksums
* [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser)
* [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty
# dependencies # dependencies
@@ -31,10 +18,7 @@ run [`install-deps.sh`](install-deps.sh) to build/install most dependencies requ
# usage from copyparty # usage from copyparty
`copyparty -e2dsa -e2ts` followed by any combination of these: `copyparty -e2dsa -e2ts -mtp key=f,audio-key.py -mtp .bpm=f,audio-bpm.py`
* `-mtp key=f,audio-key.py`
* `-mtp .bpm=f,audio-bpm.py`
* `-mtp ahash,vhash=f,media-hash.py`
* `f,` makes the detected value replace any existing values * `f,` makes the detected value replace any existing values
* the `.` in `.bpm` indicates numeric value * the `.` in `.bpm` indicates numeric value
@@ -42,12 +26,9 @@ run [`install-deps.sh`](install-deps.sh) to build/install most dependencies requ
* `mtp` modules will not run if a file has existing tags in the db, so clear out the tags with `-e2tsr` the first time you launch with new `mtp` options * `mtp` modules will not run if a file has existing tags in the db, so clear out the tags with `-e2tsr` the first time you launch with new `mtp` options
## usage with volflags ## usage with volume-flags
instead of affecting all volumes, you can set the options for just one volume like so: instead of affecting all volumes, you can set the options for just one volume like so:
```
`copyparty -v /mnt/nas/music:/music:r:c,e2dsa:c,e2ts` immediately followed by any combination of these: copyparty -v /mnt/nas/music:/music:r:cmtp=key=f,audio-key.py:cmtp=.bpm=f,audio-bpm.py:ce2dsa:ce2ts
```
* `:c,mtp=key=f,audio-key.py`
* `:c,mtp=.bpm=f,audio-bpm.py`
* `:c,mtp=ahash,vhash=f,media-hash.py`

View File

@@ -19,18 +19,17 @@ dep: ffmpeg
def det(tf): def det(tf):
# fmt: off # fmt: off
sp.check_call([ sp.check_call([
b"ffmpeg", "ffmpeg",
b"-nostdin", "-nostdin",
b"-hide_banner", "-hide_banner",
b"-v", b"fatal", "-v", "fatal",
b"-ss", b"13", "-ss", "13",
b"-y", b"-i", fsenc(sys.argv[1]), "-y", "-i", fsenc(sys.argv[1]),
b"-map", b"0:a:0", "-ac", "1",
b"-ac", b"1", "-ar", "22050",
b"-ar", b"22050", "-t", "300",
b"-t", b"300", "-f", "f32le",
b"-f", b"f32le", tf
fsenc(tf)
]) ])
# fmt: on # fmt: on

View File

@@ -23,15 +23,14 @@ dep: ffmpeg
def det(tf): def det(tf):
# fmt: off # fmt: off
sp.check_call([ sp.check_call([
b"ffmpeg", "ffmpeg",
b"-nostdin", "-nostdin",
b"-hide_banner", "-hide_banner",
b"-v", b"fatal", "-v", "fatal",
b"-y", b"-i", fsenc(sys.argv[1]), "-y", "-i", fsenc(sys.argv[1]),
b"-map", b"0:a:0", "-t", "300",
b"-t", b"300", "-sample_fmt", "s16",
b"-sample_fmt", b"s16", tf
fsenc(tf)
]) ])
# fmt: on # fmt: on

View File

@@ -1,89 +0,0 @@
#!/usr/bin/env python3
import sys
import json
import zlib
import struct
import base64
import hashlib
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p
"""
calculates various checksums for uploads,
usage: -mtp crc32,md5,sha1,sha256b=ad,bin/mtag/cksum.py
"""
def main():
config = "crc32 md5 md5b sha1 sha1b sha256 sha256b sha512/240 sha512b/240"
# b suffix = base64 encoded
# slash = truncate to n bits
known = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
config = config.split()
hashers = {
k: v()
for k, v in known.items()
if k in [x.split("/")[0].rstrip("b") for x in known]
}
crc32 = 0 if "crc32" in config else None
with open(fsenc(sys.argv[1]), "rb", 512 * 1024) as f:
while True:
buf = f.read(64 * 1024)
if not buf:
break
for x in hashers.values():
x.update(buf)
if crc32 is not None:
crc32 = zlib.crc32(buf, crc32)
ret = {}
for s in config:
alg = s.split("/")[0]
b64 = alg.endswith("b")
alg = alg.rstrip("b")
if alg in hashers:
v = hashers[alg].digest()
elif alg == "crc32":
v = crc32
if v < 0:
v &= 2 ** 32 - 1
v = struct.pack(">L", v)
else:
raise Exception("what is {}".format(s))
if "/" in s:
v = v[: int(int(s.split("/")[1]) / 8)]
if b64:
v = base64.b64encode(v).decode("ascii").rstrip("=")
else:
try:
v = v.hex()
except:
import binascii
v = binascii.hexlify(v)
ret[s] = v
print(json.dumps(ret, indent=4))
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env python3
"""
fetch latest msg from guestbook and return as tag
example copyparty config to use this:
--urlform save,get -vsrv/hello:hello:w:c,e2ts,mtp=guestbook=t10,ad,p,bin/mtag/guestbook-read.py:mte=+guestbook
explained:
for realpath srv/hello (served at /hello), write-only for eveyrone,
enable file analysis on upload (e2ts),
use mtp plugin "bin/mtag/guestbook-read.py" to provide metadata tag "guestbook",
do this on all uploads regardless of extension,
t10 = 10 seconds timeout for each dwonload,
ad = parse file regardless if FFmpeg thinks it is audio or not
p = request upload info as json on stdin (need ip)
mte=+guestbook enabled indexing of that tag for this volume
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
"""
import json
import os
import sqlite3
import sys
# set 0 to allow infinite msgs from one IP,
# other values delete older messages to make space,
# so 1 only keeps latest msg
NUM_MSGS_TO_KEEP = 1
def main():
fp = os.path.abspath(sys.argv[1])
fdir = os.path.dirname(fp)
zb = sys.stdin.buffer.read()
zs = zb.decode("utf-8", "replace")
md = json.loads(zs)
ip = md["up_ip"]
# can put the database inside `fdir` if you'd like,
# by default it saves to PWD:
# os.chdir(fdir)
db = sqlite3.connect("guestbook.db3")
with db:
t = "select msg from gb where ip = ? order by ts desc"
r = db.execute(t, (ip,)).fetchone()
if r:
print(r[0])
if __name__ == "__main__":
main()

View File

@@ -1,111 +0,0 @@
#!/usr/bin/env python3
"""
store messages from users in an sqlite database
which can be read from another mtp for example
takes input from application/x-www-form-urlencoded POSTs,
for example using the message/pager function on the website
example copyparty config to use this:
--urlform save,get -vsrv/hello:hello:w:c,e2ts,mtp=xgb=ebin,t10,ad,p,bin/mtag/guestbook.py:mte=+xgb
explained:
for realpath srv/hello (served at /hello),write-only for eveyrone,
enable file analysis on upload (e2ts),
use mtp plugin "bin/mtag/guestbook.py" to provide metadata tag "xgb",
do this on all uploads with the file extension "bin",
t300 = 300 seconds timeout for each dwonload,
ad = parse file regardless if FFmpeg thinks it is audio or not
p = request upload info as json on stdin
mte=+xgb enabled indexing of that tag for this volume
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
"""
import json
import os
import sqlite3
import sys
from urllib.parse import unquote_to_bytes as unquote
# set 0 to allow infinite msgs from one IP,
# other values delete older messages to make space,
# so 1 only keeps latest msg
NUM_MSGS_TO_KEEP = 1
def main():
fp = os.path.abspath(sys.argv[1])
fdir = os.path.dirname(fp)
fname = os.path.basename(fp)
if not fname.startswith("put-") or not fname.endswith(".bin"):
raise Exception("not a post file")
zb = sys.stdin.buffer.read()
zs = zb.decode("utf-8", "replace")
md = json.loads(zs)
buf = b""
with open(fp, "rb") as f:
while True:
b = f.read(4096)
buf += b
if len(buf) > 4096:
raise Exception("too big")
if not b:
break
if not buf:
raise Exception("file is empty")
buf = unquote(buf.replace(b"+", b" "))
txt = buf.decode("utf-8")
if not txt.startswith("msg="):
raise Exception("does not start with msg=")
ip = md["up_ip"]
ts = md["up_at"]
txt = txt[4:]
# can put the database inside `fdir` if you'd like,
# by default it saves to PWD:
# os.chdir(fdir)
db = sqlite3.connect("guestbook.db3")
try:
db.execute("select 1 from gb").fetchone()
except:
with db:
db.execute("create table gb (ip text, ts real, msg text)")
db.execute("create index gb_ip on gb(ip)")
with db:
if NUM_MSGS_TO_KEEP == 1:
t = "delete from gb where ip = ?"
db.execute(t, (ip,))
t = "insert into gb values (?,?,?)"
db.execute(t, (ip, ts, txt))
if NUM_MSGS_TO_KEEP > 1:
t = "select ts from gb where ip = ? order by ts desc"
hits = db.execute(t, (ip,)).fetchall()
if len(hits) > NUM_MSGS_TO_KEEP:
lim = hits[NUM_MSGS_TO_KEEP][0]
t = "delete from gb where ip = ? and ts <= ?"
db.execute(t, (ip, lim))
print(txt)
if __name__ == "__main__":
main()

View File

@@ -1,95 +0,0 @@
#!/usr/bin/env python3
"""
remove exif tags from uploaded images
dependencies:
exiftool
about:
creates a "noexif" subfolder and puts exif-stripped copies of each image there,
the reason for the subfolder is to avoid issues with the up2k.db / deduplication:
if the original image is modified in-place, then copyparty will keep the original
hash in up2k.db for a while (until the next volume rescan), so if the image is
reuploaded after a rescan then the upload will be renamed and kept as a dupe
alternatively you could switch the logic around, making a copy of the original
image into a subfolder named "exif" and modify the original in-place, but then
up2k.db will be out of sync until the next rescan, so any additional uploads
of the same image will get symlinked (deduplicated) to the modified copy
instead of the original in "exif"
or maybe delete the original image after processing, that would kinda work too
example copyparty config to use this:
-v/mnt/nas/pics:pics:rwmd,ed:c,e2ts,mte=+noexif:c,mtp=noexif=ejpg,ejpeg,ad,bin/mtag/image-noexif.py
explained:
for realpath /mnt/nas/pics (served at /pics) with read-write-modify-delete for ed,
enable file analysis on upload (e2ts),
append "noexif" to the list of known tags (mtp),
and use mtp plugin "bin/mtag/image-noexif.py" to provide that tag,
do this on all uploads with the file extension "jpg" or "jpeg",
ad = parse file regardless if FFmpeg thinks it is audio or not
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
and your python must have sqlite3 support compiled in
"""
import os
import sys
import filecmp
import subprocess as sp
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
def main():
cwd, fn = os.path.split(sys.argv[1])
if os.path.basename(cwd) == "noexif":
return
os.chdir(cwd)
f1 = fsenc(fn)
f2 = os.path.join(b"noexif", f1)
cmd = [
b"exiftool",
b"-exif:all=",
b"-iptc:all=",
b"-xmp:all=",
b"-P",
b"-o",
b"noexif/",
b"--",
f1,
]
sp.check_output(cmd)
if not os.path.exists(f2):
print("failed")
return
if filecmp.cmp(f1, f2, shallow=False):
print("clean")
else:
print("exif")
# lastmod = os.path.getmtime(f1)
# times = (int(time.time()), int(lastmod))
# os.utime(f2, times)
if __name__ == "__main__":
try:
main()
except:
pass

View File

@@ -4,9 +4,7 @@ set -e
# install dependencies for audio-*.py # install dependencies for audio-*.py
# #
# linux/alpine: requires gcc g++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-dev py3-{wheel,pip} py3-numpy{,-dev} # linux: requires {python3,ffmpeg,fftw}-dev py3-{wheel,pip} py3-numpy{,-dev} vamp-sdk-dev patchelf
# linux/debian: requires libav{codec,device,filter,format,resample,util}-dev {libfftw3,python3,libsndfile1}-dev python3-{numpy,pip} vamp-{plugin-sdk,examples} patchelf cmake
# linux/fedora: requires gcc gcc-c++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-devel python3-numpy vamp-plugin-sdk qm-vamp-plugins
# win64: requires msys2-mingw64 environment # win64: requires msys2-mingw64 environment
# macos: requires macports # macos: requires macports
# #
@@ -102,11 +100,8 @@ export -f dl_files
github_tarball() { github_tarball() {
rm -rf g
mkdir g
cd g
dl_text "$1" | dl_text "$1" |
tee ../json | tee json |
( (
# prefer jq if available # prefer jq if available
jq -r '.tarball_url' || jq -r '.tarball_url' ||
@@ -115,11 +110,8 @@ github_tarball() {
awk -F\" '/"tarball_url": "/ {print$4}' awk -F\" '/"tarball_url": "/ {print$4}'
) | ) |
tee /dev/stderr | tee /dev/stderr |
head -n 1 |
tr -d '\r' | tr '\n' '\0' | tr -d '\r' | tr '\n' '\0' |
xargs -0 bash -c 'dl_files "$@"' _ xargs -0 bash -c 'dl_files "$@"' _
mv * ../tgz
cd ..
} }
@@ -134,7 +126,6 @@ gitlab_tarball() {
tr \" '\n' | grep -E '\.tar\.gz$' | head -n 1 tr \" '\n' | grep -E '\.tar\.gz$' | head -n 1
) | ) |
tee /dev/stderr | tee /dev/stderr |
head -n 1 |
tr -d '\r' | tr '\n' '\0' | tr -d '\r' | tr '\n' '\0' |
tee links | tee links |
xargs -0 bash -c 'dl_files "$@"' _ xargs -0 bash -c 'dl_files "$@"' _
@@ -146,27 +137,20 @@ install_keyfinder() {
# use msys2 in mingw-w64 mode # use msys2 in mingw-w64 mode
# pacman -S --needed mingw-w64-x86_64-{ffmpeg,python} # pacman -S --needed mingw-w64-x86_64-{ffmpeg,python}
[ -e $HOME/pe/keyfinder ] && {
echo found a keyfinder build in ~/pe, skipping
return
}
cd "$td"
github_tarball https://api.github.com/repos/mixxxdj/libkeyfinder/releases/latest github_tarball https://api.github.com/repos/mixxxdj/libkeyfinder/releases/latest
ls -al
tar -xf tgz tar -xf mixxxdj-libkeyfinder-*
rm tgz rm -- *.tar.gz
cd mixxxdj-libkeyfinder* cd mixxxdj-libkeyfinder*
h="$HOME" h="$HOME"
so="lib/libkeyfinder.so" so="lib/libkeyfinder.so"
memes=(-DBUILD_TESTING=OFF) memes=()
[ $win ] && [ $win ] &&
so="bin/libkeyfinder.dll" && so="bin/libkeyfinder.dll" &&
h="$(printf '%s\n' "$USERPROFILE" | tr '\\' '/')" && h="$(printf '%s\n' "$USERPROFILE" | tr '\\' '/')" &&
memes+=(-G "MinGW Makefiles") memes+=(-G "MinGW Makefiles" -DBUILD_TESTING=OFF)
[ $mac ] && [ $mac ] &&
so="lib/libkeyfinder.dylib" so="lib/libkeyfinder.dylib"
@@ -186,7 +170,7 @@ install_keyfinder() {
} }
# rm -rf /Users/ed/Library/Python/3.9/lib/python/site-packages/*keyfinder* # rm -rf /Users/ed/Library/Python/3.9/lib/python/site-packages/*keyfinder*
CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include -I/usr/include/ffmpeg" \ CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include" \
LDFLAGS="-L$h/pe/keyfinder/lib -L$h/pe/keyfinder/lib64 -L/opt/local/lib" \ LDFLAGS="-L$h/pe/keyfinder/lib -L$h/pe/keyfinder/lib64 -L/opt/local/lib" \
PKG_CONFIG_PATH=/c/msys64/mingw64/lib/pkgconfig \ PKG_CONFIG_PATH=/c/msys64/mingw64/lib/pkgconfig \
$pybin -m pip install --user keyfinder $pybin -m pip install --user keyfinder
@@ -223,22 +207,6 @@ install_vamp() {
$pybin -m pip install --user vamp $pybin -m pip install --user vamp
cd "$td"
echo '#include <vamp-sdk/Plugin.h>' | gcc -x c -c -o /dev/null - || [ -e ~/pe/vamp-sdk ] || {
printf '\033[33mcould not find the vamp-sdk, building from source\033[0m\n'
(dl_files yolo https://code.soundsoftware.ac.uk/attachments/download/2588/vamp-plugin-sdk-2.9.0.tar.gz)
sha512sum -c <(
echo "7ef7f837d19a08048b059e0da408373a7964ced452b290fae40b85d6d70ca9000bcfb3302cd0b4dc76cf2a848528456f78c1ce1ee0c402228d812bd347b6983b -"
) <vamp-plugin-sdk-2.9.0.tar.gz
tar -xf vamp-plugin-sdk-2.9.0.tar.gz
rm -- *.tar.gz
ls -al
cd vamp-plugin-sdk-*
./configure --prefix=$HOME/pe/vamp-sdk
make -j1 install
}
cd "$td"
have_beatroot || { have_beatroot || {
printf '\033[33mcould not find the vamp beatroot plugin, building from source\033[0m\n' printf '\033[33mcould not find the vamp beatroot plugin, building from source\033[0m\n'
(dl_files yolo https://code.soundsoftware.ac.uk/attachments/download/885/beatroot-vamp-v1.0.tar.gz) (dl_files yolo https://code.soundsoftware.ac.uk/attachments/download/885/beatroot-vamp-v1.0.tar.gz)
@@ -246,11 +214,8 @@ install_vamp() {
echo "1f444d1d58ccf565c0adfe99f1a1aa62789e19f5071e46857e2adfbc9d453037bc1c4dcb039b02c16240e9b97f444aaff3afb625c86aa2470233e711f55b6874 -" echo "1f444d1d58ccf565c0adfe99f1a1aa62789e19f5071e46857e2adfbc9d453037bc1c4dcb039b02c16240e9b97f444aaff3afb625c86aa2470233e711f55b6874 -"
) <beatroot-vamp-v1.0.tar.gz ) <beatroot-vamp-v1.0.tar.gz
tar -xf beatroot-vamp-v1.0.tar.gz tar -xf beatroot-vamp-v1.0.tar.gz
rm -- *.tar.gz
cd beatroot-vamp-v1.0 cd beatroot-vamp-v1.0
[ -e ~/pe/vamp-sdk ] && make -f Makefile.linux -j4
sed -ri 's`^(CFLAGS :=.*)`\1 -I'$HOME'/pe/vamp-sdk/include`' Makefile.linux
make -f Makefile.linux -j4 LDFLAGS=-L$HOME/pe/vamp-sdk/lib
# /home/ed/vamp /home/ed/.vamp /usr/local/lib/vamp # /home/ed/vamp /home/ed/.vamp /usr/local/lib/vamp
mkdir ~/vamp mkdir ~/vamp
cp -pv beatroot-vamp.* ~/vamp/ cp -pv beatroot-vamp.* ~/vamp/
@@ -264,7 +229,6 @@ install_vamp() {
# not in use because it kinda segfaults, also no windows support # not in use because it kinda segfaults, also no windows support
install_soundtouch() { install_soundtouch() {
cd "$td"
gitlab_tarball https://gitlab.com/api/v4/projects/soundtouch%2Fsoundtouch/releases gitlab_tarball https://gitlab.com/api/v4/projects/soundtouch%2Fsoundtouch/releases
tar -xvf soundtouch-* tar -xvf soundtouch-*

View File

@@ -1,73 +0,0 @@
#!/usr/bin/env python
import re
import sys
import json
import time
import base64
import hashlib
import subprocess as sp
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
"""
dep: ffmpeg
"""
def det():
# fmt: off
cmd = [
b"ffmpeg",
b"-nostdin",
b"-hide_banner",
b"-v", b"fatal",
b"-i", fsenc(sys.argv[1]),
b"-f", b"framemd5",
b"-"
]
# fmt: on
p = sp.Popen(cmd, stdout=sp.PIPE)
# ps = io.TextIOWrapper(p.stdout, encoding="utf-8")
ps = p.stdout
chans = {}
for ln in ps:
if ln.startswith(b"#stream#"):
break
m = re.match(r"^#media_type ([0-9]): ([a-zA-Z])", ln.decode("utf-8"))
if m:
chans[m.group(1)] = m.group(2)
hashers = [hashlib.sha512(), hashlib.sha512()]
for ln in ps:
n = int(ln[:1])
v = ln.rsplit(b",", 1)[-1].strip()
hashers[n].update(v)
r = {}
for k, v in chans.items():
dg = hashers[int(k)].digest()[:12]
dg = base64.urlsafe_b64encode(dg).decode("ascii")
r[v[0].lower() + "hash"] = dg
print(json.dumps(r, indent=4))
def main():
try:
det()
except:
pass # mute
if __name__ == "__main__":
main()

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
"""
mtp test -- opens a texteditor
usage:
-vsrv/v1:v1:r:c,mte=+x1:c,mtp=x1=ad,p,bin/mtag/mousepad.py
explained:
c,mte: list of tags to index in this volume
c,mtp: add new tag provider
x1: dummy tag to provide
ad: dontcare if audio or not
p: priority 1 (run after initial tag-scan with ffprobe or mutagen)
"""
def main():
env = os.environ.copy()
env["DISPLAY"] = ":0.0"
if False:
# open the uploaded file
fp = sys.argv[-1]
else:
# display stdin contents (`oth_tags`)
fp = "/dev/stdin"
p = sp.Popen(["/usr/bin/mousepad", fp])
p.communicate()
main()

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env python
import json
import os
import subprocess as sp
import sys
import time
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
_ = r"""
first checks the tag "vidchk" which must be "ok" to continue,
then uploads all files to some cloud storage (RCLONE_REMOTE)
and DELETES THE ORIGINAL FILES if rclone returns 0 ("success")
deps:
rclone
usage:
-mtp x2=t43200,ay,p2,bin/mtag/rclone-upload.py
explained:
t43200: timeout 12h
ay: only process files which contain audio (including video with audio)
p2: set priority 2 (after vidchk's suggested priority of 1),
so the output of vidchk will be passed in here
complete usage example as vflags along with vidchk:
-vsrv/vidchk:vidchk:r:rw,ed:c,e2dsa,e2ts,mtp=vidchk=t600,p,bin/mtag/vidchk.py:c,mtp=rupload=t43200,ay,p2,bin/mtag/rclone-upload.py:c,mte=+vidchk,rupload
setup: see https://rclone.org/drive/
if you wanna use this script standalone / separately from copyparty,
either set CONDITIONAL_UPLOAD False or provide the following stdin:
{"vidchk":"ok"}
"""
RCLONE_REMOTE = "notmybox"
CONDITIONAL_UPLOAD = True
def main():
fp = sys.argv[1]
if CONDITIONAL_UPLOAD:
zb = sys.stdin.buffer.read()
zs = zb.decode("utf-8", "replace")
md = json.loads(zs)
chk = md.get("vidchk", None)
if chk != "ok":
print(f"vidchk={chk}", file=sys.stderr)
sys.exit(1)
dst = f"{RCLONE_REMOTE}:".encode("utf-8")
cmd = [b"rclone", b"copy", b"--", fsenc(fp), dst]
t0 = time.time()
try:
sp.check_call(cmd)
except:
print("rclone failed", file=sys.stderr)
sys.exit(1)
print(f"{time.time() - t0:.1f} sec")
os.unlink(fsenc(fp))
if __name__ == "__main__":
main()

View File

@@ -1,21 +0,0 @@
// ==UserScript==
// @name twitter-unmute
// @namespace http://ocv.me/
// @version 0.1
// @description memes
// @author ed <irc.rizon.net>
// @match https://twitter.com/*
// @icon https://www.google.com/s2/favicons?domain=twitter.com
// @grant GM_addStyle
// ==/UserScript==
// runs inside the page context: forcibly keeps the twitter <video> unmuted
function grunnur() {
    var unmute = function () {
        //document.querySelector('div[aria-label="Unmute"]').click();
        document.querySelector('video').muted = false;
    };
    setInterval(unmute, 200);
}

// userscripts run in a sandbox, so serialize the function
// and inject it as a <script> into the page itself
var scr = document.createElement('script');
scr.textContent = '(' + grunnur.toString() + ')();';
(document.head || document.getElementsByTagName('head')[0]).appendChild(scr);

View File

@@ -1,39 +0,0 @@
# example config file to use copyparty as a youtube manifest collector,
# use with copyparty like: python copyparty.py -c yt-ipr.conf
#
# see docs/example.conf for a better explanation of the syntax, but
# newlines are block separators, so adding blank lines inside a volume definition is bad
# (use comments as separators instead)
# create user ed, password wark
u ed:wark
# create a volume at /ytm which stores files at ./srv/ytm
./srv/ytm
/ytm
# write-only, but read-write for user ed
w
rw ed
# rescan the volume on startup
c e2dsa
# collect tags from all new files since last scan
c e2ts
# optionally enable compression to make the files 50% smaller
c pk
# only allow uploads which are between 16k and 1m large
c sz=16k-1m
# allow up to 10 uploads over 5 minutes from each ip
c maxn=10,300
# move uploads into subfolders: YEAR-MONTH / DAY-HOUR / <upload>
c rotf=%Y-%m/%d-%H
# delete uploads when they are 24 hours old
c lifetime=86400
# add the parser and tell copyparty what tags it can expect from it
c mtp=yt-id,yt-title,yt-author,yt-channel,yt-views,yt-private,yt-manifest,yt-expires=bin/mtag/yt-ipr.py
# decide which tags we want to index and in what order
c mte=yt-id,yt-title,yt-author,yt-channel,yt-views,yt-private,yt-manifest,yt-expires
# create any other volumes you'd like down here, or merge this with an existing config file

View File

@@ -1,47 +0,0 @@
// ==UserScript==
// @name youtube-playerdata-hub
// @match https://youtube.com/*
// @match https://*.youtube.com/*
// @version 1.0
// @grant GM_addStyle
// ==/UserScript==
// scrapes the youtube player-data and uploads each new manifest to copyparty
function main() {
    var server = 'https://127.0.0.1:3923/ytm?pw=wark',
        interval = 60; // sec

    var posted = {};  // manifest-url -> 1, to avoid re-uploading

    // PUT one player-data blob unless this manifest was already sent
    function send(txt, mf_url, desc) {
        if (posted[mf_url])
            return;

        fetch(server + '&_=' + Date.now(), { method: "PUT", body: txt });
        console.log('[yt-pdh] yeet %d bytes, %s', txt.length, desc);
        posted[mf_url] = 1;
    }

    // poll the page for the current video's player-data
    function collect() {
        try {
            var pd = document.querySelector('ytd-watch-flexy');
            if (!pd)
                return console.log('[yt-pdh] no video found');

            pd = pd.playerData;
            var mu = pd.streamingData.dashManifestUrl || pd.streamingData.hlsManifestUrl;
            if (!mu || !mu.length)
                return console.log('[yt-pdh] no manifest found');

            var desc = pd.videoDetails.videoId + ', ' + pd.videoDetails.title;
            send(JSON.stringify(pd), mu, desc);
        }
        catch (ex) {
            console.log("[yt-pdh]", ex);
        }
    }
    setInterval(collect, interval * 1000);
}

// inject into the page context (userscript sandbox cannot reach playerData)
var scr = document.createElement('script');
scr.textContent = '(' + main.toString() + ')();';
(document.head || document.getElementsByTagName('head')[0]).appendChild(scr);
console.log('[yt-pdh] a');

View File

@@ -1,139 +0,0 @@
#!/usr/bin/env python3
"""
use copyparty as a chromecast replacement:
* post a URL and it will open in the default browser
* upload a file and it will open in the default application
* the `key` command simulates keyboard input
* the `x` command executes other xdotool commands
* the `c` command executes arbitrary unix commands
the android app makes it a breeze to post pics and links:
https://github.com/9001/party-up/releases
(iOS devices have to rely on the web-UI)
goes without saying, but this is HELLA DANGEROUS,
GIVES RCE TO ANYONE WHO HAVE UPLOAD PERMISSIONS
example copyparty config to use this:
--urlform save,get -v.::w:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,kn,c0,bin/mtag/very-bad-idea.py
recommended deps:
apt install xdotool libnotify-bin
https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/meadup.js
and you probably want `twitter-unmute.user.js` from the res folder
-----------------------------------------------------------------------
-- startup script:
-----------------------------------------------------------------------
#!/bin/bash
set -e
# create qr code
ip=$(ip r | awk '/^default/{print$(NF-2)}'); echo http://$ip:3923/ | qrencode -o - -s 4 >/dev/shm/cpp-qr.png
/usr/bin/feh -x /dev/shm/cpp-qr.png &
# reposition and make topmost (with janky raspbian support)
( sleep 0.5
xdotool search --name cpp-qr.png windowactivate --sync windowmove 1780 0
wmctrl -r :ACTIVE: -b toggle,above || true
ps aux | grep -E 'sleep[ ]7\.27' ||
while true; do
w=$(xdotool getactivewindow)
xdotool search --name cpp-qr.png windowactivate windowraise windowfocus
xdotool windowactivate $w
xdotool windowfocus $w
sleep 7.27 || break
done &
xeyes # distraction window to prevent ^w from closing the qr-code
) &
# bail if copyparty is already running
ps aux | grep -E '[3] copy[p]arty' && exit 0
# dumb chrome wrapper to allow autoplay
cat >/usr/local/bin/chromium-browser <<'EOF'
#!/bin/bash
set -e
/usr/bin/chromium-browser --autoplay-policy=no-user-gesture-required "$@"
EOF
chmod 755 /usr/local/bin/chromium-browser
# start the server (note: replace `-v.::rw:` with `-v.::w:` to disallow retrieving uploaded stuff)
cd ~/Downloads; python3 copyparty-sfx.py --urlform save,get -v.::rw:c,e2d,e2t,mte=+a1:c,mtp=a1=ad,kn,very-bad-idea.py
"""
import os
import sys
import time
import subprocess as sp
from urllib.parse import unquote_to_bytes as unquote
def main():
    """Dispatch one uploaded file: url-form posts get parsed, anything else is opened."""
    path = os.path.abspath(sys.argv[1])
    with open(path, "rb") as f:
        head = f.read(4096)

    # url-form posts start with "msg="; everything else is a regular file
    if head.startswith(b"msg="):
        open_post(head)
    else:
        open_url(path)
def open_post(txt):
    """Handle a urlencoded message post.

    txt: raw request body (bytes) starting with b"msg=".
    The first word selects a command: `key` simulates keyboard input,
    `x` runs an arbitrary xdotool command, `c` runs an arbitrary unix
    command (leading WORD=VAL pairs become environment variables);
    anything else is treated as a URL and opened.
    """
    txt = unquote(txt.replace(b"+", b" ")).decode("utf-8")[4:]
    try:
        k, v = txt.split(" ", 1)
    except:
        # single-word message; treat the whole thing as a URL.
        # BUGFIX: must return here -- k/v are unbound and falling through
        # to the checks below would raise NameError
        open_url(txt)
        return

    if k == "key":
        # simulate keyboard input
        sp.call(["xdotool", "key"] + v.split(" "))
    elif k == "x":
        # arbitrary xdotool command
        sp.call(["xdotool"] + v.split(" "))
    elif k == "c":
        # arbitrary unix command; strip leading WORD=VAL pairs into env
        env = os.environ.copy()
        while " " in v:
            v1, v2 = v.split(" ", 1)
            if "=" not in v1:
                break

            ek, ev = v1.split("=", 1)
            env[ek] = ev
            v = v2

        sp.call(v.split(" "), env=env)
    else:
        open_url(txt)
def open_url(txt):
    """Show a notification for txt, then open it with xdg-open.

    For non-image content, running media players and the current browser
    tab are closed first so the new content takes over the screen.
    """
    suffix = txt.rsplit(".")[-1].lower()
    sp.call(["notify-send", "--", txt])

    if suffix not in ["jpg", "jpeg", "png", "gif", "webp"]:
        # kill fullscreen players before opening something new
        # (alternative: `wmctrl -c :ACTIVE:` closes the active window correctly)
        sp.call(["killall", "vlc"])
        sp.call(["killall", "mpv"])
        sp.call(["killall", "feh"])
        time.sleep(0.5)
        for _ in range(20):
            sp.call(["xdotool", "key", "ctrl+w"])  # closes the open tab correctly

    # close any error messages:
    sp.call(["xdotool", "search", "--name", "Error", "windowclose"])
    sp.call(["xdg-open", txt])
main()

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env python3
import json
import re
import os
import sys
import subprocess as sp
try:
from copyparty.util import fsenc
except:
def fsenc(p):
return p.encode("utf-8")
_ = r"""
inspects video files for errors and such
plus stores a bunch of metadata to filename.ff.json
usage:
-mtp vidchk=t600,ay,p,bin/mtag/vidchk.py
explained:
t600: timeout 10min
ay: only process files which contain audio (including video with audio)
p: set priority 1 (lowest priority after initial ffprobe/mutagen for base tags),
makes copyparty feed base tags into this script as json
if you wanna use this script standalone / separately from copyparty,
provide the video resolution on stdin as json: {"res":"1920x1080"}
"""
FAST = True # parse entire file at container level
# FAST = False # fully decode audio and video streams
# warnings to ignore
# ffmpeg/ffprobe warnings which are known to be harmless noise
harmless = re.compile(
    r"Unsupported codec with id |Could not find codec parameters.*Attachment:|analyzeduration"
    + r"|timescale not set"
)


def wfilter(lines):
    """Drop blank lines and known-harmless ffmpeg warnings."""
    kept = []
    for ln in lines:
        if ln.strip() and not harmless.search(ln):
            kept.append(ln)
    return kept


def errchk(so, se, rc, dbg):
    """Summarize the outcome of an ffmpeg/ffprobe run.

    so, se: captured stdout/stderr (bytes); rc: process exit code;
    dbg: optional path to dump the raw output into (falsy = skip).
    Returns an "ERROR ..."/"Warning ..." string, or None if all is well.
    """
    if dbg:
        with open(dbg, "wb") as f:
            f.write(b"so:\n" + so + b"\nse:\n" + se + b"\n")

    if rc:
        # nonzero exit: report the first relevant line (or the first line at all)
        lines = (so + se).decode("utf-8", "replace").split("\n", 1)
        lines = wfilter(lines) or lines
        return f"ERROR {rc}: {lines[0]}"

    if se:
        # clean exit but stderr output; only warn if it is not known noise
        lines = wfilter(se.decode("utf-8", "replace").split("\n", 1))
        if lines:
            return f"Warning: {lines[0]}"

    return None
def main():
    """Sanity-check the video file given as argv[1].

    Base tags arrive as json on stdin (must contain "res" = "WxH").
    Writes <file>.ff.json (ffprobe metadata) next to the file, and
    returns a status string; a falsy return means the file is ok.
    """
    fp = sys.argv[1]
    zb = sys.stdin.buffer.read()
    zs = zb.decode("utf-8", "replace")
    md = json.loads(zs)

    # a ".processed" flag-file in the same folder disables checking entirely
    fdir = os.path.dirname(os.path.realpath(fp))
    flag = os.path.join(fdir, ".processed")
    if os.path.exists(flag):
        return "already processed"

    try:
        w, h = [int(x) for x in md["res"].split("x")]
        if not w + h:
            raise Exception()
    except:
        return "could not determine resolution"

    # grab streams/format metadata + 2 seconds of frames at the start and end
    zs = "ffprobe -hide_banner -v warning -of json -show_streams -show_format -show_packets -show_data_hash crc32 -read_intervals %+2,999999%+2"
    cmd = zs.encode("ascii").split(b" ") + [fsenc(fp)]
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    so, se = p.communicate()

    # spaces to tabs, drops filesize from 69k to 48k
    so = b"\n".join(
        [
            b"\t" * int((len(x) - len(x.lstrip())) / 4) + x.lstrip()
            for x in (so or b"").split(b"\n")
        ]
    )
    # keep the probe output next to the video for later inspection
    with open(fsenc(f"{fp}.ff.json"), "wb") as f:
        f.write(so)

    err = errchk(so, se, p.returncode, f"{fp}.vidchk")
    if err:
        return err

    # reject videos below 720p-ish
    if max(w, h) < 1280 and min(w, h) < 720:
        return "resolution too small"

    # strict decode pass: any bitstream error makes ffmpeg bail (-xerror)
    zs = (
        "ffmpeg -y -hide_banner -nostdin -v warning"
        + " -err_detect +crccheck+bitstream+buffer+careful+compliant+aggressive+explode"
        + " -xerror -i"
    )
    cmd = zs.encode("ascii").split(b" ") + [fsenc(fp)]

    # FAST only remuxes (container-level); otherwise fully decode a/v streams
    if FAST:
        zs = "-c copy -f null -"
    else:
        zs = "-vcodec rawvideo -acodec pcm_s16le -f null -"

    cmd += zs.encode("ascii").split(b" ")
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    so, se = p.communicate()
    return errchk(so, se, p.returncode, f"{fp}.vidchk")


if __name__ == "__main__":
    print(main() or "ok")

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env python3
"""
use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the
message/pager function on the website)
example copyparty config to use this:
--urlform save,get -vsrv/wget:wget:rwmd,ed:c,e2ts,mtp=title=ebin,t300,ad,bin/mtag/wget.py
explained:
for realpath srv/wget (served at /wget) with read-write-modify-delete for ed,
enable file analysis on upload (e2ts),
use mtp plugin "bin/mtag/wget.py" to provide metadata tag "title",
do this on all uploads with the file extension "bin",
t300 = 300 seconds timeout for each download,
ad = parse file regardless if FFmpeg thinks it is audio or not
PS: this requires e2ts to be functional,
meaning you need to do at least one of these:
* apt install ffmpeg
* pip3 install mutagen
"""
import os
import sys
import subprocess as sp
from urllib.parse import unquote_to_bytes as unquote
def main():
    """Parse a urlencoded `msg=<url>` post-file (argv[1]) and download the url with wget.

    Progress is signalled with empty marker files next to the download;
    on success the original .bin post-file is deleted.

    Raises on anything that is not a small, urlencoded `msg=` post.
    """
    fp = os.path.abspath(sys.argv[1])
    fdir = os.path.dirname(fp)
    fname = os.path.basename(fp)
    if not fname.startswith("put-") or not fname.endswith(".bin"):
        raise Exception("not a post file")

    # read the whole body; anything over 4k cannot be a sane URL post
    buf = b""
    with open(fp, "rb") as f:
        while True:
            b = f.read(4096)
            buf += b
            if len(buf) > 4096:
                raise Exception("too big")

            if not b:
                break

    if not buf:
        raise Exception("file is empty")

    # undo application/x-www-form-urlencoded
    buf = unquote(buf.replace(b"+", b" "))
    url = buf.decode("utf-8")
    if not url.startswith("msg="):
        raise Exception("does not start with msg=")

    url = url[4:]
    if "://" not in url:
        url = "https://" + url

    os.chdir(fdir)
    name = url.split("?")[0].split("/")[-1]
    tfn = "-- DOWNLOADING " + name
    open(tfn, "wb").close()

    cmd = ["wget", "--trust-server-names", "--", url]
    try:
        sp.check_call(cmd)

        # OPTIONAL:
        # on success, delete the .bin file which contains the URL
        os.unlink(fp)
    except:
        # BUGFIX: marker filename was misspelled "DONWLOAD"
        open("-- FAILED TO DOWNLOAD " + name, "wb").close()

    os.unlink(tfn)
    print(url)


if __name__ == "__main__":
    main()

View File

@@ -1,198 +0,0 @@
#!/usr/bin/env python
import re
import os
import sys
import gzip
import json
import base64
import string
import urllib.request
from datetime import datetime
"""
youtube initial player response
it's probably best to use this through a config file; see res/yt-ipr.conf
but if you want to use plain arguments instead then:
-v srv/ytm:ytm:w:rw,ed
:c,e2ts,e2dsa
:c,sz=16k-1m:c,maxn=10,300:c,rotf=%Y-%m/%d-%H
:c,mtp=yt-id,yt-title,yt-author,yt-channel,yt-views,yt-private,yt-manifest,yt-expires=bin/mtag/yt-ipr.py
:c,mte=yt-id,yt-title,yt-author,yt-channel,yt-views,yt-private,yt-manifest,yt-expires
see res/yt-ipr.user.js for the example userscript to go with this
"""
def main():
    """Read a player-response dump (argv[1], gzip or plaintext), parse, dispatch."""
    src = sys.argv[1]
    try:
        with gzip.open(src, "rt", encoding="utf-8", errors="replace") as f:
            txt = f.read()
    except:
        # not gzip-compressed; read it as plain text
        with open(src, "r", encoding="utf-8", errors="replace") as f:
            txt = f.read()

    # drop any junk before the first json object
    txt = "{" + txt.split("{", 1)[1]

    # on parse error, retry with just the prefix before the failing offset
    try:
        pd = json.loads(txt)
    except json.decoder.JSONDecodeError as ex:
        pd = json.loads(txt[: ex.pos])

    # youtube initial-player-response vs freg-style capture
    if "videoDetails" in pd:
        parse_youtube(pd)
    else:
        parse_freg(pd)
def get_expiration(url):
    """Extract the unix `expire` query-parameter from url, formatted as UTC text."""
    ts = int(re.search(r"[?&]expire=([0-9]+)", url).group(1))
    return datetime.utcfromtimestamp(ts).strftime("%Y-%m-%d, %H:%M")
def parse_youtube(pd):
    """Print the tag-dict for a youtube initial-player-response, then convert it
    to an freg-style .urls.json via freg_conv."""
    vd = pd["videoDetails"]
    sd = pd["streamingData"]

    # expiry of the signed media urls
    expiry = get_expiration(sd["adaptiveFormats"][0]["url"])

    manifests = []
    if "dashManifestUrl" in sd:
        manifests.append("dash")
    if "hlsManifestUrl" in sd:
        manifests.append("hls")

    tags = {
        "yt-id": vd["videoId"],
        "yt-title": vd["title"],
        "yt-author": vd["author"],
        "yt-channel": vd["channelId"],
        "yt-views": vd["viewCount"],
        "yt-private": vd["isPrivate"],
        "yt-manifest": ",".join(manifests),
        "yt-expires": expiry,
    }
    print(json.dumps(tags))
    freg_conv(pd)
def parse_freg(pd):
    """Print the tag-dict for an freg-style capture (metadata + per-itag urls)."""
    md = pd["metadata"]
    tags = {
        "yt-id": md["id"],
        "yt-title": md["title"],
        "yt-author": md["channelName"],
        "yt-channel": md["channelURL"].strip("/").split("/")[-1],
        "yt-expires": get_expiration(list(pd["video"].values())[0]),
    }
    print(json.dumps(tags))
def freg_conv(pd):
    """Convert a youtube player-response into an freg-style <videoId>.urls.json
    (best itag->url per stream type, plus metadata and an inlined thumbnail),
    written next to the input file (argv[1])."""
    # based on getURLs.js v1.5 (2021-08-07)
    # itags listed in descending preference; first match wins
    # fmt: off
    priority = {
        "video": [
            337, 315, 266, 138,  # 2160p60
            313, 336,  # 2160p
            308,  # 1440p60
            271, 264,  # 1440p
            335, 303, 299,  # 1080p60
            248, 169, 137,  # 1080p
            334, 302, 298,  # 720p60
            247, 136  # 720p
        ],
        "audio": [
            251, 141, 171, 140, 250, 249, 139
        ]
    }
    vid_id = pd["videoDetails"]["videoId"]
    chan_id = pd["videoDetails"]["channelId"]

    try:
        thumb_url = pd["microformat"]["playerMicroformatRenderer"]["thumbnail"]["thumbnails"][0]["url"]
        start_ts = pd["microformat"]["playerMicroformatRenderer"]["liveBroadcastDetails"]["startTimestamp"]
    except:
        # microformat fields missing; fall back to the static thumbnail url
        thumb_url = f"https://img.youtube.com/vi/{vid_id}/maxresdefault.jpg"
        start_ts = ""
    # fmt: on

    metadata = {
        "title": pd["videoDetails"]["title"],
        "id": vid_id,
        "channelName": pd["videoDetails"]["author"],
        "channelURL": "https://www.youtube.com/channel/" + chan_id,
        "description": pd["videoDetails"]["shortDescription"],
        "thumbnailUrl": thumb_url,
        "startTimestamp": start_ts,
    }

    # vid_id is interpolated into filesystem paths below; refuse suspicious ids
    if [x for x in vid_id if x not in string.ascii_letters + string.digits + "_-"]:
        print(f"malicious json", file=sys.stderr)
        return

    basepath = os.path.dirname(sys.argv[1])
    thumb_fn = f"{basepath}/{vid_id}.jpg"
    tmp_fn = f"{thumb_fn}.{os.getpid()}"
    # fetch the thumbnail once, and only from known youtube image hosts
    if not os.path.exists(thumb_fn) and (
        thumb_url.startswith("https://img.youtube.com/vi/")
        or thumb_url.startswith("https://i.ytimg.com/vi/")
    ):
        try:
            with urllib.request.urlopen(thumb_url) as fi:
                with open(tmp_fn, "wb") as fo:
                    fo.write(fi.read())

            # atomic rename so a failed download never leaves a partial .jpg
            os.rename(tmp_fn, thumb_fn)
        except:
            if os.path.exists(tmp_fn):
                os.unlink(tmp_fn)

    try:
        with open(thumb_fn, "rb") as f:
            thumb = base64.b64encode(f.read()).decode("ascii")
    except:
        # tiny placeholder jpeg used when no thumbnail could be fetched
        thumb = "/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/yQALCAABAAEBAREA/8wABgAQEAX/2gAIAQEAAD8A0s8g/9k="

    metadata["thumbnail"] = "data:image/jpeg;base64," + thumb

    ret = {
        "metadata": metadata,
        "version": "1.5",
        "createTime": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),
    }

    # pick the single most-preferred available itag per stream type
    for stream, itags in priority.items():
        for itag in itags:
            url = None
            for afmt in pd["streamingData"]["adaptiveFormats"]:
                if itag == afmt["itag"]:
                    url = afmt["url"]
                    break

            if url:
                ret[stream] = {itag: url}
                break

    fn = f"{basepath}/{vid_id}.urls.json"
    with open(fn, "w", encoding="utf-8", errors="replace") as f:
        f.write(json.dumps(ret, indent=4))
if __name__ == "__main__":
    try:
        main()
    except:
        # NOTE(review): swallows every error -- presumably so a bad upload
        # does not fail the tag-scan; uncomment `raise` when debugging
        # raise
        pass

View File

@@ -1,177 +0,0 @@
#!/usr/bin/env python3
"""
partyjournal.py: chronological history of uploads
2021-12-31, v0.1, ed <irc.rizon.net>, MIT-Licensed
https://github.com/9001/copyparty/blob/hovudstraum/bin/partyjournal.py
produces a chronological list of all uploads,
by collecting info from up2k databases and the filesystem
specify subnet `192.168.1.*` with argument `.=192.168.1.`,
affecting all successive mappings
usage:
./partyjournal.py > partyjournal.html .=192.168.1. cart=125 steen=114 steen=131 sleepy=121 fscarlet=144 ed=101 ed=123
"""
import sys
import base64
import sqlite3
import argparse
from datetime import datetime
from urllib.parse import quote_from_bytes as quote
from urllib.parse import unquote_to_bytes as unquote
FS_ENCODING = sys.getfilesystemencoding()


class APF(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """argparse formatter combining default-values display with raw description text."""
    pass


##
## snibbed from copyparty


def s3dec(v):
    """Decode copyparty's db filename encoding:
    a `//`-prefixed value is urlsafe-base64 wtf8, anything else is verbatim."""
    if not v.startswith("//"):
        return v

    raw = base64.urlsafe_b64decode(v.encode("ascii")[2:])
    return raw.decode(FS_ENCODING, "replace")
def quotep(txt):
    """Percent-encode a path for use in a URL; `/` stays literal, space becomes `+`."""
    encoded = quote(txt.encode("utf-8", "replace"), safe=b"/")
    as_bytes = encoded.encode("ascii").replace(b" ", b"+")
    return as_bytes.decode("utf-8", "replace")
def html_escape(s, quote=False, crlf=False):
    """html.escape but also newlines"""
    # order matters: `&` first so the entities themselves are not re-escaped
    for ch, ref in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        s = s.replace(ch, ref)

    if quote:
        s = s.replace('"', "&quot;").replace("'", "&#x27;")

    if crlf:
        s = s.replace("\r", "&#13;").replace("\n", "&#10;")

    return s
## end snibs
##
def main():
    """Render .hist/up2k.db as a chronological html table on stdout.

    argv maps usernames to ip suffixes (`name=suffix`); a `.=prefix` arg
    sets the subnet prefix for all following mappings.
    """
    ap = argparse.ArgumentParser(formatter_class=APF)
    ap.add_argument("who", nargs="*")
    ar = ap.parse_args()

    # build ip -> username map from the positional args
    imap = {}
    subnet = ""
    for v in ar.who:
        if "=" not in v:
            raise Exception("bad who: " + v)

        k, v = v.split("=")
        if k == ".":
            subnet = v
            continue

        imap["{}{}".format(subnet, v)] = k

    print(repr(imap), file=sys.stderr)

    print(
        """\
<!DOCTYPE html>
<html lang="en">
<head><meta charset="utf-8"><style>
html, body {
    color: #ccc;
    background: #222;
    font-family: sans-serif;
}
a {
    color: #fc5;
}
td, th {
    padding: .2em .5em;
    border: 1px solid #999;
    border-width: 0 1px 1px 0;
    white-space: nowrap;
}
td:nth-child(1),
td:nth-child(2),
td:nth-child(3) {
    font-family: monospace, monospace;
    text-align: right;
}
tr:first-child {
    position: sticky;
    top: -1px;
}
th {
    background: #222;
    text-align: left;
}
</style></head><body><table><tr>
    <th>wark</th>
    <th>time</th>
    <th>size</th>
    <th>who</th>
    <th>link</th>
</tr>"""
    )

    db_path = ".hist/up2k.db"
    conn = sqlite3.connect(db_path)
    q = r"pragma table_info(up)"
    inf = conn.execute(q).fetchall()
    cols = [x[1] for x in inf]
    print("<!-- " + str(cols) + " -->")
    # ['w', 'mt', 'sz', 'rd', 'fn', 'ip', 'at']

    # sort by upload-time when known, otherwise file mtime
    q = r"select * from up order by case when at > 0 then at else mt end"
    for w, mt, sz, rd, fn, ip, at in conn.execute(q):
        link = "/".join([s3dec(x) for x in [rd, fn] if x])

        # small "put-" files are message posts; link to the posted URL instead
        if fn.startswith("put-") and sz < 4096:
            try:
                with open(link, "rb") as f:
                    txt = f.read().decode("utf-8", "replace")
            except:
                continue

            if txt.startswith("msg="):
                txt = txt.encode("utf-8", "replace")
                txt = unquote(txt.replace(b"+", b" "))
                link = txt.decode("utf-8")[4:]

        sz = "{:,}".format(sz)

        v = [
            w[:16],
            datetime.utcfromtimestamp(at if at > 0 else mt).strftime(
                "%Y-%m-%d %H:%M:%S"
            ),
            sz,
            imap.get(ip, ip),
        ]

        row = "<tr>\n "
        # BUGFIX: closing tag was "</th>", mismatched with the opening "<td>"
        row += "\n ".join(["<td>{}</td>".format(x) for x in v])
        row += '\n <td><a href="{}">{}</a></td>'.format(link, html_escape(link))
        row += "\n</tr>"
        print(row)

    print("</table></body></html>")

View File

@@ -1,128 +0,0 @@
#!/bin/bash
set -e

# runs copyparty (or any other program really) in a chroot
#
# assumption: these directories, and everything within, are owned by root

# system directories that get bind-mounted into the jail
sysdirs=( /bin /lib /lib32 /lib64 /sbin /usr )

# error-handler
help() { cat <<'EOF'
usage:
  ./prisonparty.sh <ROOTDIR> <UID> <GID> [VOLDIR [VOLDIR...]] -- python3 copyparty-sfx.py [...]
example:
  ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 copyparty-sfx.py -v /mnt/nas/music::rwmd
example for running straight from source (instead of using an sfx):
  PYTHONPATH=$PWD ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 -um copyparty -v /mnt/nas/music::rwmd
note that if you have python modules installed as --user (such as bpm/key detectors),
you should add /home/foo/.local as a VOLDIR
EOF
exit 1
}

# read arguments
# (the EXIT trap turns any early failure into a usage message)
trap help EXIT
jail="$(realpath "$1")"; shift
uid="$1"; shift
gid="$1"; shift
vols=()
while true; do
	v="$1"; shift
	[ "$v" = -- ] && break  # end of volumes
	[ "$#" -eq 0 ] && break  # invalid usage
	vols+=( "$(realpath "$v")" )
done
pybin="$1"; shift
pybin="$(command -v "$pybin")"
pyarg=
# collect leading interpreter flags (anything starting with -)
while true; do
	v="$1"
	[ "${v:0:1}" = - ] || break
	pyarg="$pyarg $v"
	shift
done
cpp="$1"; shift
[ -d "$cpp" ] && cppdir="$PWD" || {
	# sfx, not module
	cpp="$(realpath "$cpp")"
	cppdir="$(dirname "$cpp")"
}
trap - EXIT

# debug/vis
echo
echo "chroot-dir = $jail"
echo "user:group = $uid:$gid"
echo " copyparty = $cpp"
echo
printf '\033[33m%s\033[0m\n' "copyparty can access these folders and all their subdirectories:"
for v in "${vols[@]}"; do
	printf '\033[36m ├─\033[0m %s \033[36m ── added by (You)\033[0m\n' "$v"
done
printf '\033[36m ├─\033[0m %s \033[36m ── where the copyparty binary is\033[0m\n' "$cppdir"
printf '\033[36m ╰─\033[0m %s \033[36m ── the folder you are currently in\033[0m\n' "$PWD"
vols+=("$cppdir" "$PWD")
echo

# remove any trailing slashes
jail="${jail%/}"

# bind-mount system directories and volumes
printf '%s\n' "${sysdirs[@]}" "${vols[@]}" | sed -r 's`/$``' | LC_ALL=C sort | uniq |
while IFS= read -r v; do
	[ -e "$v" ] || {
		# printf '\033[1;31mfolder does not exist:\033[0m %s\n' "/$v"
		continue
	}
	# compare device.inode to skip paths that are already mounted in the jail
	i1=$(stat -c%D.%i "$v" 2>/dev/null || echo a)
	i2=$(stat -c%D.%i "$jail$v" 2>/dev/null || echo b)
	# echo "v [$v] i1 [$i1] i2 [$i2]"
	[ $i1 = $i2 ] && continue
	mkdir -p "$jail$v"
	mount --bind "$v" "$jail$v"
done

# unmount everything on exit, but only if nothing inside the jail is still running
cln() {
	rv=$?
	# cleanup if not in use
	lsof "$jail" | grep -qF "$jail" &&
		echo "chroot is in use, will not cleanup" ||
	{
		mount | grep -F " on $jail" |
		awk '{sub(/ type .*/,"");sub(/.* on /,"");print}' |
		LC_ALL=C sort -r | tee /dev/stderr | tr '\n' '\0' | xargs -r0 umount
	}
	exit $rv
}
trap cln EXIT

# create a tmp
mkdir -p "$jail/tmp"
chmod 777 "$jail/tmp"

# run copyparty
export HOME=$(getent passwd $uid | cut -d: -f6)
export USER=$(getent passwd $uid | cut -d: -f1)
export LOGNAME="$USER"
#echo "pybin [$pybin]"
#echo "pyarg [$pyarg]"
#echo "cpp [$cpp]"
chroot --userspec=$uid:$gid "$jail" "$pybin" $pyarg "$cpp" "$@" &
p=$!
trap 'kill $p' INT TERM
wait

View File

@@ -1,99 +0,0 @@
#!/usr/bin/env python3
"""
unforget.py: rebuild db from logfiles
2022-09-07, v0.1, ed <irc.rizon.net>, MIT-Licensed
https://github.com/9001/copyparty/blob/hovudstraum/bin/unforget.py
only makes sense if running copyparty with --no-forget
(e.g. immediately shifting uploads to other storage)
usage:
xz -d < log | ./unforget.py .hist/up2k.db
"""
import re
import sys
import json
import base64
import sqlite3
import argparse
FS_ENCODING = sys.getfilesystemencoding()


class APF(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """argparse formatter combining default-values display with raw description text."""
    pass


# scratch in-memory cursor, used only to probe whether a string
# can be stored as sqlite TEXT or needs the base64 fallback
mem_cur = sqlite3.connect(":memory:").cursor()
mem_cur.execute(r"create table a (b text)")


def s3enc(rd: str, fn: str) -> tuple[str, str]:
    """Encode a (directory, filename) pair the way copyparty's db does:
    values sqlite rejects as TEXT become `//<urlsafe-b64 of wtf8>`."""
    out: list[str] = []
    for v in (rd, fn):
        try:
            mem_cur.execute("select * from a where b = ?", (v,))
            out.append(v)
        except:
            wtf8 = v.encode(FS_ENCODING, "surrogateescape")
            out.append("//" + base64.urlsafe_b64encode(wtf8).decode("ascii"))

    return out[0], out[1]
def main():
    """Replay upload-completion lines from a copyparty log (stdin) into an up2k db.

    Usage: xz -d < log | ./unforget.py .hist/up2k.db
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("db")
    ar = ap.parse_args()

    db = sqlite3.connect(ar.db).cursor()
    ptn_times = re.compile(r"no more chunks, setting times \(([0-9]+)")
    at = 0
    ctr = 0

    for ln in [x.decode("utf-8", "replace").rstrip() for x in sys.stdin.buffer]:
        # remember the most recent upload-time; it belongs to the next json line
        if "no more chunks, setting times (" in ln:
            m = ptn_times.search(ln)
            if m:
                at = int(m.group(1))

        # a completed upload is logged as json with an empty "hash" list
        if '"hash": []' in ln:
            try:
                ofs = ln.find("{")
                j = json.loads(ln[ofs:])
            except:
                # BUGFIX: was `pass`, which fell through and reused a stale
                # (or unbound) `j` from a previous line; skip bad lines instead
                continue

            w = j["wark"]
            if db.execute("select w from up where w = ?", (w,)).fetchone():
                continue

            # example input and the row it produces:
            # PYTHONPATH=/home/ed/dev/copyparty/ python3 -m copyparty -e2dsa -v foo:foo:rwmd,ed -aed:wark --no-forget
            # 05:34:43.845 127.0.0.1 42496      no more chunks, setting times (1662528883, 1658001882)
            # 05:34:43.863 127.0.0.1 42496      {"name": "f\"2", "purl": "/foo/bar/baz/", "size": 1674, "lmod": 1658001882, "sprs": true, "hash": [], "wark": "LKIWpp2jEAh9dH3fu-DobuURFGEKlODXDGTpZ1otMhUg"}
            # | w                                            | mt         | sz   | rd      | fn  | ip        | at         |
            # | LKIWpp2jEAh9dH3fu-DobuURFGEKlODXDGTpZ1otMhUg | 1658001882 | 1674 | bar/baz | f"2 | 127.0.0.1 | 1662528883 |

            rd, fn = s3enc(j["purl"].strip("/"), j["name"])
            ip = ln.split(" ")[1].split("m")[-1]

            q = "insert into up values (?,?,?,?,?,?,?)"
            v = (w, int(j["lmod"]), int(j["size"]), rd, fn, ip, at)
            db.execute(q, v)
            ctr += 1
            if ctr % 1024 == 1023:
                print(f"{ctr} commit...")
                db.connection.commit()

    if ctr:
        db.connection.commit()

    print(f"unforgot {ctr} files")


if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

24
bin/up2k.sh Normal file → Executable file
View File

@@ -8,7 +8,7 @@ set -e
## ##
## config ## config
datalen=$((128*1024*1024)) datalen=$((2*1024*1024*1024))
target=127.0.0.1 target=127.0.0.1
posturl=/inc posturl=/inc
passwd=wark passwd=wark
@@ -37,10 +37,10 @@ gendata() {
# pipe a chunk, get the base64 checksum # pipe a chunk, get the base64 checksum
gethash() { gethash() {
printf $( printf $(
sha512sum | cut -c-66 | sha512sum | cut -c-64 |
sed -r 's/ .*//;s/(..)/\\x\1/g' sed -r 's/ .*//;s/(..)/\\x\1/g'
) | ) |
base64 -w0 | cut -c-44 | base64 -w0 | cut -c-43 |
tr '+/' '-_' tr '+/' '-_'
} }
@@ -123,7 +123,7 @@ printf '\033[36m'
{ {
{ {
cat <<EOF cat <<EOF
POST $posturl/ HTTP/1.1 POST $posturl/handshake.php HTTP/1.1
Connection: Close Connection: Close
Cookie: cppwd=$passwd Cookie: cppwd=$passwd
Content-Type: text/plain;charset=UTF-8 Content-Type: text/plain;charset=UTF-8
@@ -145,16 +145,14 @@ printf '\033[0m\nwark: %s\n' $wark
## ##
## wait for signal to continue ## wait for signal to continue
true || { w8=/dev/shm/$salt.w8
w8=/dev/shm/$salt.w8 touch $w8
touch $w8
echo "ready; rm -f $w8" echo "ready; rm -f $w8"
while [ -e $w8 ]; do while [ -e $w8 ]; do
sleep 0.2 sleep 0.2
done done
}
## ##
@@ -177,7 +175,7 @@ while [ $remains -gt 0 ]; do
{ {
cat <<EOF cat <<EOF
POST $posturl/ HTTP/1.1 POST $posturl/chunkpit.php HTTP/1.1
Connection: Keep-Alive Connection: Keep-Alive
Cookie: cppwd=$passwd Cookie: cppwd=$passwd
Content-Type: application/octet-stream Content-Type: application/octet-stream

View File

@@ -1,6 +1,3 @@
### [`plugins/`](plugins/)
* example extensions
### [`copyparty.bat`](copyparty.bat) ### [`copyparty.bat`](copyparty.bat)
* launches copyparty with no arguments (anon read+write within same folder) * launches copyparty with no arguments (anon read+write within same folder)
* intended for windows machines with no python.exe in PATH * intended for windows machines with no python.exe in PATH
@@ -22,29 +19,17 @@ however if your copyparty is behind a reverse-proxy, you may want to use [`share
* `URL`: full URL to the root folder (with trailing slash) followed by `$regex:1|1$` * `URL`: full URL to the root folder (with trailing slash) followed by `$regex:1|1$`
* `pw`: password (remove `Parameters` if anon-write) * `pw`: password (remove `Parameters` if anon-write)
### [`media-osd-bgone.ps1`](media-osd-bgone.ps1)
* disables the [windows OSD popup](https://user-images.githubusercontent.com/241032/122821375-0e08df80-d2dd-11eb-9fd9-184e8aacf1d0.png) (the thing on the left) which appears every time you hit media hotkeys to adjust volume or change song while playing music with the copyparty web-ui, or most other audio players really
### [`explorer-nothumbs-nofoldertypes.reg`](explorer-nothumbs-nofoldertypes.reg) ### [`explorer-nothumbs-nofoldertypes.reg`](explorer-nothumbs-nofoldertypes.reg)
* disables thumbnails and folder-type detection in windows explorer * disables thumbnails and folder-type detection in windows explorer
* makes it way faster (especially for slow/networked locations (such as partyfuse)) * makes it way faster (especially for slow/networked locations (such as copyparty-fuse))
### [`webdav-basicauth.reg`](webdav-basicauth.reg)
* enables webdav basic-auth over plaintext http; takes effect after a reboot OR after running `webdav-unlimit.bat`
### [`webdav-unlimit.bat`](webdav-unlimit.bat)
* removes the 47.6 MiB filesize limit when downloading from webdav
### [`cfssl.sh`](cfssl.sh) ### [`cfssl.sh`](cfssl.sh)
* creates CA and server certificates using cfssl * creates CA and server certificates using cfssl
* give a 3rd argument to install it to your copyparty config * give a 3rd argument to install it to your copyparty config
* systemd service at [`systemd/cfssl.service`](systemd/cfssl.service)
# OS integration # OS integration
init-scripts to start copyparty as a service init-scripts to start copyparty as a service
* [`systemd/copyparty.service`](systemd/copyparty.service) runs the sfx normally * [`systemd/copyparty.service`](systemd/copyparty.service)
* [`rc/copyparty`](rc/copyparty) runs sfx normally on freebsd, create a `copyparty` user
* [`systemd/prisonparty.service`](systemd/prisonparty.service) runs the sfx in a chroot
* [`openrc/copyparty`](openrc/copyparty) * [`openrc/copyparty`](openrc/copyparty)
# Reverse-proxy # Reverse-proxy

View File

@@ -1,15 +0,0 @@
# when running copyparty behind a reverse proxy,
# the following arguments are recommended:
#
# --http-only lower latency on initial connection
# -i 127.0.0.1 only accept connections from nginx
#
# if you are doing location-based proxying (such as `/stuff` below)
# you must run copyparty with --rp-loc=stuff
#
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
LoadModule proxy_module modules/mod_proxy.so
ProxyPass "/stuff" "http://127.0.0.1:3923/stuff"
# do not specify ProxyPassReverse
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}

View File

@@ -1,14 +1,13 @@
#!/bin/bash #!/bin/bash
set -e set -e
# ca-name and server-fqdn # ca-name and server-name
ca_name="$1" ca_name="$1"
srv_fqdn="$2" srv_name="$2"
[ -z "$srv_fqdn" ] && { [ -z "$srv_name" ] && {
echo "need arg 1: ca name" echo "need arg 1: ca name"
echo "need arg 2: server fqdn and/or IPs, comma-separated" echo "need arg 2: server name"
echo "optional arg 3: if set, write cert into copyparty cfg"
exit 1 exit 1
} }
@@ -32,15 +31,15 @@ EOF
gen_srv() { gen_srv() {
(tee /dev/stderr <<EOF (tee /dev/stderr <<EOF
{"key": {"algo":"rsa", "size":4096}, {"key": {"algo":"rsa", "size":4096},
"names": [{"O":"$ca_name - $srv_fqdn"}]} "names": [{"O":"$ca_name - $srv_name"}]}
EOF EOF
)| )|
cfssl gencert -ca ca.pem -ca-key ca.key \ cfssl gencert -ca ca.pem -ca-key ca.key \
-profile=www -hostname="$srv_fqdn" - | -profile=www -hostname="$srv_name.$ca_name" - |
cfssljson -bare "$srv_fqdn" cfssljson -bare "$srv_name"
mv "$srv_fqdn-key.pem" "$srv_fqdn.key" mv "$srv_name-key.pem" "$srv_name.key"
rm "$srv_fqdn.csr" rm "$srv_name.csr"
} }
@@ -58,13 +57,13 @@ show() {
awk '!o; {o=0} /[0-9a-f:]{16}/{o=1}' awk '!o; {o=0} /[0-9a-f:]{16}/{o=1}'
} }
show ca.pem show ca.pem
show "$srv_fqdn.pem" show "$srv_name.pem"
# write cert into copyparty config # write cert into copyparty config
[ -z "$3" ] || { [ -z "$3" ] || {
mkdir -p ~/.config/copyparty mkdir -p ~/.config/copyparty
cat "$srv_fqdn".{key,pem} ca.pem >~/.config/copyparty/cert.pem cat "$srv_name".{key,pem} ca.pem >~/.config/copyparty/cert.pem
} }

View File

@@ -1,104 +0,0 @@
# media-osd-bgone.ps1: disable media-control OSD on win10do
# v1.1, 2021-06-25, ed <irc.rizon.net>, MIT-licensed
# https://github.com/9001/copyparty/blob/hovudstraum/contrib/media-osd-bgone.ps1
#
# locates the first window that looks like the media OSD and minimizes it;
# doing this once after each reboot should do the trick
# (adjust the width/height filter if it doesn't work)
#
# ---------------------------------------------------------------------
#
# tip: save the following as "media-osd-bgone.bat" next to this script:
# start cmd /c "powershell -command ""set-executionpolicy -scope process bypass; .\media-osd-bgone.ps1"" & ping -n 2 127.1 >nul"
#
# then create a shortcut to that bat-file and move the shortcut here:
# %appdata%\Microsoft\Windows\Start Menu\Programs\Startup
#
# and now this will autorun on bootup
Add-Type -TypeDefinition @"
using System;
using System.IO;
using System.Threading;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Windows.Forms;
namespace A {
public class B : Control {
[DllImport("user32.dll")]
static extern void keybd_event(byte bVk, byte bScan, uint dwFlags, int dwExtraInfo);
[DllImport("user32.dll", SetLastError = true)]
static extern IntPtr FindWindowEx(IntPtr hwndParent, IntPtr hwndChildAfter, string lpszClass, string lpszWindow);
[DllImport("user32.dll", SetLastError=true)]
static extern bool GetWindowRect(IntPtr hwnd, out RECT lpRect);
[DllImport("user32.dll")]
static extern bool ShowWindow(IntPtr hWnd, int nCmdShow);
[StructLayout(LayoutKind.Sequential)]
public struct RECT {
public int x;
public int y;
public int x2;
public int y2;
}
bool fa() {
RECT r;
IntPtr it = IntPtr.Zero;
while ((it = FindWindowEx(IntPtr.Zero, it, "NativeHWNDHost", "")) != IntPtr.Zero) {
if (FindWindowEx(it, IntPtr.Zero, "DirectUIHWND", "") == IntPtr.Zero)
continue;
if (!GetWindowRect(it, out r))
continue;
int w = r.x2 - r.x + 1;
int h = r.y2 - r.y + 1;
Console.WriteLine("[*] hwnd {0:x} @ {1}x{2} sz {3}x{4}", it, r.x, r.y, w, h);
if (h != 141)
continue;
ShowWindow(it, 6);
Console.WriteLine("[+] poof");
return true;
}
return false;
}
void fb() {
keybd_event((byte)Keys.VolumeMute, 0, 0, 0);
keybd_event((byte)Keys.VolumeMute, 0, 2, 0);
Thread.Sleep(500);
keybd_event((byte)Keys.VolumeMute, 0, 0, 0);
keybd_event((byte)Keys.VolumeMute, 0, 2, 0);
while (true) {
if (fa()) {
break;
}
Console.WriteLine("[!] not found");
Thread.Sleep(1000);
}
this.Invoke((MethodInvoker)delegate {
Application.Exit();
});
}
public void Run() {
Console.WriteLine("[+] hi");
new Thread(new ThreadStart(fb)).Start();
Application.Run();
Console.WriteLine("[+] bye");
}
}
}
"@ -ReferencedAssemblies System.Windows.Forms
(New-Object -TypeName A.B).Run()

View File

@@ -1,21 +1,11 @@
# when running copyparty behind a reverse proxy, # when running copyparty behind a reverse-proxy,
# the following arguments are recommended: # make sure that copyparty allows at least as many clients as the proxy does,
# # so run copyparty with -nc 512 if your nginx has the default limits
# --http-only lower latency on initial connection # (worker_processes 1, worker_connections 512)
# -i 127.0.0.1 only accept connections from nginx
#
# -nc must match or exceed the webserver's max number of concurrent clients;
# copyparty default is 1024 if OS permits it (see "max clients:" on startup),
# nginx default is 512 (worker_processes 1, worker_connections 512)
#
# you may also consider adding -j0 for CPU-intensive configurations
# (not that i can really think of any good examples)
#
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
upstream cpp { upstream cpp {
server 127.0.0.1:3923; server 127.0.0.1:3923;
keepalive 1; keepalive 120;
} }
server { server {
listen 443 ssl; listen 443 ssl;

View File

@@ -8,11 +8,11 @@
# #
# you may want to: # you may want to:
# change '/usr/bin/python' to another interpreter # change '/usr/bin/python' to another interpreter
# change '/mnt::rw' to another location or permission-set # change '/mnt::a' to another location or permission-set
name="$SVCNAME" name="$SVCNAME"
command_background=true command_background=true
pidfile="/var/run/$SVCNAME.pid" pidfile="/var/run/$SVCNAME.pid"
command="/usr/bin/python /usr/local/bin/copyparty-sfx.py" command="/usr/bin/python /usr/local/bin/copyparty-sfx.py"
command_args="-q -v /mnt::rw" command_args="-q -v /mnt::a"

View File

@@ -1,33 +0,0 @@
# example resource files
can be provided to copyparty to tweak things
## example `.epilogue.html`
save one of these as `.epilogue.html` inside a folder to customize it:
* [`minimal-up2k.html`](minimal-up2k.html) will [simplify the upload ui](https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png)
## example browser-js
point `--js-browser` to one of these by URL:
* [`minimal-up2k.js`](minimal-up2k.js) is similar to the above `minimal-up2k.html` except it applies globally to all write-only folders
* [`up2k-hooks.js`](up2k-hooks.js) lets you specify a ruleset for files to skip uploading
* [`up2k-hook-ytid.js`](up2k-hook-ytid.js) is a more specific example checking youtube-IDs against some API
## example browser-css
point `--css-browser` to one of these by URL:
* [`browser-icons.css`](browser-icons.css) adds filetype icons
## meadup.js
* turns copyparty into a chromecast, just more flexible (and probably way more buggy)
* usage: put the js somewhere in the webroot and `--js-browser /memes/meadup.js`

View File

@@ -1,71 +0,0 @@
/* video, alternative 1:
top-left icon, just like the other formats
=======================================================================
#ggrid>a:is(
[href$=".mkv"i],
[href$=".mp4"i],
[href$=".webm"i],
):before {
content: '📺';
}
*/
/* video, alternative 2:
play-icon in the middle of the thumbnail
=======================================================================
*/
#ggrid>a:is(
[href$=".mkv"i],
[href$=".mp4"i],
[href$=".webm"i],
) {
position: relative;
overflow: hidden;
}
#ggrid>a:is(
[href$=".mkv"i],
[href$=".mp4"i],
[href$=".webm"i],
):before {
content: '▶';
opacity: .8;
margin: 0;
padding: 1em .5em 1em .7em;
border-radius: 9em;
line-height: 0;
color: #fff;
text-shadow: none;
background: rgba(0, 0, 0, 0.7);
left: calc(50% - 1em);
top: calc(50% - 1.4em);
}
/* audio */
#ggrid>a:is(
[href$=".mp3"i],
[href$=".ogg"i],
[href$=".opus"i],
[href$=".flac"i],
[href$=".m4a"i],
[href$=".aac"i],
):before {
content: '🎵';
}
/* image */
#ggrid>a:is(
[href$=".jpg"i],
[href$=".jpeg"i],
[href$=".png"i],
[href$=".gif"i],
[href$=".webp"i],
):before {
content: '🎨';
}

View File

@@ -1,506 +0,0 @@
// USAGE:
// place this file somewhere in the webroot and then
// python3 -m copyparty --js-browser /memes/meadup.js
//
// FEATURES:
// * adds an onscreen keyboard for operating a media center remotely,
// relies on https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/very-bad-idea.py
// * adds an interactive anime girl (if you can find the dependencies)
var hambagas = [
"https://www.youtube.com/watch?v=pFA3KGp4GuU"
];
// keybaord,
// onscreen keyboard by @steinuil
function initKeybaord(BASE_URL, HAMBAGA, consoleLog, consoleError) {
document.querySelector('.keybaord-container').innerHTML = `
<div class="keybaord-body">
<div class="keybaord-row keybaord-row-1">
<div class="keybaord-key" data-keybaord-key="Escape">
esc
</div>
<div class="keybaord-key" data-keybaord-key="F1">
F1
</div>
<div class="keybaord-key" data-keybaord-key="F2">
F2
</div>
<div class="keybaord-key" data-keybaord-key="F3">
F3
</div>
<div class="keybaord-key" data-keybaord-key="F4">
F4
</div>
<div class="keybaord-key" data-keybaord-key="F5">
F5
</div>
<div class="keybaord-key" data-keybaord-key="F6">
F6
</div>
<div class="keybaord-key" data-keybaord-key="F7">
F7
</div>
<div class="keybaord-key" data-keybaord-key="F8">
F8
</div>
<div class="keybaord-key" data-keybaord-key="F9">
F9
</div>
<div class="keybaord-key" data-keybaord-key="F10">
F10
</div>
<div class="keybaord-key" data-keybaord-key="F11">
F11
</div>
<div class="keybaord-key" data-keybaord-key="F12">
F12
</div>
<div class="keybaord-key" data-keybaord-key="Insert">
ins
</div>
<div class="keybaord-key" data-keybaord-key="Delete">
del
</div>
</div>
<div class="keybaord-row keybaord-row-2">
<div class="keybaord-key" data-keybaord-key="\`">
\`
</div>
<div class="keybaord-key" data-keybaord-key="1">
1
</div>
<div class="keybaord-key" data-keybaord-key="2">
2
</div>
<div class="keybaord-key" data-keybaord-key="3">
3
</div>
<div class="keybaord-key" data-keybaord-key="4">
4
</div>
<div class="keybaord-key" data-keybaord-key="5">
5
</div>
<div class="keybaord-key" data-keybaord-key="6">
6
</div>
<div class="keybaord-key" data-keybaord-key="7">
7
</div>
<div class="keybaord-key" data-keybaord-key="8">
8
</div>
<div class="keybaord-key" data-keybaord-key="9">
9
</div>
<div class="keybaord-key" data-keybaord-key="0">
0
</div>
<div class="keybaord-key" data-keybaord-key="-">
-
</div>
<div class="keybaord-key" data-keybaord-key="=">
=
</div>
<div class="keybaord-key keybaord-backspace" data-keybaord-key="BackSpace">
backspace
</div>
</div>
<div class="keybaord-row keybaord-row-3">
<div class="keybaord-key keybaord-tab" data-keybaord-key="Tab">
tab
</div>
<div class="keybaord-key" data-keybaord-key="q">
q
</div>
<div class="keybaord-key" data-keybaord-key="w">
w
</div>
<div class="keybaord-key" data-keybaord-key="e">
e
</div>
<div class="keybaord-key" data-keybaord-key="r">
r
</div>
<div class="keybaord-key" data-keybaord-key="t">
t
</div>
<div class="keybaord-key" data-keybaord-key="y">
y
</div>
<div class="keybaord-key" data-keybaord-key="u">
u
</div>
<div class="keybaord-key" data-keybaord-key="i">
i
</div>
<div class="keybaord-key" data-keybaord-key="o">
o
</div>
<div class="keybaord-key" data-keybaord-key="p">
p
</div>
<div class="keybaord-key" data-keybaord-key="[">
[
</div>
<div class="keybaord-key" data-keybaord-key="]">
]
</div>
<div class="keybaord-key keybaord-enter" data-keybaord-key="Return">
enter
</div>
</div>
<div class="keybaord-row keybaord-row-4">
<div class="keybaord-key keybaord-capslock" data-keybaord-key="HAMBAGA">
🍔
</div>
<div class="keybaord-key" data-keybaord-key="a">
a
</div>
<div class="keybaord-key" data-keybaord-key="s">
s
</div>
<div class="keybaord-key" data-keybaord-key="d">
d
</div>
<div class="keybaord-key" data-keybaord-key="f">
f
</div>
<div class="keybaord-key" data-keybaord-key="g">
g
</div>
<div class="keybaord-key" data-keybaord-key="h">
h
</div>
<div class="keybaord-key" data-keybaord-key="j">
j
</div>
<div class="keybaord-key" data-keybaord-key="k">
k
</div>
<div class="keybaord-key" data-keybaord-key="l">
l
</div>
<div class="keybaord-key" data-keybaord-key=";">
;
</div>
<div class="keybaord-key" data-keybaord-key="'">
'
</div>
<div class="keybaord-key keybaord-backslash" data-keybaord-key="\\">
\\
</div>
</div>
<div class="keybaord-row keybaord-row-5">
<div class="keybaord-key keybaord-lshift" data-keybaord-key="Shift_L">
shift
</div>
<div class="keybaord-key" data-keybaord-key="\\">
\\
</div>
<div class="keybaord-key" data-keybaord-key="z">
z
</div>
<div class="keybaord-key" data-keybaord-key="x">
x
</div>
<div class="keybaord-key" data-keybaord-key="c">
c
</div>
<div class="keybaord-key" data-keybaord-key="v">
v
</div>
<div class="keybaord-key" data-keybaord-key="b">
b
</div>
<div class="keybaord-key" data-keybaord-key="n">
n
</div>
<div class="keybaord-key" data-keybaord-key="m">
m
</div>
<div class="keybaord-key" data-keybaord-key=",">
,
</div>
<div class="keybaord-key" data-keybaord-key=".">
.
</div>
<div class="keybaord-key" data-keybaord-key="/">
/
</div>
<div class="keybaord-key keybaord-rshift" data-keybaord-key="Shift_R">
shift
</div>
</div>
<div class="keybaord-row keybaord-row-6">
<div class="keybaord-key keybaord-lctrl" data-keybaord-key="Control_L">
ctrl
</div>
<div class="keybaord-key keybaord-super" data-keybaord-key="Meta_L">
win
</div>
<div class="keybaord-key keybaord-alt" data-keybaord-key="Alt_L">
alt
</div>
<div class="keybaord-key keybaord-spacebar" data-keybaord-key="space">
space
</div>
<div class="keybaord-key keybaord-altgr" data-keybaord-key="Alt_R">
altgr
</div>
<div class="keybaord-key keybaord-what" data-keybaord-key="Menu">
menu
</div>
<div class="keybaord-key keybaord-rctrl" data-keybaord-key="Control_R">
ctrl
</div>
</div>
<div class="keybaord-row">
<div class="keybaord-key" data-keybaord-key="XF86AudioLowerVolume">
🔉
</div>
<div class="keybaord-key" data-keybaord-key="XF86AudioRaiseVolume">
🔊
</div>
<div class="keybaord-key" data-keybaord-key="Left">
⬅️
</div>
<div class="keybaord-key" data-keybaord-key="Down">
⬇️
</div>
<div class="keybaord-key" data-keybaord-key="Up">
⬆️
</div>
<div class="keybaord-key" data-keybaord-key="Right">
➡️
</div>
<div class="keybaord-key" data-keybaord-key="Page_Up">
PgUp
</div>
<div class="keybaord-key" data-keybaord-key="Page_Down">
PgDn
</div>
<div class="keybaord-key" data-keybaord-key="Home">
🏠
</div>
<div class="keybaord-key" data-keybaord-key="End">
End
</div>
</div>
<div>
`;
function arraySample(array) {
return array[Math.floor(Math.random() * array.length)];
}
function sendMessage(msg) {
return fetch(BASE_URL, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
},
body: "msg=" + encodeURIComponent(msg),
}).then(
(r) => r.text(), // so the response body shows up in network tab
(err) => consoleError(err)
);
}
const MODIFIER_ON_CLASS = "keybaord-modifier-on";
const KEY_DATASET = "data-keybaord-key";
const KEY_CLASS = "keybaord-key";
const modifiers = new Set()
function toggleModifier(button, key) {
button.classList.toggle(MODIFIER_ON_CLASS);
if (modifiers.has(key)) {
modifiers.delete(key);
} else {
modifiers.add(key);
}
}
function popModifiers() {
let modifierString = "";
modifiers.forEach((mod) => {
document.querySelector("[" + KEY_DATASET + "='" + mod + "']")
.classList.remove(MODIFIER_ON_CLASS);
modifierString += mod + "+";
});
modifiers.clear();
return modifierString;
}
Array.from(document.querySelectorAll("." + KEY_CLASS)).forEach((button) => {
const key = button.dataset.keybaordKey;
button.addEventListener("click", (ev) => {
switch (key) {
case "HAMBAGA":
sendMessage(arraySample(HAMBAGA));
break;
case "Shift_L":
case "Shift_R":
case "Control_L":
case "Control_R":
case "Meta_L":
case "Alt_L":
case "Alt_R":
toggleModifier(button, key);
break;
default: {
const keyWithModifiers = popModifiers() + key;
consoleLog(keyWithModifiers);
sendMessage("key " + keyWithModifiers)
.then(() => consoleLog(keyWithModifiers + " OK"));
}
}
});
});
}
// keybaord integration
(function () {
var o = mknod('div');
clmod(o, 'keybaord-container', 1);
ebi('op_msg').appendChild(o);
o = mknod('style');
o.innerHTML = `
.keybaord-body {
display: flex;
flex-flow: column nowrap;
margin: .6em 0;
}
.keybaord-row {
display: flex;
}
.keybaord-key {
border: 1px solid rgba(128,128,128,0.2);
width: 41px;
height: 40px;
display: flex;
justify-content: center;
align-items: center;
}
.keybaord-key:active {
background-color: lightgrey;
}
.keybaord-key.keybaord-modifier-on {
background-color: lightblue;
}
.keybaord-key.keybaord-backspace {
width: 82px;
}
.keybaord-key.keybaord-tab {
width: 55px;
}
.keybaord-key.keybaord-enter {
width: 69px;
}
.keybaord-key.keybaord-capslock {
width: 80px;
}
.keybaord-key.keybaord-backslash {
width: 88px;
}
.keybaord-key.keybaord-lshift {
width: 65px;
}
.keybaord-key.keybaord-rshift {
width: 103px;
}
.keybaord-key.keybaord-lctrl {
width: 55px;
}
.keybaord-key.keybaord-super {
width: 55px;
}
.keybaord-key.keybaord-alt {
width: 55px;
}
.keybaord-key.keybaord-altgr {
width: 55px;
}
.keybaord-key.keybaord-what {
width: 55px;
}
.keybaord-key.keybaord-rctrl {
width: 55px;
}
.keybaord-key.keybaord-spacebar {
width: 302px;
}
`;
document.head.appendChild(o);
initKeybaord('/', hambagas,
(msg) => { toast.inf(2, msg.toString()) },
(msg) => { toast.err(30, msg.toString()) });
})();
// live2d (dumb pointless meme)
// dependencies for this part are not tracked in git
// so delete this section if you wanna use this file
// (or supply your own l2d model and js)
(function () {
var o = mknod('link');
o.setAttribute('rel', 'stylesheet');
o.setAttribute('href', "/bad-memes/pio.css");
document.head.appendChild(o);
o = mknod('style');
o.innerHTML = '.pio-container{text-shadow:none;z-index:1}';
document.head.appendChild(o);
o = mknod('div');
clmod(o, 'pio-container', 1);
o.innerHTML = '<div class="pio-action"></div><canvas id="pio" width="280" height="500"></canvas>';
document.body.appendChild(o);
var remaining = 3;
for (var a of ['pio', 'l2d', 'fireworks']) {
import_js(`/bad-memes/${a}.js`, function () {
if (remaining --> 1)
return;
o = mknod('script');
o.innerHTML = 'var pio = new Paul_Pio({"selector":[],"mode":"fixed","hidden":false,"content":{"close":"ok bye"},"model":["/bad-memes/sagiri/model.json"]});';
document.body.appendChild(o);
});
}
})();

View File

@@ -1,59 +0,0 @@
/*
makes the up2k ui REALLY minimal by hiding a bunch of stuff
almost the same as minimal-up2k.html except this one...:
-- applies to every write-only folder when used with --js-browser
-- only applies if javascript is enabled
-- doesn't hide the total upload ETA display
-- looks slightly better
*/
var u2min = `
<style>
#ops, #path, #tree, #files, #epi+div+h2,
#u2conf td.c+.c, #u2cards, #srch_dz, #srch_zd {
display: none !important;
}
#u2conf {margin:5em auto 0 auto !important}
#u2conf.ww {width:70em}
#u2conf.w {width:50em}
#u2conf.w .c,
#u2conf.w #u2btn_cw {text-align:left}
#u2conf.w #u2btn_cw {width:70%}
#u2etaw {margin:3em auto}
#u2etaw.w {
text-align: center;
margin: -3.5em auto 5em auto;
}
#u2etaw.w #u2etas {margin-right:-37em}
#u2etaw.w #u2etas.o {margin-top:-2.2em}
#u2etaw.ww {margin:-1em auto}
#u2etaw.ww #u2etas {padding-left:4em}
#u2etas {
background: none !important;
border: none !important;
}
#wrap {margin-left:2em !important}
.logue {
border: none !important;
margin: 2em auto !important;
}
.logue:before {content:'' !important}
</style>
<a href="#" onclick="this.parentNode.innerHTML='';">show advanced options</a>
`;
if (!has(perms, 'read')) {
var e2 = mknod('div');
e2.innerHTML = u2min;
ebi('wrap').insertBefore(e2, QS('#epi+h2'));
}

View File

@@ -1,297 +0,0 @@
// way more specific example --
// assumes all files dropped into the uploader have a youtube-id somewhere in the filename,
// locates the youtube-ids and passes them to an API which returns a list of IDs which should be uploaded
//
// also tries to find the youtube-id in the embedded metadata
//
// assumes copyparty is behind nginx as /ytq is a standalone service which must be rproxied in place
// up2k entrypoint for the youtube-id filter;
// file-search mode bypasses the filter entirely,
// otherwise the async analyzer takes over
function up2k_namefilter(good_files, nil_files, bad_files, hooks) {
    if (up2k.uc.fsearch)
        // searching, not uploading -- nothing to filter
        return hooks[0](good_files, nil_files, bad_files, hooks.slice(1));

    a_up2k_namefilter(good_files, nil_files, bad_files, hooks).then(() => { });
}
// ebi('op_up2k').appendChild(mknod('input','unick'));
// returns the first offset in buf (a Uint8Array) where the byte
// pattern ptn occurs, or -1 when it is not found anywhere
function bstrpos(buf, ptn) {
    var len = buf.byteLength,
        first = ptn[0],
        pos = 0;

    for (;;) {
        // let the typed-array fast-path find the next candidate start
        pos = buf.indexOf(first, pos);
        if (pos < 0 || pos >= len)
            return -1;

        // verify the remaining bytes of the pattern at this position
        var n = 1;
        while (n < ptn.length && buf[pos + n] === ptn[n])
            n++;

        if (n === ptn.length)
            return pos;

        pos++;
    }
}
async function a_up2k_namefilter(good_files, nil_files, bad_files, hooks) {
var t0 = Date.now(),
yt_ids = new Set(),
textdec = new TextDecoder('latin1'),
md_ptn = new TextEncoder().encode('youtube.com/watch?v='),
file_ids = [], // all IDs found for each good_files
md_only = [], // `${id} ${fn}` where ID was only found in metadata
mofs = 0,
mnchk = 0,
mfile = '',
myid = localStorage.getItem('ytid_t0');
if (!myid)
localStorage.setItem('ytid_t0', myid = Date.now());
for (var a = 0; a < good_files.length; a++) {
var [fobj, name] = good_files[a],
cname = name, // will clobber
sz = fobj.size,
ids = [],
fn_ids = [],
md_ids = [],
id_ok = false,
m;
// all IDs found in this file
file_ids.push(ids);
// look for ID in filename; reduce the
// metadata-scan intensity if the id looks safe
m = /[\[(-]([\w-]{11})[\])]?\.(?:mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name);
id_ok = !!m;
while (true) {
// fuzzy catch-all;
// some ytdl fork did %(title)-%(id).%(ext) ...
m = /(?:^|[^\w])([\w-]{11})(?:$|[^\w-])/.exec(cname);
if (!m)
break;
cname = cname.replace(m[1], '');
yt_ids.add(m[1]);
fn_ids.unshift(m[1]);
}
// look for IDs in video metadata,
if (/\.(mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name)) {
toast.show('inf r', 0, `analyzing file ${a + 1} / ${good_files.length} :\n${name}\n\nhave analysed ${++mnchk} files in ${(Date.now() - t0) / 1000} seconds, ${humantime((good_files.length - (a + 1)) * (((Date.now() - t0) / 1000) / mnchk))} remaining,\n\nbiggest offset so far is ${mofs}, in this file:\n\n${mfile}`);
// check first and last 128 MiB;
// pWxOroN5WCo.mkv @ 6edb98 (6.92M)
// Nf-nN1wF5Xo.mp4 @ 4a98034 (74.6M)
var chunksz = 1024 * 1024 * 2, // byte
aspan = id_ok ? 128 : 512; // MiB
aspan = parseInt(Math.min(sz / 2, aspan * 1024 * 1024) / chunksz) * chunksz;
if (!aspan)
aspan = Math.min(sz, chunksz);
for (var side = 0; side < 2; side++) {
var ofs = side ? Math.max(0, sz - aspan) : 0,
nchunks = aspan / chunksz;
for (var chunk = 0; chunk < nchunks; chunk++) {
var bchunk = await fobj.slice(ofs, ofs + chunksz + 16).arrayBuffer(),
uchunk = new Uint8Array(bchunk, 0, bchunk.byteLength),
bofs = bstrpos(uchunk, md_ptn),
absofs = Math.min(ofs + bofs, (sz - ofs) + bofs),
txt = bofs < 0 ? '' : textdec.decode(uchunk.subarray(bofs)),
m;
//console.log(`side ${ side }, chunk ${ chunk }, ofs ${ ofs }, bchunk ${ bchunk.byteLength }, txt ${ txt.length }`);
while (true) {
// mkv/webm have [a-z] immediately after url
m = /(youtube\.com\/watch\?v=[\w-]{11})/.exec(txt);
if (!m)
break;
txt = txt.replace(m[1], '');
m = m[1].slice(-11);
console.log(`found ${m} @${bofs}, ${name} `);
yt_ids.add(m);
if (!has(fn_ids, m) && !has(md_ids, m)) {
md_ids.push(m);
md_only.push(`${m} ${name}`);
}
else
// id appears several times; make it preferred
md_ids.unshift(m);
// bail after next iteration
chunk = nchunks - 1;
side = 9;
if (mofs < absofs) {
mofs = absofs;
mfile = name;
}
}
ofs += chunksz;
if (ofs >= sz)
break;
}
}
}
for (var yi of md_ids)
ids.push(yi);
for (var yi of fn_ids)
if (!has(ids, yi))
ids.push(yi);
}
if (md_only.length)
console.log('recovered the following youtube-IDs by inspecting metadata:\n\n' + md_only.join('\n'));
else if (yt_ids.size)
console.log('did not discover any additional youtube-IDs by inspecting metadata; all the IDs also existed in the filenames');
else
console.log('failed to find any youtube-IDs at all, sorry');
if (false) {
var msg = `finished analysing ${mnchk} files in ${(Date.now() - t0) / 1000} seconds,\n\nbiggest offset was ${mofs} in this file:\n\n${mfile}`,
mfun = function () { toast.ok(0, msg); };
mfun();
setTimeout(mfun, 200);
return hooks[0]([], [], [], hooks.slice(1));
}
var el = ebi('unick'), unick = el ? el.value : '';
if (unick) {
console.log(`sending uploader nickname [${unick}]`);
fetch(document.location, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8' },
body: 'msg=' + encodeURIComponent(unick)
});
}
toast.inf(5, `running query for ${yt_ids.size} youtube-IDs...`);
var xhr = new XHR();
xhr.open('POST', '/ytq', true);
xhr.setRequestHeader('Content-Type', 'text/plain');
xhr.onload = xhr.onerror = function () {
if (this.status != 200)
return toast.err(0, `sorry, database query failed ;_;\n\nplease let us know so we can look at it, thx!!\n\nerror ${this.status}: ${(this.response && this.response.err) || this.responseText}`);
process_id_list(this.responseText);
};
xhr.send(Array.from(yt_ids).join('\n'));
function process_id_list(txt) {
var wanted_ids = new Set(txt.trim().split('\n')),
name_id = {},
wanted_names = new Set(), // basenames with a wanted ID -- not including relpath
wanted_names_scoped = {}, // basenames with a wanted ID -> list of dirs to search under
wanted_files = new Set(); // filedrops
for (var a = 0; a < good_files.length; a++) {
var name = good_files[a][1];
for (var b = 0; b < file_ids[a].length; b++)
if (wanted_ids.has(file_ids[a][b])) {
// let the next stage handle this to prevent dupes
//wanted_files.add(good_files[a]);
var m = /(.*)\.(mp4|webm|mkv|flv|opus|ogg|mp3|m4a|aac)$/i.exec(name);
if (!m)
continue;
var [rd, fn] = vsplit(m[1]);
if (fn in wanted_names_scoped)
wanted_names_scoped[fn].push(rd);
else
wanted_names_scoped[fn] = [rd];
wanted_names.add(fn);
name_id[m[1]] = file_ids[a][b];
break;
}
}
// add all files with the same basename as each explicitly wanted file
// (infojson/chatlog/etc when ID was discovered from metadata)
for (var a = 0; a < good_files.length; a++) {
var [rd, name] = vsplit(good_files[a][1]);
for (var b = 0; b < 3; b++) {
name = name.replace(/\.[^\.]+$/, '');
if (!wanted_names.has(name))
continue;
var vid_fp = false;
for (var c of wanted_names_scoped[name])
if (rd.startsWith(c))
vid_fp = c + name;
if (!vid_fp)
continue;
var subdir = name_id[vid_fp];
subdir = `v${subdir.slice(0, 1)}/${subdir}-${myid}`;
var newpath = subdir + '/' + good_files[a][1].split(/\//g).pop();
// check if this file is a dupe
for (var c of good_files)
if (c[1] == newpath)
newpath = null;
if (!newpath)
break;
good_files[a][1] = newpath;
wanted_files.add(good_files[a]);
break;
}
}
function upload_filtered() {
if (!wanted_files.size)
return modal.alert('Good news -- turns out we already have all those.\n\nBut thank you for checking in!');
hooks[0](Array.from(wanted_files), nil_files, bad_files, hooks.slice(1));
}
function upload_all() {
hooks[0](good_files, nil_files, bad_files, hooks.slice(1));
}
var n_skip = good_files.length - wanted_files.size,
msg = `you added ${good_files.length} files; ${good_files.length == n_skip ? 'all' : n_skip} of them were skipped --\neither because we already have them,\nor because there is no youtube-ID in your filenames.\n\n<code>OK</code> / <code>Enter</code> = continue uploading just the ${wanted_files.size} files we definitely need\n\n<code>Cancel</code> / <code>ESC</code> = override the filter; upload ALL the files you added`;
if (!n_skip)
upload_filtered();
else
modal.confirm(msg, upload_filtered, upload_all);
};
}
up2k_hooks.push(function () {
up2k.gotallfiles.unshift(up2k_namefilter);
});
// keeps the optional nickname field ('unick') in sync with localStorage:
// restores the saved value and installs a save-on-input handler,
// but leaves the field alone while the user is actively typing in it
setInterval(function () {
    var el = ebi('unick');
    if (!el || el === document.activeElement)
        return;

    el.oninput = function () {
        localStorage.setItem('unick', el.value);
    };
    el.value = localStorage.getItem('unick') || '';
}, 1000);

View File

@@ -1,45 +0,0 @@
// hooks into up2k
// up2k hook: invoked after the dropped directory tree has been walked
// and every file discovered, before the upload confirmation dialogue;
//   good_files will successfully upload,
//   nil_files are empty files (alerted about in the final hook),
//   bad_files are unreadable and cannot be uploaded
function up2k_namefilter(good_files, nil_files, bad_files, hooks) {
    var lists = [good_files, nil_files, bad_files];

    // build a flat list of filenames (what you would hand to an api)
    var names = [];
    lists.forEach(function (lst) {
        lst.forEach(function (ent) {
            names.push(ent[1]);
        });
    });

    toast.inf(5, "running database query...");

    // simulate a slow api roundtrip before deciding what to keep
    setTimeout(function () {
        // example policy: only keep webm files
        var filtered = lists.map(function (lst) {
            return lst.filter(function (ent) {
                return /\.webm$/.test(ent[1]);
            });
        });

        // finally, call the next hook in the chain
        hooks[0](filtered[0], filtered[1], filtered[2], hooks.slice(1));
    }, 1000);
}
// register
up2k_hooks.push(function () {
up2k.gotallfiles.unshift(up2k_namefilter);
});

View File

@@ -1,31 +0,0 @@
#!/bin/sh
#
# PROVIDE: copyparty
# REQUIRE: networking
# KEYWORD:
. /etc/rc.subr
name="copyparty"
rcvar="copyparty_enable"
copyparty_user="copyparty"
copyparty_args="-e2dsa -v /storage:/storage:r" # change as you see fit
copyparty_command="/usr/local/bin/python3.8 /usr/local/copyparty/copyparty-sfx.py ${copyparty_args}"
pidfile="/var/run/copyparty/${name}.pid"
command="/usr/sbin/daemon"
command_args="-P ${pidfile} -r -f ${copyparty_command}"
stop_postcmd="copyparty_shutdown"
# post-stop hook: terminate the supervising daemon(8) process,
# identified by the pidfile, if one is still running
copyparty_shutdown()
{
	[ -e "${pidfile}" ] && {
		echo "Stopping supervising daemon."
		kill -s TERM `cat ${pidfile}`
	}
}
load_rc_config $name
: ${copyparty_enable:=no}
run_rc_command "$1"

View File

@@ -1,23 +0,0 @@
# systemd service which generates a new TLS certificate on each boot,
# that way the one-year expiry time won't cause any issues --
# just have everyone trust the ca.pem once every 10 years
#
# assumptions/placeholder values:
# * this script and copyparty runs as user "cpp"
# * copyparty repo is at ~cpp/dev/copyparty
# * CA is named partylan
# * server IPs = 10.1.2.3 and 192.168.123.1
# * server hostname = party.lan
[Unit]
Description=copyparty certificate generator
Before=copyparty.service
[Service]
User=cpp
Type=oneshot
SyslogIdentifier=cpp-cert
ExecStart=/bin/bash -c 'cd ~/dev/copyparty/contrib && ./cfssl.sh partylan 10.1.2.3,192.168.123.1,party.lan y'
[Install]
WantedBy=multi-user.target

View File

@@ -2,60 +2,18 @@
# and share '/mnt' with anonymous read+write # and share '/mnt' with anonymous read+write
# #
# installation: # installation:
# cp -pv copyparty.service /etc/systemd/system # cp -pv copyparty.service /etc/systemd/system && systemctl enable --now copyparty
# restorecon -vr /etc/systemd/system/copyparty.service
# firewall-cmd --permanent --add-port={80,443,3923}/tcp # --zone=libvirt
# firewall-cmd --reload
# systemctl daemon-reload && systemctl enable --now copyparty
# #
# you may want to: # you may want to:
# change "User=cpp" and "/home/cpp/" to another user # change '/usr/bin/python' to another interpreter
# remove the nft lines to only listen on port 3923 # change '/mnt::a' to another location or permission-set
# and in the ExecStart= line:
# change '/usr/bin/python3' to another interpreter
# change '/mnt::rw' to another location or permission-set
# add '-q' to disable logging on busy servers
# add '-i 127.0.0.1' to only allow local connections
# add '-e2dsa' to enable filesystem scanning + indexing
# add '-e2ts' to enable metadata indexing
#
# with `Type=notify`, copyparty will signal systemd when it is ready to
# accept connections; correctly delaying units depending on copyparty.
# But note that journalctl will get the timestamps wrong due to
# python disabling line-buffering, so messages are out-of-order:
# https://user-images.githubusercontent.com/241032/126040249-cb535cc7-c599-4931-a796-a5d9af691bad.png
#
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
#
# keep ExecStartPre before ExecStart, at least on rhel8
[Unit] [Unit]
Description=copyparty file server Description=copyparty file server
[Service] [Service]
Type=notify ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -v /mnt::a
SyslogIdentifier=copyparty ExecStartPre=/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
Environment=PYTHONUNBUFFERED=x
ExecReload=/bin/kill -s USR1 $MAINPID
# user to run as + where the TLS certificate is (if any)
User=cpp
Environment=XDG_CONFIG_HOME=/home/cpp/.config
# setup forwarding from ports 80 and 443 to port 3923
ExecStartPre=+/bin/bash -c 'nft -n -a list table nat | awk "/ to :3923 /{print\$NF}" | xargs -rL1 nft delete rule nat prerouting handle; true'
ExecStartPre=+nft add table ip nat
ExecStartPre=+nft -- add chain ip nat prerouting { type nat hook prerouting priority -100 \; }
ExecStartPre=+nft add rule ip nat prerouting tcp dport 80 redirect to :3923
ExecStartPre=+nft add rule ip nat prerouting tcp dport 443 redirect to :3923
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# copyparty settings
ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -e2d -v /mnt::rw
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target

View File

@@ -1,27 +0,0 @@
# this will start `/usr/local/bin/copyparty-sfx.py`
# in a chroot, preventing accidental access elsewhere
# and share '/mnt' with anonymous read+write
#
# installation:
# 1) put copyparty-sfx.py and prisonparty.sh in /usr/local/bin
# 2) cp -pv prisonparty.service /etc/systemd/system && systemctl enable --now prisonparty
#
# you may want to:
# change '/mnt::rw' to another location or permission-set
# (remember to change the '/mnt' chroot arg too)
#
# enable line-buffering for realtime logging (slight performance cost):
# inside the [Service] block, add the following line:
# Environment=PYTHONUNBUFFERED=x
[Unit]
Description=copyparty file server
[Service]
SyslogIdentifier=prisonparty
WorkingDirectory=/usr/local/bin
ExecStart=/bin/bash /usr/local/bin/prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt -- \
/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -v /mnt::rw
[Install]
WantedBy=multi-user.target

View File

@@ -1,51 +0,0 @@
@echo off
rem removes the 47.6 MiB filesize limit when downloading from webdav
rem + optionally allows/enables password-auth over plaintext http
rem + optionally helps disable wpad
setlocal enabledelayedexpansion

rem `net session` only succeeds in an elevated shell; cheap admin check
net session >nul 2>&1
if %errorlevel% neq 0 (
echo sorry, you must run this as administrator
pause
exit /b
)

rem raise the WebClient download-size and request-timeout caps to their max
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v FileSizeLimitInBytes /t REG_DWORD /d 0xffffffff /f
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters /v FsCtlRequestTimeoutInSec /t REG_DWORD /d 0xffffffff /f

echo(
echo OK;
echo allow webdav basic-auth over plaintext http?
echo Y: login works, but the password will be visible in wireshark etc
echo N: login will NOT work unless you use https and valid certificates
set c=.
set /p "c=(Y/N): "
echo(
if /i not "!c!"=="y" goto :g1
rem level 2 permits basic-auth over both http and https
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v BasicAuthLevel /t REG_DWORD /d 0x2 /f
rem default is 1 (require tls)

:g1
echo(
echo OK;
echo do you want to disable wpad?
echo can give a HUGE speed boost depending on network settings
set c=.
set /p "c=(Y/N): "
echo(
if /i not "!c!"=="y" goto :g2
echo(
echo i'm about to open the [Connections] tab in [Internet Properties] for you;
echo please click [LAN settings] and disable [Automatically detect settings]
echo(
pause
rem opens Internet Properties directly on the Connections tab
control inetcpl.cpl,,4

:g2
rem restart the service so the registry changes take effect
net stop webclient
net start webclient
echo(
echo OK; all done
pause

View File

@@ -1,51 +1,50 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import os
import platform import platform
import sys
import time import time
import sys
import os
try: PY2 = sys.version_info[0] == 2
from typing import TYPE_CHECKING if PY2:
except:
TYPE_CHECKING = False
if True:
from typing import Any, Callable
PY2 = sys.version_info < (3,)
if not PY2:
unicode: Callable[[Any], str] = str
else:
sys.dont_write_bytecode = True sys.dont_write_bytecode = True
unicode = unicode # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
WINDOWS: Any = ( WINDOWS = False
[int(x) for x in platform.version().split(".")] if platform.system() == "Windows":
if platform.system() == "Windows" WINDOWS = [int(x) for x in platform.version().split(".")]
else False
)
VT100 = not WINDOWS or WINDOWS >= [10, 0, 14393] VT100 = not WINDOWS or WINDOWS >= [10, 0, 14393]
# introduced in anniversary update # introduced in anniversary update
ANYWIN = WINDOWS or sys.platform in ["msys", "cygwin"] ANYWIN = WINDOWS or sys.platform in ["msys"]
MACOS = platform.system() == "Darwin" MACOS = platform.system() == "Darwin"
try:
CORES = len(os.sched_getaffinity(0))
except:
CORES = (os.cpu_count() if hasattr(os, "cpu_count") else 0) or 2
class EnvParams(object): class EnvParams(object):
def __init__(self) -> None: def __init__(self):
self.t0 = time.time() self.t0 = time.time()
self.mod = "" self.mod = os.path.dirname(os.path.realpath(__file__))
self.cfg = "" if self.mod.endswith("__init__"):
self.ox = getattr(sys, "oxidized", None) self.mod = os.path.dirname(self.mod)
if sys.platform == "win32":
self.cfg = os.path.normpath(os.environ["APPDATA"] + "/copyparty")
elif sys.platform == "darwin":
self.cfg = os.path.expanduser("~/Library/Preferences/copyparty")
else:
self.cfg = os.path.normpath(
os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
+ "/copyparty"
)
self.cfg = self.cfg.replace("\\", "/")
try:
os.makedirs(self.cfg)
except:
if not os.path.isdir(self.cfg):
raise
E = EnvParams() E = EnvParams()

1107
copyparty/__main__.py Executable file → Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,8 @@
# coding: utf-8 # coding: utf-8
VERSION = (1, 5, 2) VERSION = (0, 11, 29)
CODENAME = "babel" CODENAME = "the grid"
BUILD_DT = (2022, 12, 12) BUILD_DT = (2021, 6, 30)
S_VERSION = ".".join(map(str, VERSION)) S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT) S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

File diff suppressed because it is too large Load Diff

View File

@@ -1,81 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
from ..util import SYMTIME, fsdec, fsenc
from . import path as path
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
_ = (path,)
__all__ = ["path"]
# grep -hRiE '(^|[^a-zA-Z_\.-])os\.' . | gsed -r 's/ /\n/g;s/\(/(\n/g' | grep -hRiE '(^|[^a-zA-Z_\.-])os\.' | sort | uniq -c
# printf 'os\.(%s)' "$(grep ^def bos/__init__.py | gsed -r 's/^def //;s/\(.*//' | tr '\n' '|' | gsed -r 's/.$//')"
def chmod(p: str, mode: int) -> None:
    """Apply *mode* to *p*; the path is filesystem-encoded first."""
    os.chmod(fsenc(p), mode)
def listdir(p: str = ".") -> list[str]:
    """List entries of *p*, decoding each name back from fs encoding."""
    return list(map(fsdec, os.listdir(fsenc(p))))
def makedirs(name: str, mode: int = 0o755, exist_ok: bool = True) -> bool:
    """Create *name* and any missing parents.

    Returns True if the directory was created, False if it already
    existed and *exist_ok* is set; re-raises the OSError otherwise.
    """
    bname = fsenc(name)
    try:
        os.makedirs(bname, mode)
        return True
    except OSError:
        # narrowed from a bare `except:` so that programming errors
        # (e.g. a bad `mode` type) are never silently swallowed just
        # because the target directory happens to exist
        if not exist_ok or not os.path.isdir(bname):
            raise
        return False
def mkdir(p: str, mode: int = 0o755) -> None:
    """Create a single directory at the fs-encoded path."""
    os.mkdir(fsenc(p), mode)
def open(p: str, *a, **ka) -> int:
    """os.open() on the fs-encoded path; returns the file descriptor.
    (intentionally shadows the builtin `open` within this module)"""
    fd = os.open(fsenc(p), *a, **ka)
    return fd
def rename(src: str, dst: str) -> None:
    """os.rename with both paths fs-encoded."""
    os.rename(fsenc(src), fsenc(dst))
def replace(src: str, dst: str) -> None:
    """os.replace (atomic overwrite where supported) with fs-encoded paths."""
    os.replace(fsenc(src), fsenc(dst))
def rmdir(p: str) -> None:
    """Remove the (empty) directory at the fs-encoded path."""
    os.rmdir(fsenc(p))
def stat(p: str) -> os.stat_result:
    """stat() the fs-encoded path."""
    bp = fsenc(p)
    return os.stat(bp)
def unlink(p: str) -> None:
    """Delete the file at the fs-encoded path."""
    os.unlink(fsenc(p))
def utime(
    p: str, times: Optional[tuple[float, float]] = None, follow_symlinks: bool = True
) -> None:
    """Set atime/mtime; *follow_symlinks* is honored only on platforms
    where symlink timestamps are supported (SYMTIME)."""
    bp = fsenc(p)
    if SYMTIME:
        os.utime(bp, times, follow_symlinks=follow_symlinks)
    else:
        os.utime(bp, times)
if not hasattr(os, "lstat"):
    # platform without lstat: symlinks cannot exist, plain stat is equivalent
    lstat = stat
else:

    def lstat(p: str) -> os.stat_result:
        """stat() without following symlinks (fs-encoded path)."""
        return os.lstat(fsenc(p))

View File

@@ -1,45 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
from ..util import SYMTIME, fsdec, fsenc
def abspath(p: str) -> str:
    """Absolute path of *p*, round-tripping through filesystem encoding."""
    ap = os.path.abspath(fsenc(p))
    return fsdec(ap)
def exists(p: str) -> bool:
    """True if the fs-encoded path exists (follows symlinks)."""
    bp = fsenc(p)
    return os.path.exists(bp)
def getmtime(p: str, follow_symlinks: bool = True) -> float:
    """mtime of *p*; uses lstat when symlinks must not be followed and
    the platform supports symlink timestamps (SYMTIME)."""
    bp = fsenc(p)
    if follow_symlinks or not SYMTIME:
        return os.path.getmtime(bp)
    return os.lstat(bp).st_mtime
def getsize(p: str) -> int:
    """File size in bytes at the fs-encoded path."""
    bp = fsenc(p)
    return os.path.getsize(bp)
def isfile(p: str) -> bool:
    """True if the fs-encoded path is a regular file."""
    bp = fsenc(p)
    return os.path.isfile(bp)
def isdir(p: str) -> bool:
    """True if the fs-encoded path is a directory."""
    bp = fsenc(p)
    return os.path.isdir(bp)
def islink(p: str) -> bool:
    """True if the fs-encoded path is a symlink."""
    bp = fsenc(p)
    return os.path.islink(bp)
def lexists(p: str) -> bool:
    """True if the fs-encoded path exists, even as a broken symlink."""
    bp = fsenc(p)
    return os.path.lexists(bp)
def realpath(p: str) -> str:
    """Canonical path with symlinks resolved, round-tripped through fs encoding."""
    rp = os.path.realpath(fsenc(p))
    return fsdec(rp)

View File

@@ -1,64 +1,70 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import threading
import time import time
import traceback import threading
import queue from .__init__ import PY2, WINDOWS, VT100
from .__init__ import CORES, TYPE_CHECKING
from .broker_mpw import MpWorker
from .broker_util import try_exec from .broker_util import try_exec
from .util import Daemon, mp from .broker_mpw import MpWorker
from .util import mp
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any
class MProcess(mp.Process): if PY2 and not WINDOWS:
def __init__( from multiprocessing.reduction import ForkingPickler
self, from StringIO import StringIO as MemesIO # pylint: disable=import-error
q_pend: queue.Queue[tuple[int, str, list[Any]]],
q_yield: queue.Queue[tuple[int, str, list[Any]]],
target: Any,
args: Any,
) -> None:
super(MProcess, self).__init__(target=target, args=args)
self.q_pend = q_pend
self.q_yield = q_yield
class BrokerMp(object): class BrokerMp(object):
"""external api; manages MpWorkers""" """external api; manages MpWorkers"""
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub):
self.hub = hub self.hub = hub
self.log = hub.log self.log = hub.log
self.args = hub.args self.args = hub.args
self.procs = [] self.procs = []
self.retpend = {}
self.retpend_mutex = threading.Lock()
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.num_workers = self.args.j or CORES cores = self.args.j
self.log("broker", "booting {} subprocesses".format(self.num_workers)) if not cores:
for n in range(1, self.num_workers + 1): cores = mp.cpu_count()
q_pend: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(1)
q_yield: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(64) self.log("broker", "booting {} subprocesses".format(cores))
for n in range(cores):
q_pend = mp.Queue(1)
q_yield = mp.Queue(64)
proc = mp.Process(target=MpWorker, args=(q_pend, q_yield, self.args, n))
proc.q_pend = q_pend
proc.q_yield = q_yield
proc.nid = n
proc.clients = {}
proc.workload = 0
thr = threading.Thread(
target=self.collector, args=(proc,), name="mp-collector"
)
thr.daemon = True
thr.start()
proc = MProcess(q_pend, q_yield, MpWorker, (q_pend, q_yield, self.args, n))
Daemon(self.collector, "mp-sink-{}".format(n), (proc,))
self.procs.append(proc) self.procs.append(proc)
proc.start() proc.start()
def shutdown(self) -> None: if not self.args.q:
thr = threading.Thread(
target=self.debug_load_balancer, name="mp-dbg-loadbalancer"
)
thr.daemon = True
thr.start()
def shutdown(self):
self.log("broker", "shutting down") self.log("broker", "shutting down")
for n, proc in enumerate(self.procs): for n, proc in enumerate(self.procs):
thr = threading.Thread( thr = threading.Thread(
target=proc.q_pend.put((0, "shutdown", [])), target=proc.q_pend.put([0, "shutdown", []]),
name="mp-shutdown-{}-{}".format(n, len(self.procs)), name="mp-shutdown-{}-{}".format(n, len(self.procs)),
) )
thr.start() thr.start()
@@ -74,12 +80,7 @@ class BrokerMp(object):
procs.pop() procs.pop()
def reload(self) -> None: def collector(self, proc):
self.log("broker", "reloading")
for _, proc in enumerate(self.procs):
proc.q_pend.put((0, "reload", []))
def collector(self, proc: MProcess) -> None:
"""receive message from hub in other process""" """receive message from hub in other process"""
while True: while True:
msg = proc.q_yield.get() msg = proc.q_yield.get()
@@ -88,41 +89,77 @@ class BrokerMp(object):
if dest == "log": if dest == "log":
self.log(*args) self.log(*args)
elif dest == "workload":
with self.mutex:
proc.workload = args[0]
elif dest == "httpdrop":
addr = args[0]
with self.mutex:
del proc.clients[addr]
if not proc.clients:
proc.workload = 0
self.hub.tcpsrv.num_clients.add(-1)
elif dest == "retq": elif dest == "retq":
# response from previous ipc call # response from previous ipc call
raise Exception("invalid broker_mp usage") with self.retpend_mutex:
retq = self.retpend.pop(retq_id)
retq.put(args)
else: else:
# new ipc invoking managed service in hub # new ipc invoking managed service in hub
try: obj = self.hub
obj = self.hub for node in dest.split("."):
for node in dest.split("."): obj = getattr(obj, node)
obj = getattr(obj, node)
# TODO will deadlock if dest performs another ipc # TODO will deadlock if dest performs another ipc
rv = try_exec(retq_id, obj, *args) rv = try_exec(retq_id, obj, *args)
except:
rv = ["exception", "stack", traceback.format_exc()]
if retq_id: if retq_id:
proc.q_pend.put((retq_id, "retq", rv)) proc.q_pend.put([retq_id, "retq", rv])
def say(self, dest: str, *args: Any) -> None: def put(self, want_retval, dest, *args):
""" """
send message to non-hub component in other process, send message to non-hub component in other process,
returns a Queue object which eventually contains the response if want_retval returns a Queue object which eventually contains the response if want_retval
(not-impl here since nothing uses it yet) (not-impl here since nothing uses it yet)
""" """
if dest == "listen": if dest == "httpconn":
for p in self.procs: sck, addr = args
p.q_pend.put((0, dest, [args[0], len(self.procs)])) sck2 = sck
if PY2:
buf = MemesIO()
ForkingPickler(buf).dump(sck)
sck2 = buf.getvalue()
elif dest == "set_netdevs": proc = sorted(self.procs, key=lambda x: x.workload)[0]
for p in self.procs: proc.q_pend.put([0, dest, [sck2, addr]])
p.q_pend.put((0, dest, list(args)))
elif dest == "cb_httpsrv_up": with self.mutex:
self.hub.cb_httpsrv_up() proc.clients[addr] = 50
proc.workload += 50
else: else:
raise Exception("what is " + str(dest)) raise Exception("what is " + str(dest))
def debug_load_balancer(self):
fmt = "\033[1m{}\033[0;36m{:4}\033[0m "
if not VT100:
fmt = "({}{:4})"
last = ""
while self.procs:
msg = ""
for proc in self.procs:
msg += fmt.format(len(proc.clients), proc.workload)
if msg != last:
last = msg
with self.hub.log_mutex:
print(msg)
time.sleep(0.1)

View File

@@ -1,84 +1,68 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
from copyparty.authsrv import AuthSrv
import argparse
import os
import signal
import sys import sys
import time
import signal
import threading import threading
import queue from .__init__ import PY2, WINDOWS
from .broker_util import ExceptionalQueue
from .__init__ import ANYWIN
from .authsrv import AuthSrv
from .broker_util import BrokerCli, ExceptionalQueue
from .httpsrv import HttpSrv from .httpsrv import HttpSrv
from .util import FAKE_MP, Daemon, HMaccas from .util import FAKE_MP
if True: # pylint: disable=using-constant-test if PY2 and not WINDOWS:
from types import FrameType import pickle # nosec
from typing import Any, Optional, Union
class MpWorker(BrokerCli): class MpWorker(object):
"""one single mp instance""" """one single mp instance"""
def __init__( def __init__(self, q_pend, q_yield, args, n):
self,
q_pend: queue.Queue[tuple[int, str, list[Any]]],
q_yield: queue.Queue[tuple[int, str, list[Any]]],
args: argparse.Namespace,
n: int,
) -> None:
super(MpWorker, self).__init__()
self.q_pend = q_pend self.q_pend = q_pend
self.q_yield = q_yield self.q_yield = q_yield
self.args = args self.args = args
self.n = n self.n = n
self.log = self._log_disabled if args.q and not args.lo else self._log_enabled self.retpend = {}
self.retpend: dict[int, Any] = {}
self.retpend_mutex = threading.Lock() self.retpend_mutex = threading.Lock()
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.workload_thr_alive = False
# we inherited signal_handler from parent, # we inherited signal_handler from parent,
# replace it with something harmless # replace it with something harmless
if not FAKE_MP: if not FAKE_MP:
sigs = [signal.SIGINT, signal.SIGTERM] signal.signal(signal.SIGINT, self.signal_handler)
if not ANYWIN:
sigs.append(signal.SIGUSR1)
for sig in sigs:
signal.signal(sig, self.signal_handler)
# starting to look like a good idea # starting to look like a good idea
self.asrv = AuthSrv(args, None, False) self.asrv = AuthSrv(args, None, False)
# instantiate all services here (TODO: inheritance?) # instantiate all services here (TODO: inheritance?)
self.iphash = HMaccas(os.path.join(self.args.E.cfg, "iphash"), 8) self.httpsrv = HttpSrv(self, True)
self.httpsrv = HttpSrv(self, n) self.httpsrv.disconnect_func = self.httpdrop
# on winxp and some other platforms, # on winxp and some other platforms,
# use thr.join() to block all signals # use thr.join() to block all signals
Daemon(self.main, "mpw-main").join() thr = threading.Thread(target=self.main, name="mpw-main")
thr.daemon = True
thr.start()
thr.join()
def signal_handler(self, sig: Optional[int], frame: Optional[FrameType]) -> None: def signal_handler(self, signal, frame):
# print('k') # print('k')
pass pass
def _log_enabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None: def log(self, src, msg, c=0):
self.q_yield.put((0, "log", [src, msg, c])) self.q_yield.put([0, "log", [src, msg, c]])
def _log_disabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None: def logw(self, msg, c=0):
pass
def logw(self, msg: str, c: Union[int, str] = 0) -> None:
self.log("mp{}".format(self.n), msg, c) self.log("mp{}".format(self.n), msg, c)
def main(self) -> None: def httpdrop(self, addr):
self.q_yield.put([0, "httpdrop", [addr]])
def main(self):
while True: while True:
retq_id, dest, args = self.q_pend.get() retq_id, dest, args = self.q_pend.get()
@@ -89,16 +73,24 @@ class MpWorker(BrokerCli):
sys.exit(0) sys.exit(0)
return return
elif dest == "reload": elif dest == "httpconn":
self.logw("mpw.asrv reloading") sck, addr = args
self.asrv.reload() if PY2:
self.logw("mpw.asrv reloaded") sck = pickle.loads(sck) # nosec
elif dest == "listen": if self.args.log_conn:
self.httpsrv.listen(args[0], args[1]) self.log("%s %s" % addr, "|%sC-qpop" % ("-" * 4,), c="1;30")
elif dest == "set_netdevs": self.httpsrv.accept(sck, addr)
self.httpsrv.set_netdevs(args[0])
with self.mutex:
if not self.workload_thr_alive:
self.workload_thr_alive = True
thr = threading.Thread(
target=self.thr_workload, name="mpw-workload"
)
thr.daemon = True
thr.start()
elif dest == "retq": elif dest == "retq":
# response from previous ipc call # response from previous ipc call
@@ -110,14 +102,28 @@ class MpWorker(BrokerCli):
else: else:
raise Exception("what is " + str(dest)) raise Exception("what is " + str(dest))
def ask(self, dest: str, *args: Any) -> ExceptionalQueue: def put(self, want_retval, dest, *args):
retq = ExceptionalQueue(1) if want_retval:
retq_id = id(retq) retq = ExceptionalQueue(1)
with self.retpend_mutex: retq_id = id(retq)
self.retpend[retq_id] = retq with self.retpend_mutex:
self.retpend[retq_id] = retq
else:
retq = None
retq_id = 0
self.q_yield.put((retq_id, dest, list(args))) self.q_yield.put([retq_id, dest, args])
return retq return retq
def say(self, dest: str, *args: Any) -> None: def thr_workload(self):
self.q_yield.put((0, dest, list(args))) """announce workloads to MpSrv (the mp controller / loadbalancer)"""
# avoid locking in extract_filedata by tracking difference here
while True:
time.sleep(0.2)
with self.mutex:
if self.httpsrv.num_clients() == 0:
# no clients rn, terminate thread
self.workload_thr_alive = False
return
self.q_yield.put([0, "workload", [self.httpsrv.workload]])

View File

@@ -1,73 +1,56 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import os
import threading import threading
from .__init__ import TYPE_CHECKING from .authsrv import AuthSrv
from .broker_util import BrokerCli, ExceptionalQueue, try_exec
from .httpsrv import HttpSrv from .httpsrv import HttpSrv
from .util import HMaccas from .broker_util import ExceptionalQueue, try_exec
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any
class BrokerThr(BrokerCli): class BrokerThr(object):
"""external api; behaves like BrokerMP but using plain threads""" """external api; behaves like BrokerMP but using plain threads"""
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub):
super(BrokerThr, self).__init__()
self.hub = hub self.hub = hub
self.log = hub.log self.log = hub.log
self.args = hub.args self.args = hub.args
self.asrv = hub.asrv self.asrv = hub.asrv
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.num_workers = 1
# instantiate all services here (TODO: inheritance?) # instantiate all services here (TODO: inheritance?)
self.iphash = HMaccas(os.path.join(self.args.E.cfg, "iphash"), 8) self.httpsrv = HttpSrv(self)
self.httpsrv = HttpSrv(self, None) self.httpsrv.disconnect_func = self.httpdrop
self.reload = self.noop
def shutdown(self) -> None: def shutdown(self):
# self.log("broker", "shutting down") # self.log("broker", "shutting down")
self.httpsrv.shutdown() self.httpsrv.shutdown()
def noop(self) -> None:
pass pass
def ask(self, dest: str, *args: Any) -> ExceptionalQueue: def put(self, want_retval, dest, *args):
if dest == "httpconn":
sck, addr = args
if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-qpop" % ("-" * 4,), c="1;30")
# new ipc invoking managed service in hub self.httpsrv.accept(sck, addr)
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
rv = try_exec(True, obj, *args) else:
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
# pretend we're broker_mp # TODO will deadlock if dest performs another ipc
retq = ExceptionalQueue(1) rv = try_exec(want_retval, obj, *args)
retq.put(rv) if not want_retval:
return retq return
def say(self, dest: str, *args: Any) -> None: # pretend we're broker_mp
if dest == "listen": retq = ExceptionalQueue(1)
self.httpsrv.listen(args[0], 1) retq.put(rv)
return return retq
if dest == "set_netdevs": def httpdrop(self, addr):
self.httpsrv.set_netdevs(args[0]) self.hub.tcpsrv.num_clients.add(-1)
return
# new ipc invoking managed service in hub
obj = self.hub
for node in dest.split("."):
obj = getattr(obj, node)
try_exec(False, obj, *args)

View File

@@ -1,28 +1,17 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse
import traceback import traceback
from queue import Queue from .util import Pebkac, Queue
from .__init__ import TYPE_CHECKING
from .authsrv import AuthSrv
from .util import HMaccas, Pebkac
if True: # pylint: disable=using-constant-test
from typing import Any, Optional, Union
from .util import RootLogger
if TYPE_CHECKING:
from .httpsrv import HttpSrv
class ExceptionalQueue(Queue, object): class ExceptionalQueue(Queue, object):
def get(self, block: bool = True, timeout: Optional[float] = None) -> Any: def get(self, block=True, timeout=None):
rv = super(ExceptionalQueue, self).get(block, timeout) rv = super(ExceptionalQueue, self).get(block, timeout)
# TODO: how expensive is this?
if isinstance(rv, list): if isinstance(rv, list):
if rv[0] == "exception": if rv[0] == "exception":
if rv[1] == "pebkac": if rv[1] == "pebkac":
@@ -33,29 +22,7 @@ class ExceptionalQueue(Queue, object):
return rv return rv
class BrokerCli(object): def try_exec(want_retval, func, *args):
"""
helps mypy understand httpsrv.broker but still fails a few levels deeper,
for example resolving httpconn.* in httpcli -- see lines tagged #mypy404
"""
log: "RootLogger"
args: argparse.Namespace
asrv: AuthSrv
httpsrv: "HttpSrv"
iphash: HMaccas
def __init__(self) -> None:
pass
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
return ExceptionalQueue(1)
def say(self, dest: str, *args: Any) -> None:
pass
def try_exec(want_retval: Union[bool, int], func: Any, *args: list[Any]) -> Any:
try: try:
return func(*args) return func(*args)

View File

@@ -1,72 +0,0 @@
import importlib
import sys
import xml.etree.ElementTree as ET
from .__init__ import PY2
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
def get_ET() -> ET.XMLParser:
    """Return the pure-python XMLParser class.

    When the C accelerator (_elementtree) is loaded, it is temporarily
    masked and ElementTree is re-imported so the returned XMLParser is
    the pure-python one (presumably because the C parser does not expose
    the expat handlers that DXMLParser overrides — TODO confirm);
    sys.modules is then restored to its previous state.
    """
    pn = "xml.etree.ElementTree"
    cn = "_elementtree"

    cmod = sys.modules.pop(cn, None)
    if not cmod:
        # accelerator was never imported; regular parser is already pure-python
        return ET.XMLParser  # type: ignore

    pmod = sys.modules.pop(pn)
    sys.modules[cn] = None  # type: ignore  # masks the C module during re-import

    ret = importlib.import_module(pn)
    for name, mod in ((pn, pmod), (cn, cmod)):
        if mod:
            sys.modules[name] = mod
        else:
            sys.modules.pop(name, None)

    sys.modules["xml.etree"].ElementTree = pmod  # type: ignore
    ret.ParseError = ET.ParseError  # type: ignore  # keep exception identity
    return ret.XMLParser  # type: ignore
XMLParser: ET.XMLParser = get_ET()
class DXMLParser(XMLParser):  # type: ignore
    """Hardened ("defused") XML parser: routes doctype declarations,
    entity definitions and external entity references to a raising
    callback instead of processing them."""

    def __init__(self) -> None:
        tb = ET.TreeBuilder()
        super(DXMLParser, self).__init__(target=tb)

        # the underlying expat parser attribute is named differently on py2
        p = self._parser if PY2 else self.parser
        p.StartDoctypeDeclHandler = self.nope
        p.EntityDeclHandler = self.nope
        p.UnparsedEntityDeclHandler = self.nope
        p.ExternalEntityRefHandler = self.nope

    def nope(self, *a: Any, **ka: Any) -> None:
        # every forbidden construct lands here
        raise BadXML("{}, {}".format(a, ka))
class BadXML(Exception):
    """Raised when parsed XML contains a forbidden construct
    (doctype / entity declaration / external entity reference)."""

    pass
def parse_xml(txt: str) -> ET.Element:
    """Parse *txt* with the hardened parser; raises BadXML on dangerous input."""
    p = DXMLParser()
    p.feed(txt)
    return p.close()  # type: ignore
def mktnod(name: str, text: str) -> ET.Element:
    """Build a leaf element *name* whose text content is *text*."""
    node = ET.Element(name)
    node.text = text
    return node
def mkenod(name: str, sub_el: Optional[ET.Element] = None) -> ET.Element:
    """Build element *name*, optionally containing the single child *sub_el*."""
    node = ET.Element(name)
    if sub_el is not None:
        node.append(sub_el)
    return node

View File

@@ -1,152 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
import re
import time
from .__init__ import ANYWIN, MACOS
from .authsrv import AXS, VFS
from .bos import bos
from .util import chkcmd, min_ex
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
from .util import RootLogger
class Fstab(object):
    """Maps filesystem paths to their filesystem type ("ext4", "vfat", ...),
    memoizing results and building a mountpoint tree from `mount` output
    where available."""

    def __init__(self, log: "RootLogger"):
        self.log_func = log

        # True when self.tab was built from actual `mount` output
        self.trusted = False
        # VFS tree keyed by mountpoint; each node's realpath holds the fs type
        self.tab: Optional[VFS] = None
        # path -> fs-type memo; wiped when it grows past 9000 entries
        self.cache: dict[str, str] = {}
        self.age = 0.0

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        """Log under the 'fstab' tag."""
        self.log_func("fstab", msg, c)

    def get(self, path: str) -> str:
        """Return the lowercased fs type at *path*; falls back to
        "ext4" ("vfat" on windows) whenever detection fails."""
        if len(self.cache) > 9000:
            # bound memory use; also drop the tab so it gets rebuilt
            self.age = time.time()
            self.tab = None
            self.cache = {}

        fs = "ext4"
        msg = "failed to determine filesystem at [{}]; assuming {}\n{}"

        if ANYWIN:
            fs = "vfat"
            try:
                path = self._winpath(path)
            except:
                self.log(msg.format(path, fs, min_ex()), 3)
                return fs

        path = path.lstrip("/")
        try:
            return self.cache[path]
        except:
            pass

        try:
            fs = self.get_w32(path) if ANYWIN else self.get_unix(path)
        except:
            self.log(msg.format(path, fs, min_ex()), 3)

        fs = fs.lower()
        self.cache[path] = fs
        self.log("found {} at {}".format(fs, path))
        return fs

    def _winpath(self, path: str) -> str:
        # try to combine volume-label + st_dev (vsn)
        path = path.replace("/", "\\")
        vid = path.split(":", 1)[0].strip("\\").split("\\", 1)[0]
        try:
            return "{}*{}".format(vid, bos.stat(path).st_dev)
        except:
            # drive may be inaccessible; label alone still identifies it
            return vid

    def build_fallback(self) -> None:
        """Single-node tab used when mount info is unavailable; marked untrusted."""
        self.tab = VFS(self.log_func, "idk", "/", AXS(), {})
        self.trusted = False

    def build_tab(self) -> None:
        """Parse `mount` output into a VFS tree of mountpoints."""
        self.log("building tab")

        # linux:  "... on /mnt type ext4 (..." / macos:  "... on /mnt (apfs, ..."
        sptn = r"^.*? on (.*) type ([^ ]+) \(.*"
        if MACOS:
            sptn = r"^.*? on (.*) \(([^ ]+), .*"

        ptn = re.compile(sptn)
        so, _ = chkcmd(["mount"])
        tab1: list[tuple[str, str]] = []
        for ln in so.split("\n"):
            m = ptn.match(ln)
            if not m:
                continue

            zs1, zs2 = m.groups()
            tab1.append((str(zs1), str(zs2)))

        # shortest mountpoint first so parents are inserted before children
        tab1.sort(key=lambda x: (len(x[0]), x[0]))
        path1, fs1 = tab1[0]
        tab = VFS(self.log_func, fs1, path1, AXS(), {})
        for path, fs in tab1[1:]:
            tab.add(fs, path.lstrip("/"))

        self.tab = tab

    def relabel(self, path: str, nval: str) -> None:
        """Override the fs type at *path* (and, if trusted, everything below)
        with *nval*; invalidates the memo."""
        assert self.tab
        self.cache = {}
        if ANYWIN:
            path = self._winpath(path)

        path = path.lstrip("/")
        ptn = re.compile(r"^[^\\/]*")
        vn, rem = self.tab._find(path)
        if not self.trusted:
            # no mtab access; have to build as we go
            if "/" in rem:
                self.tab.add("idk", os.path.join(vn.vpath, rem.split("/")[0]))
            if rem:
                self.tab.add(nval, path)
            else:
                vn.realpath = nval

            return

        # trusted tab: rewrite the fs-type prefix on this subtree
        visit = [vn]
        while visit:
            vn = visit.pop()
            vn.realpath = ptn.sub(nval, vn.realpath)
            visit.extend(list(vn.nodes.values()))

    def get_unix(self, path: str) -> str:
        """fs type via the mount tab; "idk" for untrusted partial matches."""
        if not self.tab:
            try:
                self.build_tab()
                self.trusted = True
            except:
                # prisonparty or other restrictive environment
                self.log("failed to build tab:\n{}".format(min_ex()), 3)
                self.build_fallback()

        assert self.tab
        ret = self.tab._find(path)[0]
        if self.trusted or path == ret.vpath:
            return ret.realpath.split("/")[0]
        else:
            return "idk"

    def get_w32(self, path: str) -> str:
        """fs type on windows; tab entries are created via relabel()."""
        if not self.tab:
            self.build_fallback()

        assert self.tab
        ret = self.tab._find(path)[0]
        return ret.realpath

View File

@@ -1,448 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import argparse
import logging
import os
import stat
import sys
import time
from pyftpdlib.authorizers import AuthenticationFailed, DummyAuthorizer
from pyftpdlib.filesystems import AbstractedFS, FilesystemError
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from .__init__ import PY2, TYPE_CHECKING, E
from .bos import bos
from .util import Daemon, Pebkac, exclude_dotfiles, fsenc, ipnorm
try:
from pyftpdlib.ioloop import IOLoop
except ImportError:
p = os.path.join(E.mod, "vend")
print("loading asynchat from " + p)
sys.path.append(p)
from pyftpdlib.ioloop import IOLoop
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
import typing
from typing import Any, Optional
class FtpAuth(DummyAuthorizer):
    """pyftpdlib authorizer bridging FTP logins to copyparty accounts."""

    def __init__(self, hub: "SvcHub") -> None:
        super(FtpAuth, self).__init__()
        self.hub = hub

    def validate_authentication(
        self, username: str, password: str, handler: Any
    ) -> None:
        """Resolve the login to a copyparty user and store it on the handler;
        raises AuthenticationFailed for banned IPs or bad credentials."""
        handler.username = "{}:{}".format(username, password)

        ip = handler.addr[0]
        if ip.startswith("::ffff:"):
            # strip the ipv4-mapped-ipv6 prefix
            ip = ip[7:]

        ip = ipnorm(ip)
        bans = self.hub.bans
        if ip in bans:
            rt = bans[ip] - time.time()
            if rt < 0:
                logging.info("client unbanned")
                del bans[ip]
            else:
                raise AuthenticationFailed("banned")

        asrv = self.hub.asrv
        if username == "anonymous":
            uname = "*"
        else:
            # the password may arrive in either field; "*" = anonymous fallback
            uname = asrv.iacct.get(password, "") or asrv.iacct.get(username, "") or "*"

        if not uname or not (asrv.vfs.aread.get(uname) or asrv.vfs.awrite.get(uname)):
            # no read/write perms anywhere; count towards the ban threshold
            g = self.hub.gpwd
            if g.lim:
                bonk, ip = g.bonk(ip, handler.username)
                if bonk:
                    logging.warning("client banned: invalid passwords")
                    bans[ip] = bonk

            raise AuthenticationFailed("Authentication failed.")

        handler.username = uname

    def get_home_dir(self, username: str) -> str:
        return "/"

    def has_user(self, username: str) -> bool:
        asrv = self.hub.asrv
        return username in asrv.acct

    def has_perm(self, username: str, perm: int, path: Optional[str] = None) -> bool:
        return True  # handled at filesystem layer

    def get_perms(self, username: str) -> str:
        return "elradfmwMT"

    def get_msg_login(self, username: str) -> str:
        return "sup {}".format(username)

    def get_msg_quit(self, username: str) -> str:
        return "cya"
class FtpFs(AbstractedFS):
def __init__(
    self, root: str, cmd_channel: Any
) -> None:  # pylint: disable=super-init-not-called
    """Per-connection filesystem adapter; *root* is unused (pyftpdlib API),
    access control comes from copyparty's VFS for the resolved user."""
    self.h = self.cmd_channel = cmd_channel  # type: FTPHandler
    self.hub: "SvcHub" = cmd_channel.hub
    self.args = cmd_channel.args
    # map the supplied password back to a username; "*" = anonymous
    self.uname = self.hub.asrv.iacct.get(cmd_channel.password, "*")

    self.cwd = "/"  # pyftpdlib convention of leading slash
    self.root = "/var/lib/empty"

    # refreshed by chdir() for the current vpath
    self.can_read = self.can_write = self.can_move = False
    self.can_delete = self.can_get = self.can_upget = False

    self.listdirinfo = self.listdir
    self.chdir(".")
def v2a(
    self,
    vpath: str,
    r: bool = False,
    w: bool = False,
    m: bool = False,
    d: bool = False,
) -> str:
    """Map an absolute vpath to a filesystem abspath, enforcing the
    requested read/write/move/delete permissions for the current user;
    raises FilesystemError when denied or unmapped."""
    try:
        vpath = vpath.replace("\\", "/").lstrip("/")
        vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, r, w, m, d)
        if not vfs.realpath:
            raise FilesystemError("no filesystem mounted at this path")

        return os.path.join(vfs.realpath, rem)
    except Pebkac as ex:
        # translate copyparty errors into what pyftpdlib expects
        raise FilesystemError(str(ex))
def rv2a(
    self,
    vpath: str,
    r: bool = False,
    w: bool = False,
    m: bool = False,
    d: bool = False,
) -> str:
    """Like v2a() but resolves *vpath* relative to the current working dir."""
    joined = os.path.join(self.cwd, vpath)
    return self.v2a(joined, r, w, m, d)
def ftp2fs(self, ftppath: str) -> str:
    """pyftpdlib hook for virtual->fs mapping; this FS keeps vpaths
    throughout (self.cwd is a vpath), so the path passes through."""
    # return self.v2a(ftppath)
    return ftppath  # self.cwd must be vpath


def fs2ftp(self, fspath: str) -> str:
    """Inverse hook; likewise an identity mapping here."""
    # raise NotImplementedError()
    return fspath
def validpath(self, path: str) -> bool:
    """Deny direct access to copyparty's internal index files inside
    .hist folders; returns True for everything else."""
    in_hist = "/.hist/" in path
    if in_hist and ("/up2k." in path or path.endswith("/dir.txt")):
        raise FilesystemError("access to this file is forbidden")

    return True
def open(self, filename: str, mode: str) -> typing.IO[Any]:
r = "r" in mode
w = "w" in mode or "a" in mode or "+" in mode
ap = self.rv2a(filename, r, w)
if w:
try:
st = bos.stat(ap)
td = time.time() - st.st_mtime
except:
td = 0
if td < -1 or td > self.args.ftp_wt:
raise FilesystemError("cannot open existing file for writing")
self.validpath(ap)
return open(fsenc(ap), mode)
def chdir(self, path: str) -> None:
nwd = join(self.cwd, path)
vfs, rem = self.hub.asrv.vfs.get(nwd, self.uname, False, False)
ap = vfs.canonical(rem)
if not bos.path.isdir(ap):
# returning 550 is library-default and suitable
raise FilesystemError("Failed to change directory")
self.cwd = nwd
(
self.can_read,
self.can_write,
self.can_move,
self.can_delete,
self.can_get,
self.can_upget,
) = self.hub.asrv.vfs.can_access(self.cwd.lstrip("/"), self.h.username)
def mkdir(self, path: str) -> None:
ap = self.rv2a(path, w=True)
bos.mkdir(ap)
def listdir(self, path: str) -> list[str]:
vpath = join(self.cwd, path).lstrip("/")
try:
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, True, False)
fsroot, vfs_ls1, vfs_virt = vfs.ls(
rem,
self.uname,
not self.args.no_scandir,
[[True, False], [False, True]],
)
vfs_ls = [x[0] for x in vfs_ls1]
vfs_ls.extend(vfs_virt.keys())
if not self.args.ed:
vfs_ls = exclude_dotfiles(vfs_ls)
vfs_ls.sort()
return vfs_ls
except:
if vpath:
# display write-only folders as empty
return []
# return list of volumes
r = {x.split("/")[0]: 1 for x in self.hub.asrv.vfs.all_vols.keys()}
return list(sorted(list(r.keys())))
def rmdir(self, path: str) -> None:
ap = self.rv2a(path, d=True)
bos.rmdir(ap)
def remove(self, path: str) -> None:
if self.args.no_del:
raise FilesystemError("the delete feature is disabled in server config")
vp = join(self.cwd, path).lstrip("/")
try:
self.hub.up2k.handle_rm(self.uname, self.h.remote_ip, [vp], [])
except Exception as ex:
raise FilesystemError(str(ex))
def rename(self, src: str, dst: str) -> None:
if not self.can_move:
raise FilesystemError("not allowed for user " + self.h.username)
if self.args.no_mv:
t = "the rename/move feature is disabled in server config"
raise FilesystemError(t)
svp = join(self.cwd, src).lstrip("/")
dvp = join(self.cwd, dst).lstrip("/")
try:
self.hub.up2k.handle_mv(self.uname, svp, dvp)
except Exception as ex:
raise FilesystemError(str(ex))
def chmod(self, path: str, mode: str) -> None:
pass
def stat(self, path: str) -> os.stat_result:
try:
ap = self.rv2a(path, r=True)
return bos.stat(ap)
except:
ap = self.rv2a(path)
st = bos.stat(ap)
if not stat.S_ISDIR(st.st_mode):
raise
return st
def utime(self, path: str, timeval: float) -> None:
ap = self.rv2a(path, w=True)
return bos.utime(ap, (timeval, timeval))
def lstat(self, path: str) -> os.stat_result:
ap = self.rv2a(path)
return bos.stat(ap)
def isfile(self, path: str) -> bool:
try:
st = self.stat(path)
return stat.S_ISREG(st.st_mode)
except:
return False # expected for mojibake in ftp_SIZE()
def islink(self, path: str) -> bool:
ap = self.rv2a(path)
return bos.path.islink(ap)
def isdir(self, path: str) -> bool:
try:
st = self.stat(path)
return stat.S_ISDIR(st.st_mode)
except:
return True
def getsize(self, path: str) -> int:
ap = self.rv2a(path)
return bos.path.getsize(ap)
def getmtime(self, path: str) -> float:
ap = self.rv2a(path)
return bos.path.getmtime(ap)
def realpath(self, path: str) -> str:
return path
def lexists(self, path: str) -> bool:
ap = self.rv2a(path)
return bos.path.lexists(ap)
def get_user_by_uid(self, uid: int) -> str:
return "root"
def get_group_by_uid(self, gid: int) -> str:
return "root"
class FtpHandler(FTPHandler):
    """
    copyparty-specific FTP command handler; one instance per control
    connection, backed by FtpFs for all filesystem operations.
    """

    abstracted_fs = FtpFs
    # class-level config injected by Ftpd before the server starts
    hub: "SvcHub"
    args: argparse.Namespace

    def __init__(self, conn: Any, server: Any, ioloop: Any = None) -> None:
        self.hub: "SvcHub" = FtpHandler.hub
        self.args: argparse.Namespace = FtpHandler.args

        if PY2:
            FTPHandler.__init__(self, conn, server, ioloop)
        else:
            super(FtpHandler, self).__init__(conn, server, ioloop)

        # abspath->vpath mapping to resolve log_transfer paths
        self.vfs_map: dict[str, str] = {}

        # reduce non-debug logging
        self.log_cmds_list = [x for x in self.log_cmds_list if x not in ("CWD", "XCWD")]

    def ftp_STOR(self, file: str, mode: str = "w") -> Any:
        # Optional[str]
        # remember the vpath of this upload so log_transfer (below) can
        # map the absolute path back to a volume
        vp = join(self.fs.cwd, file).lstrip("/")
        ap = self.fs.v2a(vp)
        self.vfs_map[ap] = vp
        # print("ftp_STOR: {} {} => {}".format(vp, mode, ap))
        ret = FTPHandler.ftp_STOR(self, file, mode)
        # print("ftp_STOR: {} {} OK".format(vp, mode))
        return ret

    def log_transfer(
        self,
        cmd: str,
        filename: bytes,
        receive: bool,
        completed: bool,
        elapsed: float,
        bytes: int,  # shadows the builtin; signature dictated by pyftpdlib
    ) -> Any:
        # None
        ap = filename.decode("utf-8", "replace")
        vp = self.vfs_map.pop(ap, None)
        # print("xfer_end: {} => {}".format(ap, vp))
        if vp:
            # this was an upload registered by ftp_STOR; hand the file to
            # up2k for hashing/indexing in its owning volume
            vp, fn = os.path.split(vp)
            vfs, rem = self.hub.asrv.vfs.get(vp, self.username, False, True)
            vfs, rem = vfs.get_dbv(rem)
            self.hub.up2k.hash_file(
                vfs.realpath,
                vfs.flags,
                rem,
                fn,
                self.remote_ip,
                time.time(),
            )

        return FTPHandler.log_transfer(
            self, cmd, filename, receive, completed, elapsed, bytes
        )
try:
    # FTPS support is optional; TLS_FTPHandler requires pyopenssl
    from pyftpdlib.handlers import TLS_FTPHandler

    class SftpHandler(FtpHandler, TLS_FTPHandler):
        pass

except:
    # no pyopenssl available; Ftpd reports the error if --ftps is requested
    pass
class Ftpd(object):
    """Configures and starts the FTP/FTPS listeners on a background thread."""

    def __init__(self, hub: "SvcHub") -> None:
        self.hub = hub
        self.args = hub.args

        # (handler-class, port) pairs to serve
        hs = []
        if self.args.ftp:
            hs.append([FtpHandler, self.args.ftp])
        if self.args.ftps:
            try:
                # SftpHandler only exists if pyopenssl was importable
                h1 = SftpHandler
            except:
                t = "\nftps requires pyopenssl;\nplease run the following:\n\n {} -m pip install --user pyopenssl\n"
                print(t.format(sys.executable))
                sys.exit(1)

            h1.certfile = os.path.join(self.args.E.cfg, "cert.pem")
            h1.tls_control_required = True
            h1.tls_data_required = True

            hs.append([h1, self.args.ftps])

        for h_lp in hs:
            h2, lp = h_lp
            # class-level config shared by every connection of this handler
            h2.hub = hub
            h2.args = hub.args
            h2.authorizer = FtpAuth(hub)

            if self.args.ftp_pr:
                # restrict PASV data connections to the given port range
                p1, p2 = [int(x) for x in self.args.ftp_pr.split("-")]
                if self.args.ftp and self.args.ftps:
                    # divide port range in half
                    d = int((p2 - p1) / 2)
                    if lp == self.args.ftp:
                        p2 = p1 + d
                    else:
                        p1 += d + 1

                h2.passive_ports = list(range(p1, p2 + 1))

            if self.args.ftp_nat:
                # public address to advertise in PASV replies behind NAT
                h2.masquerade_address = self.args.ftp_nat

        lgr = logging.getLogger("pyftpdlib")
        lgr.setLevel(logging.DEBUG if self.args.ftpv else logging.INFO)

        ioloop = IOLoop()
        for ip in self.args.i:
            for h, lp in hs:
                FTPServer((ip, int(lp)), h, ioloop)

        # run all listeners from one background thread
        Daemon(ioloop.loop, "ftp")
def join(p1: str, p2: str) -> str:
    """Join two virtual paths and normalize; output always uses forward slashes."""
    tail = p2.replace("\\", "/")
    merged = os.path.normpath(os.path.join(p1, tail))
    return merged.replace("\\", "/")

File diff suppressed because it is too large Load Diff

View File

@@ -1,38 +1,24 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse # typechk
import os
import re import re
import socket import os
import threading # typechk
import time import time
import socket
HAVE_SSL = True
try: try:
HAVE_SSL = True
import ssl import ssl
except: except:
HAVE_SSL = False HAVE_SSL = False
from . import util as Util from .__init__ import E
from .__init__ import TYPE_CHECKING, EnvParams from .util import Unrecv
from .authsrv import AuthSrv # typechk
from .httpcli import HttpCli from .httpcli import HttpCli
from .ico import Ico
from .mtag import HAVE_FFMPEG
from .th_cli import ThumbCli
from .th_srv import HAVE_PIL, HAVE_VIPS
from .u2idx import U2idx from .u2idx import U2idx
from .util import HMaccas, shut_socket from .th_cli import ThumbCli
from .th_srv import HAVE_PIL
if True: # pylint: disable=using-constant-test from .ico import Ico
from typing import Optional, Pattern, Union
if TYPE_CHECKING:
from .httpsrv import HttpSrv
PTN_HTTP = re.compile(br"[A-Z]{3}[A-Z ]")
class HttpConn(object): class HttpConn(object):
@@ -41,49 +27,39 @@ class HttpConn(object):
creates an HttpCli for each request (Connection: Keep-Alive) creates an HttpCli for each request (Connection: Keep-Alive)
""" """
def __init__( def __init__(self, sck, addr, hsrv):
self, sck: socket.socket, addr: tuple[str, int], hsrv: "HttpSrv"
) -> None:
self.s = sck self.s = sck
self.sr: Optional[Util._Unrecv] = None
self.cli: Optional[HttpCli] = None
self.addr = addr self.addr = addr
self.hsrv = hsrv self.hsrv = hsrv
self.mutex: threading.Lock = hsrv.mutex # mypy404 self.args = hsrv.args
self.args: argparse.Namespace = hsrv.args # mypy404 self.asrv = hsrv.asrv
self.E: EnvParams = self.args.E self.is_mp = hsrv.is_mp
self.asrv: AuthSrv = hsrv.asrv # mypy404
self.cert_path = hsrv.cert_path self.cert_path = hsrv.cert_path
self.u2fh: Util.FHC = hsrv.u2fh # mypy404
self.iphash: HMaccas = hsrv.broker.iphash
self.bans: dict[str, int] = hsrv.bans
self.aclose: dict[str, int] = hsrv.aclose
enth = (HAVE_PIL or HAVE_VIPS or HAVE_FFMPEG) and not self.args.no_thumb enth = HAVE_PIL and not self.args.no_thumb
self.thumbcli: Optional[ThumbCli] = ThumbCli(hsrv) if enth else None # mypy404 self.thumbcli = ThumbCli(hsrv.broker) if enth else None
self.ico: Ico = Ico(self.args) # mypy404 self.ico = Ico(self.args)
self.t0: float = time.time() # mypy404 self.t0 = time.time()
self.stopping = False self.stopping = False
self.nreq: int = -1 # mypy404 self.nreq = 0
self.nbyte: int = 0 # mypy404 self.nbyte = 0
self.u2idx: Optional[U2idx] = None self.workload = 0
self.log_func: "Util.RootLogger" = hsrv.log # mypy404 self.u2idx = None
self.log_src: str = "httpconn" # mypy404 self.log_func = hsrv.log
self.lf_url: Optional[Pattern[str]] = ( self.lf_url = re.compile(self.args.lf_url) if self.args.lf_url else None
re.compile(self.args.lf_url) if self.args.lf_url else None
) # mypy404
self.set_rproxy() self.set_rproxy()
def shutdown(self) -> None: def shutdown(self):
self.stopping = True self.stopping = True
try: try:
shut_socket(self.log, self.s, 1) self.s.shutdown(socket.SHUT_RDWR)
self.s.close()
except: except:
pass pass
def set_rproxy(self, ip: Optional[str] = None) -> str: def set_rproxy(self, ip=None):
if ip is None: if ip is None:
color = 36 color = 36
ip = self.addr[0] ip = self.addr[0]
@@ -96,37 +72,35 @@ class HttpConn(object):
self.log_src = "{} \033[{}m{}".format(ip, color, self.addr[1]).ljust(26) self.log_src = "{} \033[{}m{}".format(ip, color, self.addr[1]).ljust(26)
return self.log_src return self.log_src
def respath(self, res_name: str) -> str: def respath(self, res_name):
return os.path.join(self.E.mod, "web", res_name) return os.path.join(E.mod, "web", res_name)
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg, c=0):
self.log_func(self.log_src, msg, c) self.log_func(self.log_src, msg, c)
def get_u2idx(self) -> U2idx: def get_u2idx(self):
# one u2idx per tcp connection;
# sqlite3 fully parallelizes under python threads
if not self.u2idx: if not self.u2idx:
self.u2idx = U2idx(self) self.u2idx = U2idx(self)
return self.u2idx return self.u2idx
def _detect_https(self) -> bool: def _detect_https(self):
method = None method = None
if self.cert_path: if self.cert_path:
try: try:
method = self.s.recv(4, socket.MSG_PEEK) method = self.s.recv(4, socket.MSG_PEEK)
except socket.timeout: except socket.timeout:
return False return
except AttributeError: except AttributeError:
# jython does not support msg_peek; forget about https # jython does not support msg_peek; forget about https
method = self.s.recv(4) method = self.s.recv(4)
self.sr = Util.Unrecv(self.s, self.log) self.sr = Unrecv(self.s)
self.sr.buf = method self.sr.buf = method
# jython used to do this, they stopped since it's broken # jython used to do this, they stopped since it's broken
# but reimplementing sendall is out of scope for now # but reimplementing sendall is out of scope for now
if not getattr(self.s, "sendall", None): if not getattr(self.s, "sendall", None):
self.s.sendall = self.s.send # type: ignore self.s.sendall = self.s.send
if len(method) != 4: if len(method) != 4:
err = "need at least 4 bytes in the first packet; got {}".format( err = "need at least 4 bytes in the first packet; got {}".format(
@@ -136,20 +110,17 @@ class HttpConn(object):
self.log(err) self.log(err)
self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8")) self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8"))
return False return
return not method or not bool(PTN_HTTP.match(method)) return method not in [None, b"GET ", b"HEAD", b"POST", b"PUT ", b"OPTI"]
def run(self) -> None:
self.s.settimeout(10)
def run(self):
self.sr = None self.sr = None
if self.args.https_only: if self.args.https_only:
is_https = True is_https = True
elif self.args.http_only or not HAVE_SSL: elif self.args.http_only or not HAVE_SSL:
is_https = False is_https = False
else: else:
# raise Exception("asdf")
is_https = self._detect_https() is_https = self._detect_https()
if is_https: if is_https:
@@ -178,15 +149,14 @@ class HttpConn(object):
self.s = ctx.wrap_socket(self.s, server_side=True) self.s = ctx.wrap_socket(self.s, server_side=True)
msg = [ msg = [
"\033[1;3{:d}m{}".format(c, s) "\033[1;3{:d}m{}".format(c, s)
for c, s in zip([0, 5, 0], self.s.cipher()) # type: ignore for c, s in zip([0, 5, 0], self.s.cipher())
] ]
self.log(" ".join(msg) + "\033[0m") self.log(" ".join(msg) + "\033[0m")
if self.args.ssl_dbg and hasattr(self.s, "shared_ciphers"): if self.args.ssl_dbg and hasattr(self.s, "shared_ciphers"):
ciphers = self.s.shared_ciphers() overlap = [y[::-1] for y in self.s.shared_ciphers()]
assert ciphers lines = [str(x) for x in (["TLS cipher overlap:"] + overlap)]
overlap = [str(y[::-1]) for y in ciphers] self.log("\n".join(lines))
self.log("TLS cipher overlap:" + "\n".join(overlap))
for k, v in [ for k, v in [
["compression", self.s.compression()], ["compression", self.s.compression()],
["ALPN proto", self.s.selected_alpn_protocol()], ["ALPN proto", self.s.selected_alpn_protocol()],
@@ -197,7 +167,11 @@ class HttpConn(object):
except Exception as ex: except Exception as ex:
em = str(ex) em = str(ex)
if "ALERT_CERTIFICATE_UNKNOWN" in em: if "ALERT_BAD_CERTIFICATE" in em:
# firefox-linux if there is no exception yet
self.log("client rejected our certificate (nice)")
elif "ALERT_CERTIFICATE_UNKNOWN" in em:
# android-chrome keeps doing this # android-chrome keeps doing this
pass pass
@@ -207,10 +181,15 @@ class HttpConn(object):
return return
if not self.sr: if not self.sr:
self.sr = Util.Unrecv(self.s, self.log) self.sr = Unrecv(self.s)
while not self.stopping: while not self.stopping:
if self.is_mp:
self.workload += 50
if self.workload >= 2 ** 31:
self.workload = 100
self.nreq += 1 self.nreq += 1
self.cli = HttpCli(self) cli = HttpCli(self)
if not self.cli.run(): if not cli.run():
return return

View File

@@ -1,15 +1,13 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import base64
import math
import os import os
import socket
import sys import sys
import threading
import time import time
import base64
import queue import struct
import socket
import threading
try: try:
import jinja2 import jinja2
@@ -28,31 +26,8 @@ except ImportError:
) )
sys.exit(1) sys.exit(1)
from .__init__ import ANYWIN, MACOS, TYPE_CHECKING, EnvParams from .__init__ import E, MACOS
from .bos import bos
from .httpconn import HttpConn from .httpconn import HttpConn
from .util import (
E_SCK,
FHC,
Daemon,
Garda,
Magician,
Netdev,
NetMap,
ipnorm,
min_ex,
shut_socket,
spack,
start_log_thrs,
start_stackmon,
)
if TYPE_CHECKING:
from .broker_util import BrokerCli
from .ssdp import SSDPr
if True: # pylint: disable=using-constant-test
from typing import Any, Optional
class HttpSrv(object): class HttpSrv(object):
@@ -61,344 +36,89 @@ class HttpSrv(object):
relying on MpSrv for performance (HttpSrv is just plain threads) relying on MpSrv for performance (HttpSrv is just plain threads)
""" """
def __init__(self, broker: "BrokerCli", nid: Optional[int]) -> None: def __init__(self, broker, is_mp=False):
self.broker = broker self.broker = broker
self.nid = nid self.is_mp = is_mp
self.args = broker.args self.args = broker.args
self.E: EnvParams = self.args.E
self.log = broker.log self.log = broker.log
self.asrv = broker.asrv self.asrv = broker.asrv
# redefine in case of multiprocessing self.disconnect_func = None
socket.setdefaulttimeout(120)
nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
self.magician = Magician()
self.nm = NetMap([], {})
self.ssdp: Optional["SSDPr"] = None
self.gpwd = Garda(self.args.ban_pw)
self.g404 = Garda(self.args.ban_404)
self.bans: dict[str, int] = {}
self.aclose: dict[str, int] = {}
self.ip = ""
self.port = 0
self.name = "hsrv" + nsuf
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.stopping = False
self.tp_nthr = 0 # actual self.clients = {}
self.tp_ncli = 0 # fading self.workload = 0
self.tp_time = 0.0 # latest worker collect self.workload_thr_alive = False
self.tp_q: Optional[queue.LifoQueue[Any]] = ( self.cb_ts = 0
None if self.args.no_htp else queue.LifoQueue() self.cb_v = 0
)
self.t_periodic: Optional[threading.Thread] = None
self.u2fh = FHC()
self.srvs: list[socket.socket] = []
self.ncli = 0 # exact
self.clients: set[HttpConn] = set() # laggy
self.nclimax = 0
self.cb_ts = 0.0
self.cb_v = ""
env = jinja2.Environment() env = jinja2.Environment()
env.loader = jinja2.FileSystemLoader(os.path.join(self.E.mod, "web")) env.loader = jinja2.FileSystemLoader(os.path.join(E.mod, "web"))
jn = ["splash", "svcs", "browser", "browser2", "msg", "md", "mde", "cf"] self.j2 = {
self.j2 = {x: env.get_template(x + ".html") for x in jn} x: env.get_template(x + ".html")
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz") for x in ["splash", "browser", "browser2", "msg", "md", "mde"]
self.prism = os.path.exists(zs) }
if self.args.zs: cert_path = os.path.join(E.cfg, "cert.pem")
from .ssdp import SSDPr if os.path.exists(cert_path):
self.ssdp = SSDPr(broker)
cert_path = os.path.join(self.E.cfg, "cert.pem")
if bos.path.exists(cert_path):
self.cert_path = cert_path self.cert_path = cert_path
else: else:
self.cert_path = "" self.cert_path = None
if self.tp_q: def accept(self, sck, addr):
self.start_threads(4)
if nid:
if self.args.stackmon:
start_stackmon(self.args.stackmon, nid)
if self.args.log_thrs:
start_log_thrs(self.log, self.args.log_thrs, nid)
self.th_cfg: dict[str, Any] = {}
Daemon(self.post_init, "hsrv-init2")
def post_init(self) -> None:
try:
x = self.broker.ask("thumbsrv.getcfg")
self.th_cfg = x.get()
except:
pass
def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
self.nm = NetMap([self.ip], netdevs)
def start_threads(self, n: int) -> None:
self.tp_nthr += n
if self.args.log_htp:
self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)
for _ in range(n):
Daemon(self.thr_poolw, self.name + "-poolw")
def stop_threads(self, n: int) -> None:
self.tp_nthr -= n
if self.args.log_htp:
self.log(self.name, "workers -= {} = {}".format(n, self.tp_nthr), 6)
assert self.tp_q
for _ in range(n):
self.tp_q.put(None)
def periodic(self) -> None:
while True:
time.sleep(2 if self.tp_ncli or self.ncli else 10)
with self.mutex:
self.u2fh.clean()
if self.tp_q:
self.tp_ncli = max(self.ncli, self.tp_ncli - 2)
if self.tp_nthr > self.tp_ncli + 8:
self.stop_threads(4)
if not self.ncli and not self.u2fh.cache and self.tp_nthr <= 8:
self.t_periodic = None
return
def listen(self, sck: socket.socket, nlisteners: int) -> None:
if self.args.j != 1:
# lost in the pickle; redefine
if not ANYWIN or self.args.reuseaddr:
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sck.settimeout(None) # < does not inherit, ^ opts above do
self.ip, self.port = sck.getsockname()[:2]
self.srvs.append(sck)
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
Daemon(
self.thr_listen,
"httpsrv-n{}-listen-{}-{}".format(self.nid or "0", self.ip, self.port),
(sck,),
)
def thr_listen(self, srv_sck: socket.socket) -> None:
"""listens on a shared tcp server"""
ip, port = srv_sck.getsockname()[:2]
fno = srv_sck.fileno()
hip = "[{}]".format(ip) if ":" in ip else ip
msg = "subscribed @ {}:{} f{} p{}".format(hip, port, fno, os.getpid())
self.log(self.name, msg)
def fun() -> None:
self.broker.say("cb_httpsrv_up")
threading.Thread(target=fun, name="sig-hsrv-up1").start()
while not self.stopping:
if self.args.log_conn:
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="90")
spins = 0
while self.ncli >= self.nclimax:
if not spins:
self.log(self.name, "at connection limit; waiting", 3)
spins += 1
time.sleep(0.1)
if spins != 50 or not self.args.aclose:
continue
ipfreq: dict[str, int] = {}
with self.mutex:
for c in self.clients:
ip = ipnorm(c.ip)
try:
ipfreq[ip] += 1
except:
ipfreq[ip] = 1
ip, n = sorted(ipfreq.items(), key=lambda x: x[1], reverse=True)[0]
if n < self.nclimax / 2:
continue
self.aclose[ip] = int(time.time() + self.args.aclose * 60)
nclose = 0
nloris = 0
nconn = 0
with self.mutex:
for c in self.clients:
cip = ipnorm(c.ip)
if ip != cip:
continue
nconn += 1
try:
if (
c.nreq >= 1
or not c.cli
or c.cli.in_hdr_recv
or c.cli.keepalive
):
Daemon(c.shutdown)
nclose += 1
if c.nreq <= 0 and (not c.cli or c.cli.in_hdr_recv):
nloris += 1
except:
pass
t = "{} downgraded to connection:close for {} min; dropped {}/{} connections"
self.log(self.name, t.format(ip, self.args.aclose, nclose, nconn), 1)
if nloris < nconn / 2:
continue
t = "slowloris (idle-conn): {} banned for {} min"
self.log(self.name, t.format(ip, self.args.loris, nclose), 1)
self.bans[ip] = int(time.time() + self.args.loris * 60)
if self.args.log_conn:
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="90")
try:
sck, saddr = srv_sck.accept()
cip, cport = saddr[:2]
if cip.startswith("::ffff:"):
cip = cip[7:]
addr = (cip, cport)
except (OSError, socket.error) as ex:
if self.stopping:
break
self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
time.sleep(0.02)
continue
if self.args.log_conn:
t = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
"-" * 3, ip, port % 8, port
)
self.log("%s %s" % addr, t, c="90")
self.accept(sck, addr)
def accept(self, sck: socket.socket, addr: tuple[str, int]) -> None:
"""takes an incoming tcp connection and creates a thread to handle it""" """takes an incoming tcp connection and creates a thread to handle it"""
now = time.time() if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-cthr" % ("-" * 5,), c="1;30")
if now - (self.tp_time or now) > 300: thr = threading.Thread(
t = "httpserver threadpool died: tpt {:.2f}, now {:.2f}, nthr {}, ncli {}" target=self.thr_client,
self.log(self.name, t.format(self.tp_time, now, self.tp_nthr, self.ncli), 1) args=(sck, addr),
self.tp_time = 0 name="httpsrv-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
self.tp_q = None
with self.mutex:
self.ncli += 1
if not self.t_periodic:
name = "hsrv-pt"
if self.nid:
name += "-{}".format(self.nid)
self.t_periodic = Daemon(self.periodic, name)
if self.tp_q:
self.tp_time = self.tp_time or now
self.tp_ncli = max(self.tp_ncli, self.ncli)
if self.tp_nthr < self.ncli + 4:
self.start_threads(8)
self.tp_q.put((sck, addr))
return
if not self.args.no_htp:
t = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
self.log(self.name, t, 1)
Daemon(
self.thr_client,
"httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
(sck, addr),
) )
thr.daemon = True
thr.start()
def thr_poolw(self) -> None: def num_clients(self):
assert self.tp_q with self.mutex:
while True: return len(self.clients)
task = self.tp_q.get()
if not task:
break
with self.mutex:
self.tp_time = 0
def shutdown(self):
clients = list(self.clients.keys())
for cli in clients:
try: try:
sck, addr = task cli.shutdown()
me = threading.current_thread()
me.name = "httpconn-{}-{}".format(
addr[0].split(".", 2)[-1][-6:], addr[1]
)
self.thr_client(sck, addr)
me.name = self.name + "-poolw"
except Exception as ex:
if str(ex).startswith("client d/c "):
self.log(self.name, "thr_client: " + str(ex), 6)
else:
self.log(self.name, "thr_client: " + min_ex(), 3)
def shutdown(self) -> None:
self.stopping = True
for srv in self.srvs:
try:
srv.close()
except: except:
pass pass
thrs = [] self.log("httpsrv-n", "ok bye")
clients = list(self.clients)
for cli in clients:
t = threading.Thread(target=cli.shutdown)
thrs.append(t)
t.start()
if self.tp_q: def thr_client(self, sck, addr):
self.stop_threads(self.tp_nthr)
for _ in range(10):
time.sleep(0.05)
if self.tp_q.empty():
break
for t in thrs:
t.join()
self.log(self.name, "ok bye")
def thr_client(self, sck: socket.socket, addr: tuple[str, int]) -> None:
"""thread managing one tcp client""" """thread managing one tcp client"""
sck.settimeout(120)
cli = HttpConn(sck, addr, self) cli = HttpConn(sck, addr, self)
with self.mutex: with self.mutex:
self.clients.add(cli) self.clients[cli] = 0
if self.is_mp:
self.workload += 50
if not self.workload_thr_alive:
self.workload_thr_alive = True
thr = threading.Thread(
target=self.thr_workload, name="httpsrv-workload"
)
thr.daemon = True
thr.start()
# print("{}\n".format(len(self.clients)), end="")
fno = sck.fileno() fno = sck.fileno()
try: try:
if self.args.log_conn: if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="90") self.log("%s %s" % addr, "|%sC-crun" % ("-" * 6,), c="1;30")
cli.run() cli.run()
except (OSError, socket.error) as ex: except (OSError, socket.error) as ex:
if ex.errno not in E_SCK: if ex.errno not in [10038, 10054, 107, 57, 9]:
self.log( self.log(
"%s %s" % addr, "%s %s" % addr,
"run({}): {}".format(fno, ex), "run({}): {}".format(fno, ex),
@@ -408,26 +128,61 @@ class HttpSrv(object):
finally: finally:
sck = cli.s sck = cli.s
if self.args.log_conn: if self.args.log_conn:
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="90") self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 7,), c="1;30")
try: try:
fno = sck.fileno() fno = sck.fileno()
shut_socket(cli.log, sck) sck.shutdown(socket.SHUT_RDWR)
sck.close()
except (OSError, socket.error) as ex: except (OSError, socket.error) as ex:
if not MACOS: if not MACOS:
self.log( self.log(
"%s %s" % addr, "%s %s" % addr,
"shut({}): {}".format(fno, ex), "shut({}): {}".format(fno, ex),
c="90", c="1;30",
) )
if ex.errno not in E_SCK: if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
# 10038 No longer considered a socket
# 10054 Foribly closed by remote
# 107 Transport endpoint not connected
# 57 Socket is not connected
# 49 Can't assign requested address (wifi down)
# 9 Bad file descriptor
raise raise
finally: finally:
with self.mutex: with self.mutex:
self.clients.remove(cli) del self.clients[cli]
self.ncli -= 1
def cachebuster(self) -> str: if self.disconnect_func:
self.disconnect_func(addr) # pylint: disable=not-callable
def thr_workload(self):
"""indicates the python interpreter workload caused by this HttpSrv"""
# avoid locking in extract_filedata by tracking difference here
while True:
time.sleep(0.2)
with self.mutex:
if not self.clients:
# no clients rn, termiante thread
self.workload_thr_alive = False
self.workload = 0
return
total = 0
with self.mutex:
for cli in self.clients.keys():
now = cli.workload
delta = now - self.clients[cli]
if delta < 0:
# was reset in HttpCli to prevent overflow
delta = now
total += delta
self.clients[cli] = now
self.workload = total
def cachebuster(self):
if time.time() - self.cb_ts < 1: if time.time() - self.cb_ts < 1:
return self.cb_v return self.cb_v
@@ -435,16 +190,16 @@ class HttpSrv(object):
if time.time() - self.cb_ts < 1: if time.time() - self.cb_ts < 1:
return self.cb_v return self.cb_v
v = self.E.t0 v = E.t0
try: try:
with os.scandir(os.path.join(self.E.mod, "web")) as dh: with os.scandir(os.path.join(E.mod, "web")) as dh:
for fh in dh: for fh in dh:
inf = fh.stat() inf = fh.stat(follow_symlinks=False)
v = max(v, inf.st_mtime) v = max(v, inf.st_mtime)
except: except:
pass pass
v = base64.urlsafe_b64encode(spack(b">xxL", int(v))) v = base64.urlsafe_b64encode(struct.pack(">xxL", int(v)))
self.cb_v = v.decode("ascii")[-4:] self.cb_v = v.decode("ascii")[-4:]
self.cb_ts = time.time() self.cb_ts = time.time()
return self.cb_v return self.cb_v

View File

@@ -1,69 +1,33 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse # typechk
import colorsys
import hashlib import hashlib
import colorsys
from .__init__ import PY2 from .__init__ import PY2
from .th_srv import HAVE_PIL
from .util import BytesIO
class Ico(object): class Ico(object):
def __init__(self, args: argparse.Namespace) -> None: def __init__(self, args):
self.args = args self.args = args
def get(self, ext: str, as_thumb: bool, chrome: bool) -> tuple[str, bytes]: def get(self, ext, as_thumb):
"""placeholder to make thumbnails not break""" """placeholder to make thumbnails not break"""
zb = hashlib.sha1(ext.encode("utf-8")).digest()[2:4] h = hashlib.md5(ext.encode("utf-8")).digest()[:2]
if PY2: if PY2:
zb = [ord(x) for x in zb] h = [ord(x) for x in h]
c1 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 0.3) c1 = colorsys.hsv_to_rgb(h[0] / 256.0, 1, 0.3)
c2 = colorsys.hsv_to_rgb(zb[0] / 256.0, 1, 1) c2 = colorsys.hsv_to_rgb(h[0] / 256.0, 1, 1)
ci = [int(x * 255) for x in list(c1) + list(c2)] c = list(c1) + list(c2)
c = "".join(["{:02x}".format(x) for x in ci]) c = [int(x * 255) for x in c]
c = "".join(["{:02x}".format(x) for x in c])
w = 100
h = 30 h = 30
if not self.args.th_no_crop and as_thumb: if not self.args.th_no_crop and as_thumb:
sw, sh = self.args.th_size.split("x") w, h = self.args.th_size.split("x")
h = int(100 / (float(sw) / float(sh))) h = int(100 / (float(w) / float(h)))
w = 100
if chrome and as_thumb:
# cannot handle more than ~2000 unique SVGs
if HAVE_PIL:
# svg: 3s, cache: 6s, this: 8s
from PIL import Image, ImageDraw
h = int(64 * h / w)
w = 64
img = Image.new("RGB", (w, h), "#" + c[:6])
pb = ImageDraw.Draw(img)
tw, th = pb.textsize(ext)
pb.text(((w - tw) // 2, (h - th) // 2), ext, fill="#" + c[6:])
img = img.resize((w * 3, h * 3), Image.NEAREST)
buf = BytesIO()
img.save(buf, format="PNG", compress_level=1)
return "image/png", buf.getvalue()
elif False:
# 48s, too slow
import pyvips
h = int(192 * h / w)
w = 192
img = pyvips.Image.text(
ext, width=w, height=h, dpi=192, align=pyvips.Align.CENTRE
)
img = img.ifthenelse(ci[3:], ci[:3], blend=True)
# i = i.resize(3, kernel=pyvips.Kernel.NEAREST)
buf = img.write_to_buffer(".png[compression=1]")
return "image/png", buf
svg = """\ svg = """\
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
@@ -73,6 +37,6 @@ class Ico(object):
fill="#{}" font-family="monospace" font-size="14px" style="letter-spacing:.5px">{}</text> fill="#{}" font-family="monospace" font-size="14px" style="letter-spacing:.5px">{}</text>
</g></svg> </g></svg>
""" """
svg = svg.format(h, c[:6], c[6:], ext) svg = svg.format(h, c[:6], c[6:], ext).encode("utf-8")
return "image/svg+xml", svg.encode("utf-8") return ["image/svg+xml", svg]

View File

@@ -1,512 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import random
import select
import socket
import time
from ipaddress import IPv4Network, IPv6Network
from .__init__ import TYPE_CHECKING
from .__init__ import unicode as U
from .multicast import MC_Sck, MCast
from .stolen.dnslib import CLASS as DC
from .stolen.dnslib import (
NSEC,
PTR,
QTYPE,
RR,
SRV,
TXT,
A,
AAAA,
DNSHeader,
DNSQuestion,
DNSRecord,
)
from .util import CachedSet, Daemon, Netdev, min_ex
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Any, Optional, Union
MDNS4 = "224.0.0.251"
MDNS6 = "ff02::fb"
class MDNS_Sck(MC_Sck):
def __init__(
self,
sck: socket.socket,
nd: Netdev,
grp: str,
ip: str,
net: Union[IPv4Network, IPv6Network],
):
super(MDNS_Sck, self).__init__(sck, nd, grp, ip, net)
self.bp_probe = b""
self.bp_ip = b""
self.bp_svc = b""
self.bp_bye = b""
self.last_tx = 0.0
class MDNS(MCast):
    """
    mDNS (zeroconf) responder: probes for a unique hostname on the LAN,
    announces the enabled services, answers queries, and defends the name.
    """

    def __init__(self, hub: "SvcHub") -> None:
        al = hub.args
        # an empty group string disables that ip-version in MCast
        grp4 = "" if al.zm6 else MDNS4
        grp6 = "" if al.zm4 else MDNS6
        super(MDNS, self).__init__(
            hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv
        )
        self.srv: dict[socket.socket, MDNS_Sck] = {}
        self.ttl = 300

        # build the hostname to announce; non-ascii chars become "?" in the
        # encode/decode roundtrip and are then dropped by the join below
        zs = self.args.name + ".local."
        zs = zs.encode("ascii", "replace").decode("ascii", "replace")
        # NOTE(review): fallback prefix "vault-" looks inconsistent with the
        # log message in run() which says "copyparty" -- confirm intended
        self.hn = "-".join(x for x in zs.split("?") if x) or (
            "vault-{}".format(random.randint(1, 255))
        )
        self.lhn = self.hn.lower()

        # requester ip -> (response deadline, srv, body):
        self.q: dict[str, tuple[float, MDNS_Sck, bytes]] = {}
        self.rx4 = CachedSet(0.42)  # 3 probes @ 250..500..750 => 500ms span
        self.rx6 = CachedSet(0.42)
        self.svcs, self.sfqdns = self.build_svcs()
        # lowercase lookup copies for case-insensitive matching in eat()
        self.lsvcs = {k.lower(): v for k, v in self.svcs.items()}
        self.lsfqdns = set([x.lower() for x in self.sfqdns])

        self.probing = 0.0  # deadline of the hostname-probe phase; 0 = done
        self.unsolicited: list[float] = []  # scheduled announces on all nics
        self.defend: dict[MDNS_Sck, float] = {}  # server -> deadline

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        """log through the hub with the mDNS tag (c = color/level hint)"""
        self.log_func("mDNS", msg, c)

    def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]:
        """
        build the services to announce, selected by the --zms flags;
        returns (service-class -> properties, set of service fqdns)
        """
        zms = self.args.zms
        http = {"port": 80 if 80 in self.args.p else self.args.p[0]}
        https = {"port": 443 if 443 in self.args.p else self.args.p[0]}
        webdav = http.copy()
        webdavs = https.copy()
        webdav["u"] = webdavs["u"] = "u"  # KDE requires username
        ftp = {"port": (self.args.ftp if "f" in zms else self.args.ftps)}
        smb = {"port": self.args.smb_port}

        # some gvfs require path
        zs = self.args.zm_ld or "/"
        if zs:
            webdav["path"] = zs
            webdavs["path"] = zs

        if self.args.zm_lh:
            http["path"] = self.args.zm_lh
            https["path"] = self.args.zm_lh

        if self.args.zm_lf:
            ftp["path"] = self.args.zm_lf

        if self.args.zm_ls:
            smb["path"] = self.args.zm_ls

        svcs: dict[str, dict[str, Any]] = {}

        if "d" in zms:
            svcs["_webdav._tcp.local."] = webdav

        if "D" in zms:
            svcs["_webdavs._tcp.local."] = webdavs

        if "h" in zms:
            svcs["_http._tcp.local."] = http

        if "H" in zms:
            svcs["_https._tcp.local."] = https

        if "f" in zms.lower():
            svcs["_ftp._tcp.local."] = ftp

        if "s" in zms.lower():
            svcs["_smb._tcp.local."] = smb

        sfqdns: set[str] = set()
        for k, v in svcs.items():
            # NOTE(review): the "-c-" infix in instance names looks odd;
            # verify against what clients expect (e.g. "name-webdav")
            name = "{}-c-{}".format(self.args.name, k.split(".")[0][1:])
            v["name"] = name
            sfqdns.add("{}.{}".format(name, k))

        return svcs, sfqdns

    def build_replies(self) -> None:
        """prebuild the probe/reply/goodbye packets on each server socket"""
        for srv in self.srv.values():
            probe = DNSRecord(DNSHeader(0, 0), q=DNSQuestion(self.hn, QTYPE.ANY))

            areply = DNSRecord(DNSHeader(0, 0x8400))
            sreply = DNSRecord(DNSHeader(0, 0x8400))
            bye = DNSRecord(DNSHeader(0, 0x8400))

            # does this nic (idx) have both address families?
            have4 = have6 = False
            for s2 in self.srv.values():
                if srv.idx != s2.idx:
                    continue

                if s2.v6:
                    have6 = True
                else:
                    have4 = True

            for ip in srv.ips:
                if ":" in ip:
                    qt = QTYPE.AAAA
                    ar = {"rclass": DC.F_IN, "rdata": AAAA(ip)}
                else:
                    qt = QTYPE.A
                    ar = {"rclass": DC.F_IN, "rdata": A(ip)}

                r0 = RR(self.hn, qt, ttl=0, **ar)
                r120 = RR(self.hn, qt, ttl=120, **ar)
                # rfc-10:
                # SHOULD rr ttl 120sec for A/AAAA/SRV
                # (and recommend 75min for all others)

                probe.add_auth(r120)
                areply.add_answer(r120)
                sreply.add_answer(r120)
                bye.add_answer(r0)  # ttl=0 means "forget this record"

            for sclass, props in self.svcs.items():
                sname = props["name"]
                sport = props["port"]
                sfqdn = sname + "." + sclass

                # service enumeration pointer (dns-sd browsing)
                k = "_services._dns-sd._udp.local."
                r = RR(k, QTYPE.PTR, DC.IN, 4500, PTR(sclass))
                sreply.add_answer(r)

                r = RR(sclass, QTYPE.PTR, DC.IN, 4500, PTR(sfqdn))
                sreply.add_answer(r)

                r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 120, SRV(0, 0, sport, self.hn))
                sreply.add_answer(r)
                areply.add_answer(r)

                r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 0, SRV(0, 0, sport, self.hn))
                bye.add_answer(r)

                txts = []
                for k in ("u", "path"):
                    if k not in props:
                        continue

                    zb = "{}={}".format(k, props[k]).encode("utf-8")
                    if len(zb) > 255:
                        t = "value too long for mdns: [{}]"
                        raise Exception(t.format(props[k]))

                    txts.append(zb)

                # gvfs really wants txt even if they're empty
                r = RR(sfqdn, QTYPE.TXT, DC.F_IN, 4500, TXT(txts))
                sreply.add_answer(r)

            # negative-response (NSEC) for the missing address family
            if not (have4 and have6) and not self.args.zm_noneg:
                ns = NSEC(self.hn, ["AAAA" if have6 else "A"])
                r = RR(self.hn, QTYPE.NSEC, DC.F_IN, 120, ns)
                areply.add_ar(r)
                if len(sreply.pack()) < 1400:
                    sreply.add_ar(r)

            srv.bp_probe = probe.pack()
            srv.bp_ip = areply.pack()
            srv.bp_svc = sreply.pack()
            srv.bp_bye = bye.pack()

            # since all replies are small enough to fit in one packet,
            # always send full replies rather than just a/aaaa records
            srv.bp_ip = srv.bp_svc

    def send_probes(self) -> None:
        """probe 3 times for hostname uniqueness before announcing"""
        slp = random.random() * 0.25
        for _ in range(3):
            time.sleep(slp)
            slp = 0.25
            if not self.running:
                break

            if self.args.zmv:
                self.log("sending hostname probe...")

            # ipv4: need to probe each ip (each server)
            # ipv6: only need to probe each set of looped nics
            probed6: set[str] = set()
            for srv in self.srv.values():
                if srv.ip in probed6:
                    continue

                try:
                    srv.sck.sendto(srv.bp_probe, (srv.grp, 5353))
                    if srv.v6:
                        for ip in srv.ips:
                            probed6.add(ip)
                except Exception as ex:
                    self.log("sendto failed: {} ({})".format(srv.ip, ex), "90")

    def run(self) -> None:
        """main loop: bind sockets, probe, then answer queries until stopped"""
        try:
            bound = self.create_servers()
        except:
            t = "no server IP matches the mdns config\n{}"
            self.log(t.format(min_ex()), 1)
            bound = []

        if not bound:
            self.log("failed to announce copyparty services on the network", 3)
            return

        self.build_replies()
        Daemon(self.send_probes)
        zf = time.time() + 2
        self.probing = zf  # cant unicast so give everyone an extra sec
        self.unsolicited = [zf, zf + 1, zf + 3, zf + 7]  # rfc-8.3

        last_hop = time.time()
        ihop = self.args.mc_hop
        while self.running:
            # short poll while work is pending; otherwise idle until the
            # next multicast-group rejoin (or 180s if rejoin is disabled)
            timeout = (
                0.02 + random.random() * 0.07
                if self.probing or self.q or self.defend or self.unsolicited
                else (last_hop + ihop if ihop else 180)
            )
            rdy = select.select(self.srv, [], [], timeout)
            rx: list[socket.socket] = rdy[0]  # type: ignore
            self.rx4.cln()
            self.rx6.cln()
            for sck in rx:
                buf, addr = sck.recvfrom(4096)
                try:
                    self.eat(buf, addr, sck)
                except:
                    if not self.running:
                        return

                    t = "{} {} \033[33m|{}| {}\n{}".format(
                        self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
                    )
                    self.log(t, 6)

            if not self.probing:
                self.process()
                continue

            # probe window expired without a conflict => start announcing
            if self.probing < time.time():
                t = "probe ok; announcing [{}]"
                self.log(t.format(self.hn[:-1]), 2)
                self.probing = 0

    def stop(self, panic: bool = False) -> None:
        """shut down; sends goodbye packets unless panic is set"""
        self.running = False
        if not panic:
            for srv in self.srv.values():
                try:
                    srv.sck.sendto(srv.bp_bye, (srv.grp, 5353))
                except:
                    pass

        self.srv = {}

    def eat(self, buf: bytes, addr: tuple[str, int], sck: socket.socket) -> None:
        """parse one incoming packet and decide how/when to respond"""
        cip = addr[0]
        v6 = ":" in cip
        # ignore linklocal v4 (unless allowed) and non-linklocal v6 sources
        if (cip.startswith("169.254") and not self.ll_ok) or (
            v6 and not cip.startswith("fe80")
        ):
            return

        # drop packets we've already seen recently (dedupe across nics)
        cache = self.rx6 if v6 else self.rx4
        if buf in cache.c:
            return

        srv: Optional[MDNS_Sck] = self.srv[sck] if v6 else self.map_client(cip)  # type: ignore
        if not srv:
            return

        cache.add(buf)
        now = time.time()

        if self.args.zmv and cip != srv.ip and cip not in srv.ips:
            t = "{} [{}] \033[36m{} \033[0m|{}|"
            self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")

        p = DNSRecord.parse(buf)
        if self.args.zmvv:
            self.log(str(p))

        # check for incoming probes for our hostname
        cips = [U(x.rdata) for x in p.auth if U(x.rname).lower() == self.lhn]
        if cips and self.sips.isdisjoint(cips):
            if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
                # avahi broadcasting 127.0.0.1-only packets
                return

            self.log("someone trying to steal our hostname: {}".format(cips), 3)
            # immediately unicast
            if not self.probing:
                srv.sck.sendto(srv.bp_ip, (cip, 5353))

            # and schedule multicast
            self.defend[srv] = self.defend.get(srv, now + 0.1)
            return

        # check for someone rejecting our probe / hijacking our hostname
        cips = [
            U(x.rdata)
            for x in p.rr
            if U(x.rname).lower() == self.lhn and x.rclass == DC.F_IN
        ]
        if cips and self.sips.isdisjoint(cips):
            if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
                # avahi broadcasting 127.0.0.1-only packets
                return

            t = "mdns zeroconf: "
            if self.probing:
                t += "Cannot start; hostname '{}' is occupied"
            else:
                t += "Emergency stop; hostname '{}' got stolen"

            t += " on {}! Use --name to set another hostname.\n\nName taken by {}\n\nYour IPs: {}\n"
            self.log(t.format(self.args.name, srv.name, cips, list(self.sips)), 1)
            self.stop(True)
            return

        # then rfc-6.7; dns pretending to be mdns (android...)
        if p.header.id or addr[1] != 5353:
            rsp: Optional[DNSRecord] = None
            for r in p.questions:
                try:
                    lhn = U(r.qname).lower()
                except:
                    self.log("invalid question: {}".format(r))
                    continue

                if lhn != self.lhn:
                    continue

                if p.header.id and r.qtype in (QTYPE.A, QTYPE.AAAA):
                    rsp = rsp or DNSRecord(DNSHeader(p.header.id, 0x8400))
                    rsp.add_question(r)
                    for ip in srv.ips:
                        qt = r.qtype
                        v6 = ":" in ip
                        if v6 == (qt == QTYPE.AAAA):
                            rd = AAAA(ip) if v6 else A(ip)
                            rr = RR(self.hn, qt, DC.IN, 10, rd)
                            rsp.add_answer(rr)

            if rsp:
                srv.sck.sendto(rsp.pack(), addr[:2])

            # but don't return in case it's a differently broken client

        # then a/aaaa records
        for r in p.questions:
            try:
                lhn = U(r.qname).lower()
            except:
                self.log("invalid question: {}".format(r))
                continue

            if lhn != self.lhn:
                continue

            # gvfs keeps repeating itself
            found = False
            unicast = False
            for rr in p.rr:
                try:
                    rname = U(rr.rname).lower()
                except:
                    self.log("invalid rr: {}".format(rr))
                    continue

                if rname == self.lhn:
                    if rr.ttl > 60:
                        found = True
                    if rr.rclass == DC.F_IN:
                        unicast = True

            if unicast:
                # spec-compliant mDNS-over-unicast
                srv.sck.sendto(srv.bp_ip, (cip, 5353))
            elif addr[1] != 5353:
                # just in case some clients use (and want us to use) invalid ports
                srv.sck.sendto(srv.bp_ip, addr[:2])

            if not found:
                self.q[cip] = (0, srv, srv.bp_ip)

            return

        deadline = now + (0.5 if p.header.tc else 0.02)  # rfc-7.2

        # and service queries
        for r in p.questions:
            if not r or not r.qname:
                continue

            qname = U(r.qname).lower()
            if qname in self.lsvcs or qname == "_services._dns-sd._udp.local.":
                self.q[cip] = (deadline, srv, srv.bp_svc)
                break

        # heed rfc-7.1 if there was an announce in the past 12sec
        # (workaround gvfs race-condition where it occasionally
        # doesn't read/decode the full response...)
        if now < srv.last_tx + 12:
            for rr in p.rr:
                if not rr.rdata:
                    continue

                rdata = U(rr.rdata).lower()
                if rdata in self.lsfqdns:
                    if rr.ttl > 2250:
                        self.q.pop(cip, None)
                    break

    def process(self) -> None:
        """send all pending/scheduled replies whose deadlines have passed"""
        tx = set()
        now = time.time()
        cooldown = 0.9  # rfc-6: 1
        if self.unsolicited and self.unsolicited[0] < now:
            self.unsolicited.pop(0)
            cooldown = 0.1
            for srv in self.srv.values():
                tx.add(srv)

        for srv, deadline in list(self.defend.items()):
            if now < deadline:
                continue

            if self._tx(srv, srv.bp_ip, 0.02):  # rfc-6: 0.25
                self.defend.pop(srv)

        for cip, (deadline, srv, msg) in list(self.q.items()):
            if now < deadline:
                continue

            self.q.pop(cip)
            self._tx(srv, msg, cooldown)

        for srv in tx:
            self._tx(srv, srv.bp_svc, cooldown)

    def _tx(self, srv: MDNS_Sck, msg: bytes, cooldown: float) -> bool:
        """multicast msg unless srv is still in cooldown; True if sent"""
        now = time.time()
        if now < srv.last_tx + cooldown:
            return False

        srv.sck.sendto(msg, (srv.grp, 5353))
        srv.last_tx = now
        return True

View File

@@ -1,34 +1,30 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse
import json
import os import os
import sys
import json
import shutil import shutil
import subprocess as sp import subprocess as sp
import sys
from .__init__ import PY2, WINDOWS, E, unicode from .__init__ import PY2, WINDOWS
from .bos import bos from .util import fsenc, fsdec, uncyg, REKOBO_LKEY
from .util import REKOBO_LKEY, fsenc, min_ex, retchk, runcmd, uncyg
if True: # pylint: disable=using-constant-test if not PY2:
from typing import Any, Union unicode = str
from .util import RootLogger
def have_ff(scmd: str) -> bool: def have_ff(cmd):
if PY2: if PY2:
print("# checking {}".format(scmd)) print("# checking {}".format(cmd))
acmd = (scmd + " -version").encode("ascii").split(b" ") cmd = (cmd + " -version").encode("ascii").split(b" ")
try: try:
sp.Popen(acmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate() sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate()
return True return True
except: except:
return False return False
else: else:
return bool(shutil.which(scmd)) return bool(shutil.which(cmd))
HAVE_FFMPEG = have_ff("ffmpeg") HAVE_FFMPEG = have_ff("ffmpeg")
@@ -36,16 +32,13 @@ HAVE_FFPROBE = have_ff("ffprobe")
class MParser(object): class MParser(object):
def __init__(self, cmdline: str) -> None: def __init__(self, cmdline):
self.tag, args = cmdline.split("=", 1) self.tag, args = cmdline.split("=", 1)
self.tags = self.tag.split(",") self.tags = self.tag.split(",")
self.timeout = 60 self.timeout = 30
self.force = False self.force = False
self.kill = "t" # tree; all children recursively
self.capture = 3 # outputs to consume
self.audio = "y" self.audio = "y"
self.pri = 0 # priority; higher = later
self.ext = [] self.ext = []
while True: while True:
@@ -54,7 +47,7 @@ class MParser(object):
if WINDOWS: if WINDOWS:
bp = uncyg(bp) bp = uncyg(bp)
if bos.path.exists(bp): if os.path.exists(bp):
self.bin = bp self.bin = bp
return return
except: except:
@@ -67,14 +60,6 @@ class MParser(object):
self.audio = arg[1:] # [r]equire [n]ot [d]ontcare self.audio = arg[1:] # [r]equire [n]ot [d]ontcare
continue continue
if arg.startswith("k"):
self.kill = arg[1:] # [t]ree [m]ain [n]one
continue
if arg.startswith("c"):
self.capture = int(arg[1:]) # 0=none 1=stdout 2=stderr 3=both
continue
if arg == "f": if arg == "f":
self.force = True self.force = True
continue continue
@@ -87,16 +72,10 @@ class MParser(object):
self.ext.append(arg[1:]) self.ext.append(arg[1:])
continue continue
if arg.startswith("p"):
self.pri = int(arg[1:] or "1")
continue
raise Exception() raise Exception()
def ffprobe( def ffprobe(abspath):
abspath: str, timeout: int = 60
) -> tuple[dict[str, tuple[int, Any]], dict[str, list[Any]]]:
cmd = [ cmd = [
b"ffprobe", b"ffprobe",
b"-hide_banner", b"-hide_banner",
@@ -105,20 +84,21 @@ def ffprobe(
b"--", b"--",
fsenc(abspath), fsenc(abspath),
] ]
rc, so, se = runcmd(cmd, timeout=timeout) p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
retchk(rc, cmd, se) r = p.communicate()
return parse_ffprobe(so) txt = r[0].decode("utf-8", "replace")
return parse_ffprobe(txt)
def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[Any]]]: def parse_ffprobe(txt):
"""ffprobe -show_format -show_streams""" """ffprobe -show_format -show_streams"""
streams = [] streams = []
fmt = {} fmt = {}
g = {} g = None
for ln in [x.rstrip("\r") for x in txt.split("\n")]: for ln in [x.rstrip("\r") for x in txt.split("\n")]:
try: try:
sk, sv = ln.split("=", 1) k, v = ln.split("=", 1)
g[sk] = sv g[k] = v
continue continue
except: except:
pass pass
@@ -132,8 +112,8 @@ def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[
fmt = g fmt = g
streams = [fmt] + streams streams = [fmt] + streams
ret: dict[str, Any] = {} # processed ret = {} # processed
md: dict[str, list[Any]] = {} # raw tags md = {} # raw tags
is_audio = fmt.get("format_name") in ["mp3", "ogg", "flac", "wav"] is_audio = fmt.get("format_name") in ["mp3", "ogg", "flac", "wav"]
if fmt.get("filename", "").split(".")[-1].lower() in ["m4a", "aac"]: if fmt.get("filename", "").split(".")[-1].lower() in ["m4a", "aac"]:
@@ -181,55 +161,52 @@ def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[
] ]
if typ == "format": if typ == "format":
kvm = [["duration", ".dur"], ["bit_rate", ".q"], ["format_name", "fmt"]] kvm = [["duration", ".dur"], ["bit_rate", ".q"]]
for sk, rk in kvm: for sk, rk in kvm:
v1 = strm.get(sk) v = strm.get(sk)
if v1 is None: if v is None:
continue continue
if rk.startswith("."): if rk.startswith("."):
try: try:
zf = float(v1) v = float(v)
v2 = ret.get(rk) v2 = ret.get(rk)
if v2 is None or zf > v2: if v2 is None or v > v2:
ret[rk] = zf ret[rk] = v
except: except:
# sqlite doesnt care but the code below does # sqlite doesnt care but the code below does
if v1 not in ["N/A"]: if v not in ["N/A"]:
ret[rk] = v1 ret[rk] = v
else: else:
ret[rk] = v1 ret[rk] = v
if ret.get("vc") == "ansi": # shellscript if ret.get("vc") == "ansi": # shellscript
return {}, {} return {}, {}
for strm in streams: for strm in streams:
for sk, sv in strm.items(): for k, v in strm.items():
if not sk.startswith("TAG:"): if not k.startswith("TAG:"):
continue continue
sk = sk[4:].strip() k = k[4:].strip()
sv = sv.strip() v = v.strip()
if sk and sv and sk not in md: if k and v and k not in md:
md[sk] = [sv] md[k] = [v]
for sk in [".q", ".vq", ".aq"]: for k in [".q", ".vq", ".aq"]:
if sk in ret: if k in ret:
ret[sk] /= 1000 # bit_rate=320000 ret[k] /= 1000 # bit_rate=320000
for sk in [".q", ".vq", ".aq", ".resw", ".resh"]: for k in [".q", ".vq", ".aq", ".resw", ".resh"]:
if sk in ret: if k in ret:
ret[sk] = int(ret[sk]) ret[k] = int(ret[k])
if ".fps" in ret: if ".fps" in ret:
fps = ret[".fps"] fps = ret[".fps"]
if "/" in fps: if "/" in fps:
fa, fb = fps.split("/") fa, fb = fps.split("/")
try: fps = int(fa) * 1.0 / int(fb)
fps = int(fa) * 1.0 / int(fb)
except:
fps = 9001
if fps < 1000 and fmt.get("format_name") not in ["image2", "png_pipe"]: if fps < 1000 and fmt.get("format_name") not in ["image2", "png_pipe"]:
ret[".fps"] = round(fps, 3) ret[".fps"] = round(fps, 3)
@@ -242,52 +219,48 @@ def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[
if ".q" in ret: if ".q" in ret:
del ret[".q"] del ret[".q"]
if "fmt" in ret:
ret["fmt"] = ret["fmt"].split(",")[0]
if ".resw" in ret and ".resh" in ret: if ".resw" in ret and ".resh" in ret:
ret["res"] = "{}x{}".format(ret[".resw"], ret[".resh"]) ret["res"] = "{}x{}".format(ret[".resw"], ret[".resh"])
zd = {k: (0, v) for k, v in ret.items()} ret = {k: [0, v] for k, v in ret.items()}
return zd, md return ret, md
class MTag(object): class MTag(object):
def __init__(self, log_func: "RootLogger", args: argparse.Namespace) -> None: def __init__(self, log_func, args):
self.log_func = log_func self.log_func = log_func
self.args = args
self.usable = True self.usable = True
self.prefer_mt = not args.no_mtag_ff self.prefer_mt = False
self.backend = "ffprobe" if args.no_mutagen else "mutagen"
self.can_ffprobe = HAVE_FFPROBE and not args.no_mtag_ff
mappings = args.mtm mappings = args.mtm
or_ffprobe = " or FFprobe" self.backend = "ffprobe" if args.no_mutagen else "mutagen"
or_ffprobe = " or ffprobe"
if self.backend == "mutagen": if self.backend == "mutagen":
self.get = self.get_mutagen self.get = self.get_mutagen
try: try:
from mutagen import version # noqa: F401 import mutagen
except: except:
self.log("could not load Mutagen, trying FFprobe instead", c=3) self.log("could not load mutagen, trying ffprobe instead", c=3)
self.backend = "ffprobe" self.backend = "ffprobe"
if self.backend == "ffprobe": if self.backend == "ffprobe":
self.usable = self.can_ffprobe
self.get = self.get_ffprobe self.get = self.get_ffprobe
self.prefer_mt = True self.prefer_mt = True
# about 20x slower
self.usable = HAVE_FFPROBE
if not HAVE_FFPROBE: if self.usable and WINDOWS and sys.version_info < (3, 8):
pass self.usable = False
or_ffprobe = " or python >= 3.8"
elif args.no_mtag_ff: msg = "found ffprobe but your python is too old; need 3.8 or newer"
msg = "found FFprobe but it was disabled by --no-mtag-ff" self.log(msg, c=1)
self.log(msg, c=3)
if not self.usable: if not self.usable:
msg = "need Mutagen{} to read media tags so please run this:\n{}{} -m pip install --user mutagen\n" msg = "need mutagen{} to read media tags so please run this:\n{}{} -m pip install --user mutagen\n"
pybin = os.path.basename(sys.executable) self.log(
self.log(msg.format(or_ffprobe, " " * 37, pybin), c=1) msg.format(or_ffprobe, " " * 37, os.path.basename(sys.executable)), c=1
)
return return
# https://picard-docs.musicbrainz.org/downloads/MusicBrainz_Picard_Tag_Map.html # https://picard-docs.musicbrainz.org/downloads/MusicBrainz_Picard_Tag_Map.html
@@ -359,49 +332,41 @@ class MTag(object):
} }
# self.get = self.compare # self.get = self.compare
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg, c=0):
self.log_func("mtag", msg, c) self.log_func("mtag", msg, c)
def normalize_tags( def normalize_tags(self, ret, md):
self, parser_output: dict[str, tuple[int, Any]], md: dict[str, list[Any]] for k, v in dict(md).items():
) -> dict[str, Union[str, float]]: if not v:
for sk, tv in dict(md).items():
if not tv:
continue continue
sk = sk.lower().split("::")[0].strip() k = k.lower().split("::")[0].strip()
key_mapping = self.rmap.get(sk) mk = self.rmap.get(k)
if not key_mapping: if not mk:
continue continue
priority, alias = key_mapping pref, mk = mk
if alias not in parser_output or parser_output[alias][0] > priority: if mk not in ret or ret[mk][0] > pref:
parser_output[alias] = (priority, tv[0]) ret[mk] = [pref, v[0]]
# take first value (lowest priority / most preferred) # take first value
ret: dict[str, Union[str, float]] = { ret = {k: unicode(v[1]).strip() for k, v in ret.items()}
sk: unicode(tv[1]).strip() for sk, tv in parser_output.items()
}
# track 3/7 => track 3 # track 3/7 => track 3
for sk, zv in ret.items(): for k, v in ret.items():
if sk[0] == ".": if k[0] == ".":
sv = str(zv).split("/")[0].strip().lstrip("0") v = v.split("/")[0].strip().lstrip("0")
ret[sk] = sv or 0 ret[k] = v or 0
# normalize key notation to rkeobo # normalize key notation to rkeobo
okey = ret.get("key") okey = ret.get("key")
if okey: if okey:
key = str(okey).replace(" ", "").replace("maj", "").replace("min", "m") key = okey.replace(" ", "").replace("maj", "").replace("min", "m")
ret["key"] = REKOBO_LKEY.get(key.lower(), okey) ret["key"] = REKOBO_LKEY.get(key.lower(), okey)
if self.args.mtag_vv:
zl = " ".join("\033[36m{} \033[33m{}".format(k, v) for k, v in ret.items())
self.log("norm: {}\033[0m".format(zl), "90")
return ret return ret
def compare(self, abspath: str) -> dict[str, Union[str, float]]: def compare(self, abspath):
if abspath.endswith(".au"): if abspath.endswith(".au"):
return {} return {}
@@ -425,7 +390,7 @@ class MTag(object):
v2 = r2.get(k) v2 = r2.get(k)
if v1 == v2: if v1 == v2:
print(" ", k, v1) print(" ", k, v1)
elif v1 != "0000": # FFprobe date=0 elif v1 != "0000": # ffprobe date=0
diffs.append(k) diffs.append(k)
print(" 1", k, v1) print(" 1", k, v1)
print(" 2", k, v2) print(" 2", k, v2)
@@ -439,135 +404,65 @@ class MTag(object):
return r1 return r1
def get_mutagen(self, abspath: str) -> dict[str, Union[str, float]]: def get_mutagen(self, abspath):
ret: dict[str, tuple[int, Any]] = {} import mutagen
if not bos.path.isfile(abspath): try:
md = mutagen.File(fsenc(abspath), easy=True)
x = md.info.length
except Exception as ex:
return {} return {}
from mutagen import File ret = {}
try: try:
md = File(fsenc(abspath), easy=True) dur = int(md.info.length)
assert md try:
if self.args.mtag_vv: q = int(md.info.bitrate / 1024)
for zd in (md.info.__dict__, dict(md.tags)): except:
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()] q = int((os.path.getsize(fsenc(abspath)) / dur) / 128)
self.log("mutagen: {}\033[0m".format(" ".join(zl)), "90")
if not md.info.length and not md.info.codec:
raise Exception()
except:
return self.get_ffprobe(abspath) if self.can_ffprobe else {}
sz = bos.path.getsize(abspath) ret[".dur"] = [0, dur]
try: ret[".q"] = [0, q]
ret[".q"] = (0, int((sz / md.info.length) / 128))
except: except:
pass pass
for attr, k, norm in [
["codec", "ac", unicode],
["channels", "chs", int],
["sample_rate", ".hz", int],
["bitrate", ".aq", int],
["length", ".dur", int],
]:
try:
v = getattr(md.info, attr)
except:
if k != "ac":
continue
try:
v = str(md.info).split(".")[1]
if v.startswith("ogg"):
v = v[3:]
except:
continue
if not v:
continue
if k == ".aq":
v /= 1000
if k == "ac" and v.startswith("mp4a.40."):
v = "aac"
ret[k] = (0, norm(v))
return self.normalize_tags(ret, md) return self.normalize_tags(ret, md)
def get_ffprobe(self, abspath: str) -> dict[str, Union[str, float]]: def get_ffprobe(self, abspath):
if not bos.path.isfile(abspath): ret, md = ffprobe(abspath)
return {}
ret, md = ffprobe(abspath, self.args.mtag_to)
if self.args.mtag_vv:
for zd in (ret, dict(md)):
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()]
self.log("ffprobe: {}\033[0m".format(" ".join(zl)), "90")
return self.normalize_tags(ret, md) return self.normalize_tags(ret, md)
def get_bin( def get_bin(self, parsers, abspath):
self, parsers: dict[str, MParser], abspath: str, oth_tags: dict[str, Any] pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
) -> dict[str, Any]: pypath = [str(pypath)] + [str(x) for x in sys.path if x]
if not bos.path.isfile(abspath): pypath = str(os.pathsep.join(pypath))
return {}
env = os.environ.copy() env = os.environ.copy()
try: env["PYTHONPATH"] = pypath
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
zsl = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(zsl))
env["PYTHONPATH"] = pypath
except:
if not E.ox:
raise
ret: dict[str, Any] = {} ret = {}
for tagname, parser in sorted(parsers.items(), key=lambda x: (x[1].pri, x[0])): for tagname, mp in parsers.items():
try: try:
cmd = [parser.bin, abspath] cmd = [sys.executable, mp.bin, abspath]
if parser.bin.endswith(".py"): args = {"env": env, "timeout": mp.timeout}
cmd = [sys.executable] + cmd
args = {
"env": env,
"timeout": parser.timeout,
"kill": parser.kill,
"capture": parser.capture,
}
if parser.pri:
zd = oth_tags.copy()
zd.update(ret)
args["sin"] = json.dumps(zd).encode("utf-8", "replace")
if WINDOWS: if WINDOWS:
args["creationflags"] = 0x4000 args["creationflags"] = 0x4000
else: else:
cmd = ["nice"] + cmd cmd = ["nice"] + cmd
bcmd = [fsenc(x) for x in cmd] cmd = [fsenc(x) for x in cmd]
rc, v, err = runcmd(bcmd, **args) # type: ignore v = sp.check_output(cmd, **args).strip()
retchk(rc, bcmd, err, self.log, 5, self.args.mtag_v)
v = v.strip()
if not v: if not v:
continue continue
if "," not in tagname: if "," not in tagname:
ret[tagname] = v ret[tagname] = v.decode("utf-8")
else: else:
zj = json.loads(v) v = json.loads(v)
for tag in tagname.split(","): for tag in tagname.split(","):
if tag and tag in zj: if tag and tag in v:
ret[tag] = zj[tag] ret[tag] = v[tag]
except: except:
if self.args.mtag_v: pass
t = "mtag error: tagname {}, parser {}, file {} => {}"
self.log(t.format(tagname, parser.bin, abspath, min_ex()))
return ret return ret

View File

@@ -1,372 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import socket
import time
import ipaddress
from ipaddress import (
IPv4Address,
IPv4Network,
IPv6Address,
IPv6Network,
ip_address,
ip_network,
)
from .__init__ import TYPE_CHECKING
from .util import MACOS, Netdev, min_ex, spack
if TYPE_CHECKING:
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
if not hasattr(socket, "IPPROTO_IPV6"):
setattr(socket, "IPPROTO_IPV6", 41)
class NoIPs(Exception):
    """raised when no server IP matches the multicast config"""
class MC_Sck(object):
    """there is one socket for each server ip"""

    def __init__(
        self,
        sck: socket.socket,
        nd: Netdev,
        grp: str,
        ip: str,
        net: Union[IPv4Network, IPv6Network],
    ):
        is6 = ":" in ip  # ipv6 addresses always contain a colon

        self.sck = sck
        self.grp = grp
        self.ip = ip
        self.net = net
        self.idx = nd.idx
        self.name = nd.name
        self.mreq = b""  # group-membership request; built by setup_socket
        self.ips = {ip: net}  # may grow additional addrs on the same nic
        self.v6 = is6
        self.have6 = is6
        self.have4 = not is6
class MCast(object):
def __init__(
self,
hub: "SvcHub",
Srv: type[MC_Sck],
on: list[str],
off: list[str],
mc_grp_4: str,
mc_grp_6: str,
port: int,
vinit: bool,
) -> None:
"""disable ipv%d by setting mc_grp_%d empty"""
self.hub = hub
self.Srv = Srv
self.args = hub.args
self.asrv = hub.asrv
self.log_func = hub.log
self.on = on
self.off = off
self.grp4 = mc_grp_4
self.grp6 = mc_grp_6
self.port = port
self.vinit = vinit
self.srv: dict[socket.socket, MC_Sck] = {} # listening sockets
self.sips: set[str] = set() # all listening ips (including failed attempts)
self.ll_ok: set[str] = set() # fallback linklocal IPv4 and IPv6 addresses
self.b2srv: dict[bytes, MC_Sck] = {} # binary-ip -> server socket
self.b4: list[bytes] = [] # sorted list of binary-ips
self.b6: list[bytes] = [] # sorted list of binary-ips
self.cscache: dict[str, Optional[MC_Sck]] = {} # client ip -> server cache
self.running = True
    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        """log through the hub with the "multicast" tag (c = color/level hint)"""
        self.log_func("multicast", msg, c)
    def create_servers(self) -> list[str]:
        """
        select the server IPs matching the on/off filters and create one
        multicast socket per ip; returns the list of successfully bound ips.
        Raises NoIPs if the filters leave nothing to bind.
        """
        bound: list[str] = []
        netdevs = self.hub.tcpsrv.netdevs
        ips = [x[0] for x in self.hub.tcpsrv.bound]

        # expand a wildcard bind into every nic-ip of that address family
        if "::" in ips:
            ips = [x for x in ips if x != "::"] + list(
                [x.split("/")[0] for x in netdevs if ":" in x]
            )
            ips.append("0.0.0.0")

        if "0.0.0.0" in ips:
            ips = [x for x in ips if x != "0.0.0.0"] + list(
                [x.split("/")[0] for x in netdevs if ":" not in x]
            )

        ips = [x for x in ips if x not in ("::1", "127.0.0.1")]

        # ip -> ip/prefix
        ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips]

        # expand the on/off filters: an entry may be a subnet, a nic index,
        # or a nic name; resolve each to the concrete ip/prefix strings
        on = self.on[:]
        off = self.off[:]
        for lst in (on, off):
            for av in list(lst):
                try:
                    arg_net = ip_network(av, False)
                except:
                    arg_net = None

                for sk, sv in netdevs.items():
                    if arg_net:
                        net_ip = ip_address(sk.split("/")[0])
                        if net_ip in arg_net and sk not in lst:
                            lst.append(sk)

                    if (av == str(sv.idx) or av == sv.name) and sk not in lst:
                        lst.append(sk)

        # allowlist wins over blocklist
        if on:
            ips = [x for x in ips if x in on]
        elif off:
            ips = [x for x in ips if x not in off]

        if not self.grp4:
            ips = [x for x in ips if ":" in x]

        if not self.grp6:
            ips = [x for x in ips if ":" not in x]

        ips = list(set(ips))
        all_selected = ips[:]

        # discard non-linklocal ipv6
        ips = [x for x in ips if ":" not in x or x.startswith("fe80")]

        if not ips:
            raise NoIPs()

        for ip in ips:
            v6 = ":" in ip
            netdev = netdevs[ip]
            if not netdev.idx:
                t = "using INADDR_ANY for ip [{}], netdev [{}]"
                if not self.srv and ip not in ["::", "0.0.0.0"]:
                    self.log(t.format(ip, netdev), 3)

            ipv = socket.AF_INET6 if v6 else socket.AF_INET
            sck = socket.socket(ipv, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            sck.settimeout(None)
            sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                # not available on all platforms; best-effort
                sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            except:
                pass

            # most ipv6 clients expect multicast on linklocal ip only;
            # add a/aaaa records for the other nic IPs
            other_ips: set[str] = set()
            if v6:
                for nd in netdevs.values():
                    if nd.idx == netdev.idx and nd.ip in all_selected and ":" in nd.ip:
                        other_ips.add(nd.ip)

            net = ipaddress.ip_network(ip, False)
            ip = ip.split("/")[0]
            srv = self.Srv(sck, netdev, self.grp6 if ":" in ip else self.grp4, ip, net)
            for oth_ip in other_ips:
                srv.ips[oth_ip.split("/")[0]] = ipaddress.ip_network(oth_ip, False)

            # gvfs breaks if a linklocal ip appears in a dns reply
            ll = {
                k: v
                for k, v in srv.ips.items()
                if k.startswith("169.254") or k.startswith("fe80")
            }
            rt = {k: v for k, v in srv.ips.items() if k not in ll}

            if self.args.ll or not rt:
                self.ll_ok.update(list(ll))

            if not self.args.ll:
                srv.ips = rt or ll

            if not srv.ips:
                self.log("no IPs on {}; skipping [{}]".format(netdev, ip), 3)
                continue

            try:
                self.setup_socket(srv)
                self.srv[sck] = srv
                bound.append(ip)
            except:
                t = "announce failed on {} [{}]:\n{}"
                self.log(t.format(netdev, ip, min_ex()), 3)

        # optionally share addrs between servers on the same nic...
        if self.args.zm_msub:
            for s1 in self.srv.values():
                for s2 in self.srv.values():
                    if s1.idx != s2.idx:
                        continue

                    if s1.ip not in s2.ips:
                        s2.ips[s1.ip] = s1.net

        # ...or between nics on the same subnet
        if self.args.zm_mnic:
            for s1 in self.srv.values():
                for s2 in self.srv.values():
                    for ip1, net1 in list(s1.ips.items()):
                        for ip2, net2 in list(s2.ips.items()):
                            if net1 == net2 and ip1 != ip2:
                                s1.ips[ip2] = net2

        self.sips = set([x.split("/")[0] for x in all_selected])
        for srv in self.srv.values():
            assert srv.ip in self.sips

        return bound
def setup_socket(self, srv: MC_Sck) -> None:
    """bind srv.sck, register its IPs for reverse lookup, and prime the
    multicast membership request (joined/refreshed by self.hop)"""
    sck = srv.sck
    if srv.v6:
        if self.vinit:
            zsl = list(srv.ips.keys())
            self.log("v6({}) idx({}) {}".format(srv.ip, srv.idx, zsl), 6)

        # map each packed nic ip to this server for map_client()
        for ip in srv.ips:
            bip = socket.inet_pton(socket.AF_INET6, ip)
            self.b2srv[bip] = srv
            self.b6.append(bip)

        # idx 0 means INADDR_ANY; bind the wildcard instead of the group
        grp = self.grp6 if srv.idx else ""
        try:
            if MACOS:
                raise Exception()

            sck.bind((grp, self.port, 0, srv.idx))
        except:
            # macos (and some others) cannot bind the group address
            sck.bind(("", self.port, 0, srv.idx))

        bgrp = socket.inet_pton(socket.AF_INET6, self.grp6)
        dev = spack(b"@I", srv.idx)
        srv.mreq = bgrp + dev
        if srv.idx != socket.INADDR_ANY:
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, dev)

        try:
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
        except:
            # macos
            t = "failed to set IPv6 TTL/LOOP; announcements may not survive multiple switches/routers"
            self.log(t, 3)
    else:
        if self.vinit:
            self.log("v4({}) idx({})".format(srv.ip, srv.idx), 6)

        bip = socket.inet_aton(srv.ip)
        self.b2srv[bip] = srv
        self.b4.append(bip)

        grp = self.grp4 if srv.idx else ""
        try:
            if MACOS:
                raise Exception()

            sck.bind((grp, self.port))
        except:
            sck.bind(("", self.port))

        bgrp = socket.inet_aton(self.grp4)
        # v4 membership selects the nic by its ip, not by index
        dev = (
            spack(b"=I", socket.INADDR_ANY)
            if srv.idx == socket.INADDR_ANY
            else socket.inet_aton(srv.ip)
        )
        srv.mreq = bgrp + dev
        if srv.idx != socket.INADDR_ANY:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, dev)

        try:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
        except:
            # probably can't happen but dontcare if it does
            t = "failed to set IPv4 TTL/LOOP; announcements may not survive multiple switches/routers"
            self.log(t, 3)

    self.hop(srv)
    # sort for deterministic longest-prefix-ish iteration in map_client
    self.b4.sort(reverse=True)
    self.b6.sort(reverse=True)
def hop(self, srv: MC_Sck) -> None:
    """rejoin to keepalive on routers/switches without igmp-snooping"""
    sck = srv.sck
    req = srv.mreq
    if ":" in srv.ip:
        try:
            # leave first so the following join emits fresh reports
            sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, req)
            # linux does leaves/joins twice with 0.2~1.05s spacing
            time.sleep(1.2)
        except:
            pass

        sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, req)
    else:
        try:
            sck.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, req)
            time.sleep(1.2)
        except:
            pass

        # t = "joining {} from ip {} idx {} with mreq {}"
        # self.log(t.format(srv.grp, srv.ip, srv.idx, repr(srv.mreq)), 6)
        sck.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, req)
def map_client(self, cip: str) -> Optional[MC_Sck]:
    """find (and cache) the multicast server whose subnet covers client ip `cip`"""
    if cip in self.cscache:
        return self.cscache[cip]

    v6 = ":" in cip
    addr = IPv6Address(cip) if v6 else IPv4Address(cip)
    ret: Optional[MC_Sck] = None

    # strict match: one of our nic subnets contains the client
    for bip in self.b6 if v6 else self.b4:
        cand = self.b2srv[bip]
        if any(addr in net for net in cand.ips.values()):
            ret = cand
            break

    if not ret and cip in ("127.0.0.1", "::1"):
        # just give it something
        ret = list(self.srv.values())[0]

    if not ret and cip.startswith("169.254"):
        # idk how to map LL IPv4 msgs to nics;
        # just pick one and hope for the best
        for cand in self.srv.values():
            if any(y in self.ll_ok for y in cand.ips):
                ret = cand
                break

    if ret:
        t = "new client on {} ({}): {}"
        self.log(t.format(ret.name, ret.net, cip), 6)
    else:
        t = "could not map client {} to known subnet; maybe forwarded from another network?"
        self.log(t.format(cip), 3)

    # crude cache bound; reset when it grows silly
    if len(self.cscache) > 9000:
        self.cscache = {}

    self.cscache[cip] = ret
    return ret

View File

@@ -1,321 +0,0 @@
# coding: utf-8
import inspect
import logging
import os
import random
import stat
import sys
import time
from types import SimpleNamespace
from .__init__ import ANYWIN, TYPE_CHECKING
from .authsrv import LEELOO_DALLAS, VFS
from .bos import bos
from .util import Daemon, min_ex
if True: # pylint: disable=using-constant-test
from typing import Any
if TYPE_CHECKING:
from .svchub import SvcHub
# module logger + shorthands for its level methods
lg = logging.getLogger("smb")
debug, info, warning, error = (lg.debug, lg.info, lg.warning, lg.error)
class SMB(object):
    """smb/cifs server; bridges impacket's smbserver onto the copyparty vfs
    by monkeypatching its os/os.path modules with vfs-aware replacements"""

    def __init__(self, hub: "SvcHub") -> None:
        self.hub = hub
        self.args = hub.args
        self.asrv = hub.asrv
        self.log = hub.log
        # fd => (open-time, vpath) for files opened for writing
        self.files: dict[int, tuple[float, str]] = {}

        lg.setLevel(logging.DEBUG if self.args.smbvvv else logging.INFO)
        for x in ["impacket", "impacket.smbserver"]:
            lgr = logging.getLogger(x)
            lgr.setLevel(logging.DEBUG if self.args.smbvv else logging.INFO)

        try:
            from impacket import smbserver
            from impacket.ntlm import compute_lmhash, compute_nthash
        except ImportError:
            m = "\033[36m\n{}\033[31m\n\nERROR: need 'impacket'; please run this command:\033[33m\n {} -m pip install --user impacket\n\033[0m"
            print(m.format(min_ex(), sys.executable))
            sys.exit(1)

        # patch vfs into smbserver.os
        fos = SimpleNamespace()
        for k in os.__dict__:
            try:
                setattr(fos, k, getattr(os, k))
            except:
                pass
        fos.close = self._close
        fos.listdir = self._listdir
        fos.mkdir = self._mkdir
        fos.open = self._open
        fos.remove = self._unlink
        fos.rename = self._rename
        fos.stat = self._stat
        fos.unlink = self._unlink
        fos.utime = self._utime
        smbserver.os = fos

        # ...and smbserver.os.path
        fop = SimpleNamespace()
        for k in os.path.__dict__:
            try:
                setattr(fop, k, getattr(os.path, k))
            except:
                pass
        fop.exists = self._p_exists
        fop.getsize = self._p_getsize
        fop.isdir = self._p_isdir
        smbserver.os.path = fop
        if not self.args.smb_nwa_2:
            fop.join = self._p_join

        # other patches
        smbserver.isInFileJail = self._is_in_file_jail
        self._disarm()

        # impacket only binds one address; prefer the first configured v4 ip
        ip = next((x for x in self.args.i if ":" not in x), None)
        if not ip:
            self.log("smb", "IPv6 not supported for SMB; listening on 0.0.0.0", 3)
            ip = "0.0.0.0"

        port = int(self.args.smb_port)
        srv = smbserver.SimpleSMBServer(listenAddress=ip, listenPort=port)

        ro = "no" if self.args.smbw else "yes"  # (does nothing)
        srv.addShare("A", "/", readOnly=ro)
        srv.setSMB2Support(not self.args.smb1)

        # also register each password as a username so clients with fixed
        # username fields can log in with just the password
        for name, pwd in self.asrv.acct.items():
            for u, p in ((name, pwd), (pwd, "k")):
                lmhash = compute_lmhash(p)
                nthash = compute_nthash(p)
                srv.addCredential(u, 0, lmhash, nthash)

        chi = [random.randint(0, 255) for x in range(8)]
        cha = "".join(["{:02x}".format(x) for x in chi])
        srv.setSMBChallenge(cha)

        self.srv = srv
        self.stop = srv.stop
        self.log("smb", "listening @ {}:{}".format(ip, port))
def start(self) -> None:
    """run the smb server on a background daemon thread"""
    Daemon(self.srv.start)
def _v2a(self, caller: str, vpath: str, *a: Any) -> tuple[VFS, str]:
    """resolve an smb vpath to its vfs node and absolute filesystem path"""
    vpath = vpath.replace("\\", "/").lstrip("/")
    # cf = inspect.currentframe().f_back
    # c1 = cf.f_back.f_code.co_name
    # c2 = cf.f_code.co_name
    debug('%s("%s", %s)\033[K\033[0m', caller, vpath, str(a))

    # TODO find a way to grab `identity` in smbComSessionSetupAndX and smb2SessionSetup
    vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, True, True)
    return vfs, vfs.canonical(rem)
def _listdir(self, vpath: str, *a: Any, **ka: Any) -> list[str]:
    """list a directory: virtual subvolumes first, then dirs, then files;
    optionally truncated to keep the reply under the client crash limit"""
    vpath = vpath.replace("\\", "/").lstrip("/")
    # caller = inspect.currentframe().f_back.f_code.co_name
    debug('listdir("%s", %s)\033[K\033[0m', vpath, str(a))
    vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, False, False)
    _, vfs_ls, vfs_virt = vfs.ls(
        rem, LEELOO_DALLAS, not self.args.no_scandir, [[False, False]]
    )
    dirs = [x[0] for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
    fils = [x[0] for x in vfs_ls if x[0] not in dirs]
    ls = list(vfs_virt.keys()) + dirs + fils
    if self.args.smb_nwa_1:
        return ls

    # clients crash somewhere around 65760 byte
    ret = []
    sz = 112 * 2  # ['.', '..']
    for n, fn in enumerate(ls):
        if sz >= 64000:
            t = "listing only %d of %d files (%d byte); see impacket#1433"
            warning(t, n, len(ls), sz)
            break

        # estimated wire size: 104 bytes + utf-16 name rounded up to 8
        nsz = len(fn.encode("utf-16", "replace"))
        nsz = ((nsz + 7) // 8) * 8
        sz += 104 + nsz
        ret.append(fn)

    return ret
def _open(
    self, vpath: str, flags: int, *a: Any, chmod: int = 0o777, **ka: Any
) -> Any:
    """open a file; write-opens are permission-checked and tracked in
    self.files so the upload can be hashed when the fd is closed"""
    f_ro = os.O_RDONLY
    if ANYWIN:
        f_ro |= os.O_BINARY

    # anything other than a plain read-only open counts as a write
    wr = flags != f_ro
    if wr and not self.args.smbw:
        yeet("blocked write (no --smbw): " + vpath)

    vfs, ap = self._v2a("open", vpath, *a)
    if wr and not vfs.axs.uwrite:
        yeet("blocked write (no-write-acc): " + vpath)

    ret = bos.open(ap, flags, *a, mode=chmod, **ka)
    if wr:
        now = time.time()
        nf = len(self.files)
        if nf > 9000:
            # drop the older half of the tracked fds to bound memory
            oldest = min([x[0] for x in self.files.values()])
            cutoff = oldest + (now - oldest) / 2
            self.files = {k: v for k, v in self.files.items() if v[0] > cutoff}
            info("was tracking %d files, now %d", nf, len(self.files))

        vpath = vpath.replace("\\", "/").lstrip("/")
        self.files[ret] = (now, vpath)

    return ret
def _close(self, fd: int) -> None:
    """close fd; if it was a tracked write, hand the file to up2k for hashing"""
    os.close(fd)
    if fd not in self.files:
        return

    _, vp = self.files.pop(fd)
    vp, fn = os.path.split(vp)
    vfs, rem = self.hub.asrv.vfs.get(vp, LEELOO_DALLAS, False, True)
    vfs, rem = vfs.get_dbv(rem)
    self.hub.up2k.hash_file(
        vfs.realpath,
        vfs.flags,
        rem,
        fn,
        "1.7.6.2",  # NOTE(review): presumably a placeholder client-ip -- confirm against hash_file
        time.time(),
    )
def _rename(self, vp1: str, vp2: str) -> None:
    """move/rename vp1 to vp2; needs --smbw, move-acc on src, write-acc on dst"""
    if not self.args.smbw:
        yeet("blocked rename (no --smbw): " + vp1)

    vp1 = vp1.lstrip("/")
    vp2 = vp2.lstrip("/")

    vfs2, ap2 = self._v2a("rename", vp2, vp1)
    if not vfs2.axs.uwrite:
        yeet("blocked rename (no-write-acc): " + vp2)

    vfs1, _ = self.asrv.vfs.get(vp1, LEELOO_DALLAS, True, True)
    if not vfs1.axs.umove:
        yeet("blocked rename (no-move-acc): " + vp1)

    self.hub.up2k.handle_mv(LEELOO_DALLAS, vp1, vp2)
    # NOTE(review): makedirs on the destination after the move -- presumably
    # recreates intermediate folders for the client; confirm intent
    try:
        bos.makedirs(ap2)
    except:
        pass
def _mkdir(self, vpath: str) -> None:
    """create a directory; requires --smbw and write-access on the volume"""
    if not self.args.smbw:
        yeet("blocked mkdir (no --smbw): " + vpath)

    vfs, abspath = self._v2a("mkdir", vpath)
    if not vfs.axs.uwrite:
        yeet("blocked mkdir (no-write-acc): " + vpath)

    return bos.mkdir(abspath)
def _stat(self, vpath: str, *a: Any, **ka: Any) -> os.stat_result:
    """stat the file backing an smb vpath"""
    _, abspath = self._v2a("stat", vpath, *a)
    return bos.stat(abspath, *a, **ka)
def _unlink(self, vpath: str) -> None:
    """delete a file; requires --smbw and delete-access, routed through up2k"""
    if not self.args.smbw:
        yeet("blocked delete (no --smbw): " + vpath)

    # return bos.unlink(self._v2a("stat", vpath, *a)[1])
    vfs, ap = self._v2a("delete", vpath)
    if not vfs.axs.udel:
        yeet("blocked delete (no-del-acc): " + vpath)

    vpath = vpath.replace("\\", "/").lstrip("/")
    self.hub.up2k.handle_rm(LEELOO_DALLAS, "1.7.6.2", [vpath], [])
def _utime(self, vpath: str, times: tuple[float, float]) -> None:
    """set atime/mtime; requires --smbw and write-access"""
    if not self.args.smbw:
        yeet("blocked utime (no --smbw): " + vpath)

    vfs, abspath = self._v2a("utime", vpath)
    if not vfs.axs.uwrite:
        yeet("blocked utime (no-write-acc): " + vpath)

    return bos.utime(abspath, times)
def _p_exists(self, vpath: str) -> bool:
    """os.path.exists replacement operating on the vfs"""
    try:
        bos.stat(self._v2a("p.exists", vpath)[1])
    except:
        return False
    return True
def _p_getsize(self, vpath: str) -> int:
    """os.path.getsize replacement operating on the vfs"""
    return bos.stat(self._v2a("p.getsize", vpath)[1]).st_size
def _p_isdir(self, vpath: str) -> bool:
    """os.path.isdir replacement operating on the vfs"""
    try:
        mode = bos.stat(self._v2a("p.isdir", vpath)[1]).st_mode
        return stat.S_ISDIR(mode)
    except:
        return False
def _p_join(self, *a) -> str:
    # impacket.smbserver reads globs from queryDirectoryRequest['Buffer']
    # where somehow `fds.*` becomes `fds"*` so lets fix that
    joined = os.path.join(*a)
    return joined.replace('"', ".")  # type: ignore
def _hook(self, *a: Any, **ka: Any) -> None:
    """tripwire installed over smbserver os-calls that must never run"""
    caller = inspect.currentframe().f_back.f_code.co_name
    error("\033[31m%s:hook(%s)\033[0m", caller, a)
    raise Exception("nope")
def _disarm(self) -> None:
    """replace every smbserver os/os.path call we do not reimplement
    with a hook that loudly refuses"""
    from impacket import smbserver

    os_denied = (
        "chmod chown ftruncate lchown link lstat replace "
        "scandir symlink truncate walk"
    )
    for fun in os_denied.split():
        setattr(smbserver.os, fun, self._hook)

    path_denied = (
        "abspath expanduser getatime getctime getmtime "
        "isabs isfile islink realpath"
    )
    for fun in path_denied.split():
        setattr(smbserver.os.path, fun, self._hook)
def _is_in_file_jail(self, *a: Any) -> bool:
    """always allow; path confinement is enforced by the vfs instead"""
    return True
def yeet(msg: str) -> None:
    """log and raise; aborts the current smb operation with `msg`"""
    info(msg)
    raise Exception(msg)

View File

@@ -1,195 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import re
import select
import socket
from email.utils import formatdate
from .__init__ import TYPE_CHECKING
from .multicast import MC_Sck, MCast
from .util import CachedSet, min_ex, html_escape
if TYPE_CHECKING:
from .broker_util import BrokerCli
from .httpcli import HttpCli
from .svchub import SvcHub
if True: # pylint: disable=using-constant-test
from typing import Optional, Union
# the ssdp multicast group (ipv4 only)
GRP = "239.255.255.250"
class SSDP_Sck(MC_Sck):
    """MC_Sck which also tracks the http port announced for its ip"""

    def __init__(self, *a):
        super(SSDP_Sck, self).__init__(*a)
        self.hport: int = 0  # filled in by SSDPd.run from tcpsrv.bound
class SSDPr(object):
    """generates http responses for httpcli"""

    def __init__(self, broker: "BrokerCli") -> None:
        self.broker = broker
        self.args = broker.args

    def reply(self, hc: "HttpCli") -> bool:
        """route an ssdp-related http request; only device.xml is served"""
        if hc.vpath.endswith("device.xml"):
            return self.tx_device(hc)

        hc.reply(b"unknown request", 400)
        return False

    def tx_device(self, hc: "HttpCli") -> bool:
        """send the upnp device-description xml for this server"""
        zs = """
<?xml version="1.0"?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>{}</URLBase>
<device>
<presentationURL>{}</presentationURL>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>{}</friendlyName>
<modelDescription>file server</modelDescription>
<manufacturer>ed</manufacturer>
<manufacturerURL>https://ocv.me/</manufacturerURL>
<modelName>copyparty</modelName>
<modelURL>https://github.com/9001/copyparty/</modelURL>
<UDN>{}</UDN>
<serviceList>
<service>
<serviceType>urn:schemas-upnp-org:device:Basic:1</serviceType>
<serviceId>urn:schemas-upnp-org:device:Basic</serviceId>
<controlURL>/.cpr/ssdp/services.xml</controlURL>
<eventSubURL>/.cpr/ssdp/services.xml</eventSubURL>
<SCPDURL>/.cpr/ssdp/services.xml</SCPDURL>
</service>
</serviceList>
</device>
</root>"""

        c = html_escape
        # base url from the socket the request arrived on
        sip, sport = hc.s.getsockname()[:2]
        proto = "https" if self.args.https_only else "http"
        ubase = "{}://{}:{}".format(proto, sip, sport)
        zsl = self.args.zsl
        url = zsl if "://" in zsl else ubase + "/" + zsl.lstrip("/")
        name = "{} @ {}".format(self.args.doctitle, self.args.name)
        zs = zs.strip().format(c(ubase), c(url), c(name), c(self.args.zsid))
        hc.reply(zs.encode("utf-8", "replace"))
        return False  # close connection
class SSDPd(MCast):
    """communicates with ssdp clients over multicast"""

    def __init__(self, hub: "SvcHub") -> None:
        al = hub.args
        vinit = al.zsv and not al.zmv
        super(SSDPd, self).__init__(
            hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit
        )
        self.srv: dict[socket.socket, SSDP_Sck] = {}
        self.rxc = CachedSet(0.7)  # dedup of recently seen request payloads
        self.txc = CachedSet(5)  # win10: every 3 sec
        self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I)

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        self.log_func("SSDP", msg, c)

    def run(self) -> None:
        """bind sockets, resolve http ports, then answer M-SEARCH forever"""
        try:
            bound = self.create_servers()
        except:
            t = "no server IP matches the ssdp config\n{}"
            self.log(t.format(min_ex()), 1)
            bound = []

        if not bound:
            self.log("failed to announce copyparty services on the network", 3)
            return

        # find http port for this listening ip
        for srv in self.srv.values():
            tcps = self.hub.tcpsrv.bound
            hp = next((x[1] for x in tcps if x[0] in ("0.0.0.0", srv.ip)), 0)
            hp = hp or next((x[1] for x in tcps if x[0] == "::"), 0)
            if not hp:
                hp = tcps[0][1]
                self.log("assuming port {} for {}".format(hp, srv.ip), 3)
            srv.hport = hp

        self.log("listening")
        while self.running:
            rdy = select.select(self.srv, [], [], 180)
            rx: list[socket.socket] = rdy[0]  # type: ignore
            self.rxc.cln()
            for sck in rx:
                buf, addr = sck.recvfrom(4096)
                try:
                    self.eat(buf, addr)
                except:
                    if not self.running:
                        return

                    t = "{} {} \033[33m|{}| {}\n{}".format(
                        self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
                    )
                    self.log(t, 6)

    def stop(self) -> None:
        self.running = False
        self.srv = {}

    def eat(self, buf: bytes, addr: tuple[str, int]) -> None:
        """parse one incoming packet; reply if it is an M-SEARCH for rootdevice"""
        cip = addr[0]
        if cip.startswith("169.254") and not self.ll_ok:
            return

        # ignore payloads we already answered very recently
        if buf in self.rxc.c:
            return

        srv: Optional[SSDP_Sck] = self.map_client(cip)  # type: ignore
        if not srv:
            return

        self.rxc.add(buf)
        if not buf.startswith(b"M-SEARCH * HTTP/1."):
            raise Exception("not an ssdp message")

        if not self.ptn_st.search(buf):
            return

        if self.args.zsv:
            t = "{} [{}] \033[36m{} \033[0m|{}|"
            self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")

        zs = """
HTTP/1.1 200 OK
CACHE-CONTROL: max-age=1800
DATE: {0}
EXT:
LOCATION: http://{1}:{2}/.cpr/ssdp/device.xml
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
01-NLS: {3}
SERVER: UPnP/1.0
ST: upnp:rootdevice
USN: {3}::upnp:rootdevice
BOOTID.UPNP.ORG: 0
CONFIGID.UPNP.ORG: 1
"""
        zs = zs.format(formatdate(usegmt=True), srv.ip, srv.hport, self.args.zsid)
        # drop leading newline, use crlf line endings as http requires
        zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace")
        srv.sck.sendto(zb, addr[:2])

        if cip not in self.txc.c:
            self.log("{} [{}] --> {}".format(srv.name, srv.ip, cip), "6")

        self.txc.add(cip)
        self.txc.cln()

View File

@@ -1,30 +1,23 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import stat import os
import tarfile import tarfile
import threading
from queue import Queue from .sutil import errdesc
from .util import Queue, fsenc
from .bos import bos
from .sutil import StreamArc, errdesc
from .util import Daemon, fsenc, min_ex
if True: # pylint: disable=using-constant-test
from typing import Any, Generator, Optional
from .util import NamedLogger
class QFile(object): # inherit io.StringIO for painful typing class QFile(object):
"""file-like object which buffers writes into a queue""" """file-like object which buffers writes into a queue"""
def __init__(self) -> None: def __init__(self):
self.q: Queue[Optional[bytes]] = Queue(64) self.q = Queue(64)
self.bq: list[bytes] = [] self.bq = []
self.nq = 0 self.nq = 0
def write(self, buf: Optional[bytes]) -> None: def write(self, buf):
if buf is None or self.nq >= 240 * 1024: if buf is None or self.nq >= 240 * 1024:
self.q.put(b"".join(self.bq)) self.q.put(b"".join(self.bq))
self.bq = [] self.bq = []
@@ -37,52 +30,43 @@ class QFile(object): # inherit io.StringIO for painful typing
self.nq += len(buf) self.nq += len(buf)
class StreamTar(StreamArc): class StreamTar(object):
"""construct in-memory tar file from the given path""" """construct in-memory tar file from the given path"""
def __init__( def __init__(self, fgen, **kwargs):
self,
log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None],
**kwargs: Any
):
super(StreamTar, self).__init__(log, fgen)
self.ci = 0 self.ci = 0
self.co = 0 self.co = 0
self.qfile = QFile() self.qfile = QFile()
self.errf: dict[str, Any] = {} self.fgen = fgen
self.errf = None
# python 3.8 changed to PAX_FORMAT as default, # python 3.8 changed to PAX_FORMAT as default,
# waste of space and don't care about the new features # waste of space and don't care about the new features
fmt = tarfile.GNU_FORMAT fmt = tarfile.GNU_FORMAT
self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt) # type: ignore self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt)
Daemon(self._gen, "star-gen") w = threading.Thread(target=self._gen, name="star-gen")
w.daemon = True
w.start()
def gen(self) -> Generator[Optional[bytes], None, None]: def gen(self):
try: while True:
while True: buf = self.qfile.q.get()
buf = self.qfile.q.get() if not buf:
if not buf: break
break
self.co += len(buf) self.co += len(buf)
yield buf yield buf
yield None yield None
finally: if self.errf:
if self.errf: os.unlink(self.errf["ap"])
bos.unlink(self.errf["ap"])
def ser(self, f: dict[str, Any]) -> None: def ser(self, f):
name = f["vp"] name = f["vp"]
src = f["ap"] src = f["ap"]
fsi = f["st"] fsi = f["st"]
if stat.S_ISDIR(fsi.st_mode):
return
inf = tarfile.TarInfo(name=name) inf = tarfile.TarInfo(name=name)
inf.mode = fsi.st_mode inf.mode = fsi.st_mode
inf.size = fsi.st_size inf.size = fsi.st_size
@@ -91,25 +75,23 @@ class StreamTar(StreamArc):
inf.gid = 0 inf.gid = 0
self.ci += inf.size self.ci += inf.size
with open(fsenc(src), "rb", 512 * 1024) as fo: with open(fsenc(src), "rb", 512 * 1024) as f:
self.tar.addfile(inf, fo) self.tar.addfile(inf, f)
def _gen(self) -> None: def _gen(self):
errors = [] errors = []
for f in self.fgen: for f in self.fgen:
if "err" in f: if "err" in f:
errors.append((f["vp"], f["err"])) errors.append([f["vp"], f["err"]])
continue continue
try: try:
self.ser(f) self.ser(f)
except: except Exception as ex:
ex = min_ex(5, True).replace("\n", "\n-- ") errors.append([f["vp"], repr(ex)])
errors.append((f["vp"], ex))
if errors: if errors:
self.errf, txt = errdesc(errors) self.errf = errdesc(errors)
self.log("\n".join(([repr(self.errf)] + txt[1:])))
self.ser(self.errf) self.ser(self.errf)
self.tar.close() self.tar.close()

View File

@@ -1,5 +0,0 @@
`dnslib` but heavily simplified/feature-stripped
L: MIT
Copyright (c) 2010 - 2017 Paul Chakravarti
https://github.com/paulc/dnslib/

View File

@@ -1,11 +0,0 @@
# coding: utf-8
"""
L: MIT
Copyright (c) 2010 - 2017 Paul Chakravarti
https://github.com/paulc/dnslib/tree/0.9.23
"""
from .dns import *
version = "0.9.23"

View File

@@ -1,41 +0,0 @@
# coding: utf-8
import types
class BimapError(Exception):
    """raised by Bimap lookups when configured as the error handler"""
class Bimap(object):
    """two-way mapping between numeric codes and names;
    forward lookups via indexing, reverse lookups via attribute access.
    `error` is either an exception type to raise on a miss, or a
    callback(name, key, forward) producing a fallback value."""

    def __init__(self, name, forward, error=AttributeError):
        self.name = name
        self.error = error
        self.forward = forward.copy()
        self.reverse = {v: k for (k, v) in forward.items()}

    def get(self, k, default=None):
        """forward lookup with a soft fallback (default or str(k))"""
        if k in self.forward:
            return self.forward[k]
        return default or str(k)

    def __getitem__(self, k):
        try:
            return self.forward[k]
        except KeyError:
            if isinstance(self.error, types.FunctionType):
                return self.error(self.name, k, True)
            raise self.error("%s: Invalid forward lookup: [%s]" % (self.name, k))

    def __getattr__(self, k):
        # functools.wraps probes for __wrapped__; never treat it as a name
        if k == "__wrapped__":
            raise AttributeError()
        try:
            return self.reverse[k]
        except KeyError:
            if isinstance(self.error, types.FunctionType):
                return self.error(self.name, k, False)
            raise self.error("%s: Invalid reverse lookup: [%s]" % (self.name, k))

View File

@@ -1,15 +0,0 @@
# coding: utf-8
from __future__ import print_function
def get_bits(data, offset, bits=1):
    """Return the `bits`-wide field starting at bit `offset` of `data`."""
    return (data >> offset) & ((1 << bits) - 1)
def set_bits(data, value, offset, bits=1):
    """Return 16-bit `data` with the `bits`-wide field at `offset` set to `value`."""
    field = ((1 << bits) - 1) << offset
    cleared = data & (0xFFFF ^ field)
    return cleared | ((value << offset) & field)

View File

@@ -1,56 +0,0 @@
# coding: utf-8
import binascii
import struct
class BufferError(Exception):
    """raised on out-of-bounds reads or failed struct unpacks
    (intentionally shadows the builtin within this module)"""
class Buffer(object):
    """growable byte buffer with a read/write offset and struct helpers"""

    def __init__(self, data=b""):
        self.data = bytearray(data)
        self.offset = 0

    def remaining(self):
        """bytes left between the offset and the end of the buffer"""
        return len(self.data) - self.offset

    def get(self, length):
        """consume and return the next `length` bytes"""
        if length > self.remaining():
            raise BufferError(
                "Not enough bytes [offset=%d,remaining=%d,requested=%d]"
                % (self.offset, self.remaining(), length)
            )
        chunk = bytes(self.data[self.offset : self.offset + length])
        self.offset += length
        return chunk

    def hex(self):
        """hex dump of the whole buffer"""
        return binascii.hexlify(self.data)

    def pack(self, fmt, *args):
        """append struct-packed values, advancing the offset past them"""
        self.offset += struct.calcsize(fmt)
        self.data += struct.pack(fmt, *args)

    def append(self, s):
        """append raw bytes, advancing the offset past them"""
        self.offset += len(s)
        self.data += s

    def update(self, ptr, fmt, *args):
        """overwrite previously written bytes at absolute position `ptr`"""
        packed = struct.pack(fmt, *args)
        self.data[ptr : ptr + len(packed)] = packed

    def unpack(self, fmt):
        """consume and unpack struct values from the current offset"""
        try:
            data = self.get(struct.calcsize(fmt))
            return struct.unpack(fmt, data)
        except struct.error:
            raise BufferError(
                "Error unpacking struct '%s' <%s>"
                % (fmt, binascii.hexlify(data).decode())
            )

    def __len__(self):
        return len(self.data)

View File

@@ -1,775 +0,0 @@
# coding: utf-8
from __future__ import print_function
import binascii
from itertools import chain
from .bimap import Bimap, BimapError
from .bit import get_bits, set_bits
from .buffer import BufferError
from .label import DNSBuffer, DNSLabel
from .ranges import IP4, IP6, H, I, check_bytes
class DNSError(Exception):
    """base error for dns packet parsing and code-table lookups"""
def unknown_qtype(name, key, forward):
    """Bimap fallback for rtypes missing from QTYPE:
    forward maps a numeric code to 'TYPEnnn', reverse maps it back."""
    if forward:
        try:
            return "TYPE%d" % (key,)
        except:
            raise DNSError("%s: Invalid forward lookup: [%s]" % (name, key))

    if key.startswith("TYPE"):
        try:
            return int(key[4:])
        except:
            pass

    raise DNSError("%s: Invalid reverse lookup: [%s]" % (name, key))
# record-type codes; anything absent falls through to unknown_qtype ("TYPEnnn")
QTYPE = Bimap(
    "QTYPE",
    {1: "A", 12: "PTR", 16: "TXT", 28: "AAAA", 33: "SRV", 47: "NSEC", 255: "ANY"},
    unknown_qtype,
)
# dns class codes; 0x8001 is "F_IN" (IN with the top bit set)
CLASS = Bimap("CLASS", {1: "IN", 254: "None", 255: "*", 0x8001: "F_IN"}, DNSError)
QR = Bimap("QR", {0: "QUERY", 1: "RESPONSE"}, DNSError)
# response codes
RCODE = Bimap(
    "RCODE",
    {
        0: "NOERROR",
        1: "FORMERR",
        2: "SERVFAIL",
        3: "NXDOMAIN",
        4: "NOTIMP",
        5: "REFUSED",
        6: "YXDOMAIN",
        7: "YXRRSET",
        8: "NXRRSET",
        9: "NOTAUTH",
        10: "NOTZONE",
    },
    DNSError,
)
OPCODE = Bimap(
    "OPCODE", {0: "QUERY", 1: "IQUERY", 2: "STATUS", 4: "NOTIFY", 5: "UPDATE"}, DNSError
)
def label(label, origin=None):
    """make a DNSLabel absolute: dotted names pass through unchanged,
    relative names are anchored under `origin`"""
    if label.endswith("."):
        return DNSLabel(label)

    base = origin if isinstance(origin, DNSLabel) else DNSLabel(origin)
    return base.add(label)
class DNSRecord(object):
    """a full dns message: header plus question/answer/authority/additional sections"""

    @classmethod
    def parse(cls, packet) -> "DNSRecord":
        """parse a raw wire-format packet into a DNSRecord"""
        buffer = DNSBuffer(packet)
        try:
            header = DNSHeader.parse(buffer)
            questions = []
            rr = []
            auth = []
            ar = []
            for i in range(header.q):
                questions.append(DNSQuestion.parse(buffer))
            for i in range(header.a):
                rr.append(RR.parse(buffer))
            for i in range(header.auth):
                auth.append(RR.parse(buffer))
            for i in range(header.ar):
                ar.append(RR.parse(buffer))
            return cls(header, questions, rr, auth=auth, ar=ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSRecord [offset=%d]: %s" % (buffer.offset, e)
            )

    @classmethod
    def question(cls, qname, qtype="A", qclass="IN"):
        """build a plain query record for the given name"""
        return DNSRecord(
            q=DNSQuestion(qname, getattr(QTYPE, qtype), getattr(CLASS, qclass))
        )

    def __init__(
        self, header=None, questions=None, rr=None, q=None, a=None, auth=None, ar=None
    ) -> None:
        self.header = header or DNSHeader()
        self.questions: list[DNSQuestion] = questions or []
        self.rr: list[RR] = rr or []
        self.auth: list[RR] = auth or []
        self.ar: list[RR] = ar or []
        # q/a are convenience args for a single question/answer
        if q:
            self.questions.append(q)
        if a:
            self.rr.append(a)
        self.set_header_qa()

    def reply(self, ra=1, aa=1):
        """make an empty response record echoing this query's id and question"""
        return DNSRecord(
            DNSHeader(id=self.header.id, bitmap=self.header.bitmap, qr=1, ra=ra, aa=aa),
            q=self.q,
        )

    def add_question(self, *q) -> None:
        self.questions.extend(q)
        self.set_header_qa()

    def add_answer(self, *rr) -> None:
        self.rr.extend(rr)
        self.set_header_qa()

    def add_auth(self, *auth) -> None:
        self.auth.extend(auth)
        self.set_header_qa()

    def add_ar(self, *ar) -> None:
        self.ar.extend(ar)
        self.set_header_qa()

    def set_header_qa(self) -> None:
        """sync the header's section counts with the actual lists"""
        self.header.q = len(self.questions)
        self.header.a = len(self.rr)
        self.header.auth = len(self.auth)
        self.header.ar = len(self.ar)

    def get_q(self):
        # first question, or an empty one
        return self.questions[0] if self.questions else DNSQuestion()

    q = property(get_q)

    def get_a(self):
        # first answer, or an empty RR
        return self.rr[0] if self.rr else RR()

    a = property(get_a)

    def pack(self) -> bytes:
        """serialize the whole message to wire format"""
        self.set_header_qa()
        buffer = DNSBuffer()
        self.header.pack(buffer)
        for q in self.questions:
            q.pack(buffer)
        for rr in self.rr:
            rr.pack(buffer)
        for auth in self.auth:
            auth.pack(buffer)
        for ar in self.ar:
            ar.pack(buffer)
        return buffer.data

    def truncate(self):
        """empty response with the TC (truncated) flag set"""
        return DNSRecord(DNSHeader(id=self.header.id, bitmap=self.header.bitmap, tc=1))

    def format(self, prefix="", sort=False):
        """multi-line dump of all sections, optionally sorted per section"""
        s = sorted if sort else lambda x: x
        sections = [repr(self.header)]
        sections.extend(s([repr(q) for q in self.questions]))
        sections.extend(s([repr(rr) for rr in self.rr]))
        sections.extend(s([repr(rr) for rr in self.auth]))
        sections.extend(s([repr(rr) for rr in self.ar]))
        return prefix + ("\n" + prefix).join(sections)

    short = format

    def __repr__(self):
        return self.format()

    __str__ = __repr__
class DNSHeader(object):
    """the fixed 12-byte dns header: id, flag bitmap, and four section counts"""

    # H/I descriptors from .ranges -- presumably validate 16/32-bit ranges
    id = H("id")
    bitmap = H("bitmap")
    q = H("q")
    a = H("a")
    auth = H("auth")
    ar = H("ar")

    @classmethod
    def parse(cls, buffer):
        """read the header from a DNSBuffer"""
        try:
            (id, bitmap, q, a, auth, ar) = buffer.unpack("!HHHHHH")
            return cls(id, bitmap, q, a, auth, ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSHeader [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, id=None, bitmap=None, q=0, a=0, auth=0, ar=0, **args) -> None:
        self.id = id if id else 0

        if bitmap is None:
            self.bitmap = 0
        else:
            self.bitmap = bitmap

        self.q = q
        self.a = a
        self.auth = auth
        self.ar = ar

        # flag kwargs (qr/opcode/aa/...) are applied through the
        # bit-twiddling properties defined below
        for k, v in args.items():
            if k.lower() == "qr":
                self.qr = v
            elif k.lower() == "opcode":
                self.opcode = v
            elif k.lower() == "aa":
                self.aa = v
            elif k.lower() == "tc":
                self.tc = v
            elif k.lower() == "rd":
                self.rd = v
            elif k.lower() == "ra":
                self.ra = v
            elif k.lower() == "z":
                self.z = v
            elif k.lower() == "ad":
                self.ad = v
            elif k.lower() == "cd":
                self.cd = v
            elif k.lower() == "rcode":
                self.rcode = v

    # bitmap layout: qr=bit15, opcode=bits11-14, aa=10, tc=9, rd=8,
    # ra=7, z=6, ad=5, cd=4, rcode=bits0-3
    def get_qr(self):
        return get_bits(self.bitmap, 15)

    def set_qr(self, val):
        self.bitmap = set_bits(self.bitmap, val, 15)

    qr = property(get_qr, set_qr)

    def get_opcode(self):
        return get_bits(self.bitmap, 11, 4)

    def set_opcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 11, 4)

    opcode = property(get_opcode, set_opcode)

    def get_aa(self):
        return get_bits(self.bitmap, 10)

    def set_aa(self, val):
        self.bitmap = set_bits(self.bitmap, val, 10)

    aa = property(get_aa, set_aa)

    def get_tc(self):
        return get_bits(self.bitmap, 9)

    def set_tc(self, val):
        self.bitmap = set_bits(self.bitmap, val, 9)

    tc = property(get_tc, set_tc)

    def get_rd(self):
        return get_bits(self.bitmap, 8)

    def set_rd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 8)

    rd = property(get_rd, set_rd)

    def get_ra(self):
        return get_bits(self.bitmap, 7)

    def set_ra(self, val):
        self.bitmap = set_bits(self.bitmap, val, 7)

    ra = property(get_ra, set_ra)

    def get_z(self):
        return get_bits(self.bitmap, 6)

    def set_z(self, val):
        self.bitmap = set_bits(self.bitmap, val, 6)

    z = property(get_z, set_z)

    def get_ad(self):
        return get_bits(self.bitmap, 5)

    def set_ad(self, val):
        self.bitmap = set_bits(self.bitmap, val, 5)

    ad = property(get_ad, set_ad)

    def get_cd(self):
        return get_bits(self.bitmap, 4)

    def set_cd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 4)

    cd = property(get_cd, set_cd)

    def get_rcode(self):
        return get_bits(self.bitmap, 0, 4)

    def set_rcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 0, 4)

    rcode = property(get_rcode, set_rcode)

    def pack(self, buffer):
        buffer.pack("!HHHHHH", self.id, self.bitmap, self.q, self.a, self.auth, self.ar)

    def __repr__(self):
        f = [
            self.aa and "AA",
            self.tc and "TC",
            self.rd and "RD",
            self.ra and "RA",
            self.z and "Z",
            self.ad and "AD",
            self.cd and "CD",
        ]
        # UPDATE messages rename the four count fields
        if OPCODE.get(self.opcode) == "UPDATE":
            f1 = "zo"
            f2 = "pr"
            f3 = "up"
            f4 = "ad"
        else:
            f1 = "q"
            f2 = "a"
            f3 = "ns"
            f4 = "ar"
        return (
            "<DNS Header: id=0x%x type=%s opcode=%s flags=%s "
            "rcode='%s' %s=%d %s=%d %s=%d %s=%d>"
            % (
                self.id,
                QR.get(self.qr),
                OPCODE.get(self.opcode),
                ",".join(filter(None, f)),
                RCODE.get(self.rcode),
                f1,
                self.q,
                f2,
                self.a,
                f3,
                self.auth,
                f4,
                self.ar,
            )
        )

    __str__ = __repr__
class DNSQuestion(object):
    """one entry in the question section: name, type, class"""

    @classmethod
    def parse(cls, buffer):
        """read one question from a DNSBuffer"""
        try:
            qname = buffer.decode_name()
            qtype, qclass = buffer.unpack("!HH")
            return cls(qname, qtype, qclass)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSQuestion [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, qname=None, qtype=1, qclass=1) -> None:
        self.qname = qname
        self.qtype = qtype
        self.qclass = qclass

    def set_qname(self, qname):
        # coerce plain strings into DNSLabel
        if isinstance(qname, DNSLabel):
            self._qname = qname
        else:
            self._qname = DNSLabel(qname)

    def get_qname(self):
        return self._qname

    qname = property(get_qname, set_qname)

    def pack(self, buffer):
        buffer.encode_name(self.qname)
        buffer.pack("!HH", self.qtype, self.qclass)

    def __repr__(self):
        return "<DNS Question: '%s' qtype=%s qclass=%s>" % (
            self.qname,
            QTYPE.get(self.qtype),
            CLASS.get(self.qclass),
        )

    __str__ = __repr__
class RR(object):
    """a resource record: name, type, class, ttl, rdata"""

    # H/I descriptors from .ranges -- presumably validate 16/32-bit ranges
    rtype = H("rtype")
    rclass = H("rclass")
    ttl = I("ttl")
    rdlength = H("rdlength")

    @classmethod
    def parse(cls, buffer):
        """read one RR from a DNSBuffer, dispatching rdata parsing by rtype"""
        try:
            rname = buffer.decode_name()
            rtype, rclass, ttl, rdlength = buffer.unpack("!HHIH")
            if rdlength:
                # typed parser from RDMAP, falling back to opaque RD
                rdata = RDMAP.get(QTYPE.get(rtype), RD).parse(buffer, rdlength)
            else:
                rdata = ""
            return cls(rname, rtype, rclass, ttl, rdata)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RR [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, rname=None, rtype=1, rclass=1, ttl=0, rdata=None) -> None:
        self.rname = rname
        self.rtype = rtype
        self.rclass = rclass
        self.ttl = ttl
        self.rdata = rdata

    def set_rname(self, rname):
        # coerce plain strings into DNSLabel
        if isinstance(rname, DNSLabel):
            self._rname = rname
        else:
            self._rname = DNSLabel(rname)

    def get_rname(self):
        return self._rname

    rname = property(get_rname, set_rname)

    def pack(self, buffer):
        buffer.encode_name(self.rname)
        buffer.pack("!HHI", self.rtype, self.rclass, self.ttl)
        # reserve the rdlength slot, pack the rdata, then backpatch the length
        rdlength_ptr = buffer.offset
        buffer.pack("!H", 0)
        start = buffer.offset
        self.rdata.pack(buffer)
        end = buffer.offset
        buffer.update(rdlength_ptr, "!H", end - start)

    def __repr__(self):
        return "<DNS RR: '%s' rtype=%s rclass=%s ttl=%d rdata='%s'>" % (
            self.rname,
            QTYPE.get(self.rtype),
            CLASS.get(self.rclass),
            self.ttl,
            self.rdata,
        )

    __str__ = __repr__
class RD(object):
    """Generic/opaque RDATA: raw bytes, displayed in RFC 3597 hex form."""
    attrs = ("data",)
    @classmethod
    def parse(cls, buffer, length):
        """Read *length* raw bytes from *buffer*."""
        try:
            return cls(buffer.get(length))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RD [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, data=b"") -> None:
        check_bytes("data", data)
        self.data = bytes(data)
    def pack(self, buffer):
        buffer.append(self.data)
    def __repr__(self):
        if not self.data:
            return "\\# 0"
        hexstr = binascii.hexlify(self.data).decode().upper()
        return "\\# %d %s" % (len(self.data), hexstr)
def _force_bytes(x):
if isinstance(x, bytes):
return x
else:
return x.encode()
class TXT(RD):
    """TXT RDATA: one or more length-prefixed character strings (max 255 each)."""
    @classmethod
    def parse(cls, buffer, length):
        """Parse consecutive <len><bytes> strings until *length* bytes are consumed."""
        try:
            data = list()
            start_bo = buffer.offset
            now_length = 0
            while buffer.offset < start_bo + length:
                (txtlength,) = buffer.unpack("!B")
                # the payload plus its length byte must stay inside the RDATA
                if now_length + txtlength < length:
                    now_length += txtlength
                    data.append(buffer.get(txtlength))
                else:
                    raise DNSError(
                        "Invalid TXT record: len(%d) > RD len(%d)" % (txtlength, length)
                    )
            return cls(data)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking TXT [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, data) -> None:
        # accept one string/bytes or a sequence thereof; stored as a list of bytes
        if type(data) in (tuple, list):
            self.data = [_force_bytes(x) for x in data]
        else:
            self.data = [_force_bytes(data)]
        if any([len(x) > 255 for x in self.data]):
            raise DNSError("TXT record too long: %s" % self.data)
    def pack(self, buffer):
        """Write each string as a one-byte length followed by its payload."""
        for ditem in self.data:
            if len(ditem) > 255:
                raise DNSError("TXT record too long: %s" % ditem)
            buffer.pack("!B", len(ditem))
            buffer.append(ditem)
    def __repr__(self):
        return ",".join([repr(x) for x in self.data])
class A(RD):
    """IPv4 address RDATA."""
    data = IP4("data")  # range-checked 4-tuple of octets
    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.unpack("!BBBB"))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking A [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, data) -> None:
        if type(data) not in (tuple, list):
            # dotted-quad string -> tuple of octet ints
            self.data = tuple(int(p) for p in data.rstrip(".").split("."))
        else:
            self.data = tuple(data)
    def pack(self, buffer):
        buffer.pack("!BBBB", *self.data)
    def __repr__(self):
        return "%d.%d.%d.%d" % self.data
def _parse_ipv6(a):
l, _, r = a.partition("::")
l_groups = list(chain(*[divmod(int(x, 16), 256) for x in l.split(":") if x]))
r_groups = list(chain(*[divmod(int(x, 16), 256) for x in r.split(":") if x]))
zeros = [0] * (16 - len(l_groups) - len(r_groups))
return tuple(l_groups + zeros + r_groups)
def _format_ipv6(a):
left = []
right = []
current = "left"
for i in range(0, 16, 2):
group = (a[i] << 8) + a[i + 1]
if current == "left":
if group == 0 and i < 14:
if (a[i + 2] << 8) + a[i + 3] == 0:
current = "right"
else:
left.append("0")
else:
left.append("%x" % group)
else:
if group == 0 and len(right) == 0:
pass
else:
right.append("%x" % group)
if len(left) < 8:
return ":".join(left) + "::" + ":".join(right)
else:
return ":".join(left)
class AAAA(RD):
    """IPv6 address RDATA."""
    data = IP6("data")  # range-checked 16-tuple of bytes
    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.unpack("!16B"))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking AAAA [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, data) -> None:
        if type(data) not in (tuple, list):
            # textual address -> 16-tuple of bytes
            self.data = _parse_ipv6(data)
        else:
            self.data = tuple(data)
    def pack(self, buffer):
        buffer.pack("!16B", *self.data)
    def __repr__(self):
        return _format_ipv6(self.data)
class CNAME(RD):
    """RDATA consisting of a single domain name."""
    attrs = ("label",)
    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.decode_name())
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking CNAME [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, label=None) -> None:
        self.label = label
    def set_label(self, label):
        # normalize to DNSLabel
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)
    def get_label(self):
        return self._label
    label = property(get_label, set_label)
    def pack(self, buffer):
        buffer.encode_name(self.label)
    def __repr__(self):
        return "%s" % (self.label)
class PTR(CNAME):
    # PTR has the same wire format as CNAME (a single domain name),
    # so the whole implementation is inherited unchanged.
    pass
class SRV(RD):
    """SRV RDATA: service location (priority, weight, port, target name)."""
    # range-checked u16 attributes
    priority = H("priority")
    weight = H("weight")
    port = H("port")
    @classmethod
    def parse(cls, buffer, length):
        """Read three u16 fields followed by the target name."""
        try:
            priority, weight, port = buffer.unpack("!HHH")
            target = buffer.decode_name()
            return cls(priority, weight, port, target)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking SRV [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, priority=0, weight=0, port=0, target=None) -> None:
        self.priority = priority
        self.weight = weight
        self.port = port
        self.target = target
    def set_target(self, target):
        # normalize to DNSLabel so packing/printing behave uniformly
        if isinstance(target, DNSLabel):
            self._target = target
        else:
            self._target = DNSLabel(target)
    def get_target(self):
        return self._target
    target = property(get_target, set_target)
    def pack(self, buffer):
        buffer.pack("!HHH", self.priority, self.weight, self.port)
        buffer.encode_name(self.target)
    def __repr__(self):
        return "%d %d %d %s" % (self.priority, self.weight, self.port, self.target)
    attrs = ("priority", "weight", "port", "target")
def decode_type_bitmap(type_bitmap):
    """Decode an NSEC type-bitmap field into a list of RR type names."""
    rrlist = []
    buf = DNSBuffer(type_bitmap)
    while buf.remaining():
        # each window: window number, byte count, then the bitmap bytes
        winnum, winlen = buf.unpack("BB")
        bitmap = bytearray(buf.get(winlen))
        for (pos, value) in enumerate(bitmap):
            for i in range(8):
                # bits are numbered MSB-first within each byte
                if (value << i) & 0x80:
                    bitpos = (256 * winnum) + (8 * pos) + i
                    rrlist.append(QTYPE[bitpos])
    return rrlist
def encode_type_bitmap(rrlist):
    """Encode RR type names into the NSEC window/bitmap wire format."""
    rrlist = sorted([getattr(QTYPE, rr) for rr in rrlist])
    buf = DNSBuffer()
    curWindow = rrlist[0] // 256
    bitmap = bytearray(32)
    n = len(rrlist) - 1
    for i, rr in enumerate(rrlist):
        v = rr - curWindow * 256
        bitmap[v // 8] |= 1 << (7 - v % 8)  # MSB-first bit within the byte
        if i == n or rrlist[i + 1] >= (curWindow + 1) * 256:
            # last type of this window: flush it with trailing zeros trimmed
            while bitmap[-1] == 0:
                bitmap = bitmap[:-1]
            buf.pack("BB", curWindow, len(bitmap))
            buf.append(bitmap)
            if i != n:
                # start the next window
                curWindow = rrlist[i + 1] // 256
                bitmap = bytearray(32)
    return buf.data
class NSEC(RD):
    """NSEC RDATA: next owner name plus a bitmap of RR types present."""
    attrs = ("label", "rrlist")
    @classmethod
    def parse(cls, buffer, length):
        try:
            end = buffer.offset + length
            name = buffer.decode_name()
            # whatever follows the name up to the RDATA end is the type bitmap
            rrlist = decode_type_bitmap(buffer.get(end - buffer.offset))
            return cls(name, rrlist)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking NSEC [offset=%d]: %s" % (buffer.offset, e))
    def __init__(self, label, rrlist) -> None:
        self.label = label
        self.rrlist = rrlist
    def set_label(self, label):
        # normalize to DNSLabel
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)
    def get_label(self):
        return self._label
    label = property(get_label, set_label)
    def pack(self, buffer):
        buffer.encode_name(self.label)
        buffer.append(encode_type_bitmap(self.rrlist))
    def __repr__(self):
        joined = " ".join(self.rrlist)
        return "%s %s" % (self.label, joined)
# wire-type name -> RDATA class; types not listed here parse as opaque RD
RDMAP = {"A": A, "AAAA": AAAA, "TXT": TXT, "PTR": PTR, "SRV": SRV, "NSEC": NSEC}

View File

@@ -1,154 +0,0 @@
# coding: utf-8
from __future__ import print_function
import re
from .bit import get_bits, set_bits
from .buffer import Buffer, BufferError
LDH = set(range(33, 127))
ESCAPE = re.compile(r"\\([0-9][0-9][0-9])")
class DNSLabelError(Exception):
    """Raised for domain names or label components exceeding DNS length limits."""
    pass
class DNSLabel(object):
    """A domain name as a tuple of byte-string labels; compares case-insensitively."""
    def __init__(self, label):
        # accepts another DNSLabel, a list/tuple of byte labels, or text/bytes
        if type(label) == DNSLabel:
            self.label = label.label
        elif type(label) in (list, tuple):
            self.label = tuple(label)
        else:
            if not label or label in (b".", "."):
                self.label = ()
            elif type(label) is not bytes:
                if type("") != type(b""):
                    # py3 text: expand \DDD escapes before idna-encoding
                    label = ESCAPE.sub(lambda m: chr(int(m[1])), label)
                self.label = tuple(label.encode("idna").rstrip(b".").split(b"."))
            else:
                if type("") == type(b""):
                    # py2: str is bytes; expand \DDD escapes in place
                    label = ESCAPE.sub(lambda m: chr(int(m.groups()[0])), label)
                self.label = tuple(label.rstrip(b".").split(b"."))
    def add(self, name):
        """Return a new DNSLabel of *name* with this name appended as suffix."""
        new = DNSLabel(name)
        if self.label:
            new.label += self.label
        return new
    def idna(self):
        """Decode each label via idna and join with dots (trailing root dot kept)."""
        return ".".join([s.decode("idna") for s in self.label]) + "."
    def _decode(self, s):
        # printable ascii passes through; other bytes become \DDD escapes
        if set(s).issubset(LDH):
            return s.decode()
        else:
            return "".join([(chr(c) if (c in LDH) else "\\%03d" % c) for c in s])
    def __str__(self):
        return ".".join([self._decode(bytearray(s)) for s in self.label]) + "."
    def __repr__(self):
        return "<DNSLabel: '%s'>" % str(self)
    def __hash__(self):
        # lowercased so the hash agrees with the case-insensitive __eq__
        return hash(tuple(map(lambda x: x.lower(), self.label)))
    def __ne__(self, other):
        return not self == other
    def __eq__(self, other):
        if type(other) != DNSLabel:
            return self.__eq__(DNSLabel(other))
        else:
            return [l.lower() for l in self.label] == [l.lower() for l in other.label]
    def __len__(self):
        # length of the dotted name without the trailing root dot
        return len(b".".join(self.label))
class DNSBuffer(Buffer):
    """Buffer with DNS name compression support (RFC 1035 pointer scheme)."""
    def __init__(self, data=b""):
        super(DNSBuffer, self).__init__(data)
        # suffix-tuple -> offset map used to emit compression pointers when packing
        self.names = {}
    def decode_name(self, last=-1):
        """Decode a (possibly compressed) name at the current offset.

        *last* is the offset we jumped from, used to detect pointer loops.
        """
        label = []
        done = False
        while not done:
            (length,) = self.unpack("!B")
            if get_bits(length, 6, 2) == 3:
                # top two bits set: this byte starts a 14-bit compression pointer
                self.offset -= 1
                pointer = get_bits(self.unpack("!H")[0], 0, 14)
                save = self.offset
                if last == save:
                    raise BufferError(
                        "Recursive pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                if pointer < self.offset:
                    self.offset = pointer
                else:
                    # pointers may only refer backwards in the message
                    raise BufferError(
                        "Invalid pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                label.extend(self.decode_name(save).label)
                self.offset = save
                done = True
            else:
                if length > 0:
                    l = self.get(length)
                    try:
                        l.decode()
                    except UnicodeDecodeError:
                        raise BufferError("Invalid label <%s>" % l)
                    label.append(l)
                else:
                    # zero-length label terminates the name
                    done = True
        return DNSLabel(label)
    def encode_name(self, name):
        """Encode *name*, emitting a pointer whenever a suffix was written before."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            if tuple(name) in self.names:
                # reuse the earlier occurrence of this suffix via a pointer
                pointer = self.names[tuple(name)]
                pointer = set_bits(pointer, 3, 14, 2)
                self.pack("!H", pointer)
                return
            else:
                # remember where this suffix starts for later reuse
                self.names[tuple(name)] = self.offset
                element = name.pop(0)
                if len(element) > 63:
                    raise DNSLabelError("Label component too long: %r" % element)
                self.pack("!B", len(element))
                self.append(element)
        self.append(b"\x00")
    def encode_name_nocompress(self, name):
        """Encode *name* without recording or reusing compression pointers."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            element = name.pop(0)
            if len(element) > 63:
                raise DNSLabelError("Label component too long: %r" % element)
            self.pack("!B", len(element))
            self.append(element)
        self.append(b"\x00")

View File

@@ -1,105 +0,0 @@
# coding: utf-8
from __future__ import print_function
import collections
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Lexer(object):
    """Streaming tokenizer base; subclasses supply state methods that return
    (token-or-None, next-state-or-None) pairs."""
    escape_chars = "\\"
    escape = {"n": "\n", "t": "\t", "r": "\r"}
    def __init__(self, f, debug=False):
        # accept a file-like object, str, or bytes as input
        if hasattr(f, "read"):
            self.f = f
        elif type(f) == str:
            self.f = StringIO(f)
        elif type(f) == bytes:
            self.f = StringIO(f.decode())
        else:
            raise ValueError("Invalid input")
        self.debug = debug
        self.q = collections.deque()  # pushback queue of characters
        self.state = self.lexStart  # current state function; None means finished
        self.escaped = False
        self.eof = False
    def __iter__(self):
        return self.parse()
    def next_token(self):
        """Run the current state function once; returns its token (may be None)."""
        if self.debug:
            print("STATE", self.state)
        (tok, self.state) = self.state()
        return tok
    def parse(self):
        """Yield tokens until the state machine stops or input is exhausted."""
        while self.state is not None and not self.eof:
            tok = self.next_token()
            if tok:
                yield tok
    def read(self, n=1):
        """Read up to *n* chars, draining the pushback queue first."""
        s = ""
        while self.q and n > 0:
            s += self.q.popleft()
            n -= 1
        s += self.f.read(n)
        if s == "":
            self.eof = True
        if self.debug:
            print("Read: >%s<" % repr(s))
        return s
    def peek(self, n=1):
        """Look ahead *n* chars without consuming them."""
        s = ""
        i = 0
        while len(self.q) > i and n > 0:
            s += self.q[i]
            i += 1
            n -= 1
        r = self.f.read(n)
        if n > 0 and r == "":
            self.eof = True
        # keep what we read queued so a later read() still sees it
        self.q.extend(r)
        if self.debug:
            print("Peek : >%s<" % repr(s + r))
        return s + r
    def pushback(self, s):
        """Return characters to the front of the input."""
        p = collections.deque(s)
        p.extend(self.q)
        self.q = p
    def readescaped(self):
        """Read one char, expanding \\ooo octal, \\xhh hex and \\n/\\t/\\r escapes;
        sets self.escaped so callers can tell a literal from an escaped char."""
        c = self.read(1)
        if c in self.escape_chars:
            self.escaped = True
            n = self.peek(3)
            if n.isdigit():
                n = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % n)
                return chr(int(n, 8))
            elif n[0] in "x":
                x = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % x)
                return chr(int(x[1:], 16))
            else:
                c = self.read(1)
                if self.debug:
                    print("Escape: >%s<" % c)
                return self.escape.get(c, c)
        else:
            self.escaped = False
            return c
    def lexStart(self):
        # initial no-op state; subclasses override this
        return (None, None)

View File

@@ -1,81 +0,0 @@
# coding: utf-8
import sys
if sys.version_info < (3,):
int_types = (
int,
long,
)
byte_types = (str, bytearray)
else:
int_types = (int,)
byte_types = (bytes, bytearray)
def check_instance(name, val, types):
    """Raise ValueError unless *val* is an instance of *types*."""
    if isinstance(val, types):
        return
    raise ValueError(
        "Attribute '%s' must be instance of %s [%s]" % (name, types, type(val))
    )
def check_bytes(name, val):
    """Raise ValueError unless *val* is bytes-like (per module-level byte_types)."""
    return check_instance(name, val, byte_types)
def range_property(attr, min, max):
    """Build a property storing *attr* that rejects ints outside [min, max]."""
    def getter(obj):
        return getattr(obj, "_%s" % attr)
    def setter(obj, val):
        ok = isinstance(val, int_types) and min <= val <= max
        if not ok:
            raise ValueError(
                "Attribute '%s' must be between %d-%d [%s]" % (attr, min, max, val)
            )
        # stored under a leading-underscore slot to avoid recursing the property
        setattr(obj, "_%s" % attr, val)
    return property(getter, setter)
def B(attr):
    """Property validated as an unsigned 8-bit value."""
    return range_property(attr, 0, 255)
def H(attr):
    """Property validated as an unsigned 16-bit value."""
    return range_property(attr, 0, 65535)
def I(attr):
    """Property validated as an unsigned 32-bit value."""
    return range_property(attr, 0, 4294967295)
def ntuple_range(attr, n, min, max):
    """Build a property storing an *n*-tuple whose ints must lie in [min, max]."""
    in_range = lambda x: isinstance(x, int_types) and min <= x <= max
    def getter(obj):
        return getattr(obj, "_%s" % attr)
    def setter(obj, val):
        if len(val) != n:
            raise ValueError(
                "Attribute '%s' must be tuple with %d elements [%s]" % (attr, n, val)
            )
        if not all(map(in_range, val)):
            raise ValueError(
                "Attribute '%s' elements must be between %d-%d [%s]"
                % (attr, min, max, val)
            )
        # stored under a leading-underscore slot to avoid recursing the property
        setattr(obj, "_%s" % attr, val)
    return property(getter, setter)
def IP4(attr):
    """Property validated as a 4-tuple of octets (an IPv4 address)."""
    return ntuple_range(attr, 4, 0, 255)
def IP6(attr):
    """Property validated as a 16-tuple of bytes (an IPv6 address)."""
    return ntuple_range(attr, 16, 0, 255)

View File

@@ -1,5 +0,0 @@
`ifaddr` with py2.7 support enabled by make-sfx.sh which strips py3 hints using strip_hints and removes the `^if True:` blocks
L: BSD-2-Clause
Copyright (c) 2014 Stefan C. Mueller
https://github.com/pydron/ifaddr/

View File

@@ -1,21 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
"""
L: BSD-2-Clause
Copyright (c) 2014 Stefan C. Mueller
https://github.com/pydron/ifaddr/tree/0.2.0
"""
import os
from ._shared import IP, Adapter
# select the platform-specific get_adapters() implementation at import time
if os.name == "nt":
    from ._win32 import get_adapters
elif os.name == "posix":
    from ._posix import get_adapters
else:
    raise RuntimeError("Unsupported Operating System: %s" % os.name)
__all__ = ["Adapter", "IP", "get_adapters"]

View File

@@ -1,84 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import collections
import ctypes.util
import os
import socket
import ipaddress
if True: # pylint: disable=using-constant-test
from typing import Iterable, Optional
from . import _shared as shared
from ._shared import U
class ifaddrs(ctypes.Structure):
    # forward-declared so ifa_next can point at this same struct type
    pass
ifaddrs._fields_ = [
    ("ifa_next", ctypes.POINTER(ifaddrs)),
    ("ifa_name", ctypes.c_char_p),
    ("ifa_flags", ctypes.c_uint),
    ("ifa_addr", ctypes.POINTER(shared.sockaddr)),
    ("ifa_netmask", ctypes.POINTER(shared.sockaddr)),
]
# on SunOS the socket functions live in libsocket rather than libc
libc = ctypes.CDLL(ctypes.util.find_library("socket" if os.uname()[0] == "SunOS" else "c"), use_errno=True)  # type: ignore
def get_adapters(include_unconfigured: bool = False) -> Iterable[shared.Adapter]:
    """Enumerate network adapters via getifaddrs().

    Walks the returned linked list, converting each entry's address and
    netmask into an ifaddr.IP; adapters without a configured IP are
    included only when *include_unconfigured* is True.
    """
    addr0 = addr = ctypes.POINTER(ifaddrs)()
    retval = libc.getifaddrs(ctypes.byref(addr))
    if retval != 0:
        eno = ctypes.get_errno()
        raise OSError(eno, os.strerror(eno))
    ips = collections.OrderedDict()
    def add_ip(adapter_name: str, ip: Optional[shared.IP]) -> None:
        # create the Adapter on first sight, then append the IP (if any)
        if adapter_name not in ips:
            index = None  # type: Optional[int]
            try:
                # Mypy errors on this when the Windows CI runs:
                # error: Module has no attribute "if_nametoindex"
                index = socket.if_nametoindex(adapter_name)  # type: ignore
            except (OSError, AttributeError):
                pass
            ips[adapter_name] = shared.Adapter(
                adapter_name, adapter_name, [], index=index
            )
        if ip is not None:
            ips[adapter_name].ips.append(ip)
    while addr:
        name = addr[0].ifa_name.decode(encoding="UTF-8")
        ip_addr = shared.sockaddr_to_ip(addr[0].ifa_addr)
        if ip_addr:
            # some entries leave the netmask's family unset; copy the address family
            if addr[0].ifa_netmask and not addr[0].ifa_netmask[0].sa_familiy:
                addr[0].ifa_netmask[0].sa_familiy = addr[0].ifa_addr[0].sa_familiy
            netmask = shared.sockaddr_to_ip(addr[0].ifa_netmask)
            if isinstance(netmask, tuple):
                # IPv6: netmask came back as (ip, flowinfo, scope_id)
                netmaskStr = U(netmask[0])
                prefixlen = shared.ipv6_prefixlength(ipaddress.IPv6Address(netmaskStr))
            else:
                if netmask is None:
                    t = "sockaddr_to_ip({}) returned None"
                    raise Exception(t.format(addr[0].ifa_netmask))
                netmaskStr = U("0.0.0.0/" + netmask)
                prefixlen = ipaddress.IPv4Network(netmaskStr).prefixlen
            ip = shared.IP(ip_addr, prefixlen, name)
            add_ip(name, ip)
        else:
            if include_unconfigured:
                add_ip(name, None)
        addr = addr[0].ifa_next
    libc.freeifaddrs(addr0)
    return ips.values()

View File

@@ -1,203 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import ctypes
import platform
import socket
import sys
import ipaddress
if True: # pylint: disable=using-constant-test
from typing import Callable, List, Optional, Union
PY2 = sys.version_info < (3,)
if not PY2:
    # U() converts a value to the native text type (str on py3, unicode on py2)
    U: Callable[[str], str] = str
else:
    U = unicode  # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
class Adapter(object):
    """A network interface device controller (NIC), such as a network card.

    One adapter can carry multiple IPs. On Linux, aliasing (several IPs on
    one physical NIC) appears as separate 'virtual' adapters, each an
    instance of this class holding up to one IPv4 and one IPv6 address.
    """

    def __init__(
        self, name: str, nice_name: str, ips: List["IP"], index: Optional[int] = None
    ) -> None:
        # system-unique id: `eth0`/`eth0:1` on Linux, a `{GUID}` string on Windows
        self.name = name
        # human-readable device name (currently identical to `name` on Linux)
        self.nice_name = nice_name
        # ifaddr.IP instances in the order the system reported them
        self.ips = ips
        # adapter index used by some APIs (e.g. IPv6 multicast group join)
        self.index = index

    def __repr__(self) -> str:
        t = "Adapter(name={name}, nice_name={nice_name}, ips={ips}, index={index})"
        return t.format(
            name=repr(self.name),
            nice_name=repr(self.nice_name),
            ips=repr(self.ips),
            index=repr(self.index),
        )
if True:
# Type of an IPv4 address (a string in "xxx.xxx.xxx.xxx" format)
_IPv4Address = str
# Type of an IPv6 address (a three-tuple `(ip, flowinfo, scope_id)`)
_IPv6Address = tuple[str, int, int]
class IP(object):
    """One IP address belonging to an adapter.

    `ip` is a dotted-quad string for IPv4, or a three-tuple
    `(ip, flowinfo, scope_id)` for IPv6 where `ip` is colon-separated hex.
    """

    def __init__(
        self, ip: Union[_IPv4Address, _IPv6Address], network_prefix: int, nice_name: str
    ) -> None:
        # the address itself (str for IPv4, 3-tuple for IPv6)
        self.ip = ip
        # number of leading network bits; 24 corresponds to a 255.255.255.0 mask
        self.network_prefix = network_prefix
        # adapter name on Linux; the control-panel connection name on Windows
        self.nice_name = nice_name

    @property
    def is_IPv4(self) -> bool:
        """True when this is an IPv4 address (i.e. not a tuple)."""
        return not isinstance(self.ip, tuple)

    @property
    def is_IPv6(self) -> bool:
        """True when this is an IPv6 address (i.e. a tuple)."""
        return isinstance(self.ip, tuple)

    def __repr__(self) -> str:
        t = "IP(ip={ip}, network_prefix={network_prefix}, nice_name={nice_name})"
        return t.format(
            ip=repr(self.ip),
            network_prefix=repr(self.network_prefix),
            nice_name=repr(self.nice_name),
        )
if platform.system() == "Darwin" or "BSD" in platform.system():
    # BSD derived systems use marginally different structures
    # than either Linux or Windows.
    # I still keep it in `shared` since we can use
    # both structures equally.
    # NOTE: field name "sa_familiy" [sic] is misspelled consistently package-wide.
    class sockaddr(ctypes.Structure):
        # BSD layout: leading one-byte sa_len, one-byte address family
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sa_data", ctypes.c_uint8 * 14),
        ]
    class sockaddr_in(ctypes.Structure):
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sin_port", ctypes.c_uint16),
            ("sin_addr", ctypes.c_uint8 * 4),
            ("sin_zero", ctypes.c_uint8 * 8),
        ]
    class sockaddr_in6(ctypes.Structure):
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sin6_port", ctypes.c_uint16),
            ("sin6_flowinfo", ctypes.c_uint32),
            ("sin6_addr", ctypes.c_uint8 * 16),
            ("sin6_scope_id", ctypes.c_uint32),
        ]
else:
    # Linux/Windows layout: two-byte address family, no sa_len
    class sockaddr(ctypes.Structure):  # type: ignore
        _fields_ = [("sa_familiy", ctypes.c_uint16), ("sa_data", ctypes.c_uint8 * 14)]
    class sockaddr_in(ctypes.Structure):  # type: ignore
        _fields_ = [
            ("sin_familiy", ctypes.c_uint16),
            ("sin_port", ctypes.c_uint16),
            ("sin_addr", ctypes.c_uint8 * 4),
            ("sin_zero", ctypes.c_uint8 * 8),
        ]
    class sockaddr_in6(ctypes.Structure):  # type: ignore
        _fields_ = [
            ("sin6_familiy", ctypes.c_uint16),
            ("sin6_port", ctypes.c_uint16),
            ("sin6_flowinfo", ctypes.c_uint32),
            ("sin6_addr", ctypes.c_uint8 * 16),
            ("sin6_scope_id", ctypes.c_uint32),
        ]
def sockaddr_to_ip(
    sockaddr_ptr: "ctypes.pointer[sockaddr]",
) -> Optional[Union[_IPv4Address, _IPv6Address]]:
    """Convert a sockaddr pointer to a dotted-quad string (IPv4) or an
    (ip, flowinfo, scope_id) tuple (IPv6); None for null pointers or
    unsupported address families."""
    if sockaddr_ptr:
        if sockaddr_ptr[0].sa_familiy == socket.AF_INET:
            # reinterpret as sockaddr_in and extract the 4 address bytes
            ipv4 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in))
            ippacked = bytes(bytearray(ipv4[0].sin_addr))
            ip = U(ipaddress.ip_address(ippacked))
            return ip
        elif sockaddr_ptr[0].sa_familiy == socket.AF_INET6:
            # reinterpret as sockaddr_in6; keep flowinfo and scope_id
            ipv6 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in6))
            flowinfo = ipv6[0].sin6_flowinfo
            ippacked = bytes(bytearray(ipv6[0].sin6_addr))
            ip = U(ipaddress.ip_address(ippacked))
            scope_id = ipv6[0].sin6_scope_id
            return (ip, flowinfo, scope_id)
    return None
def ipv6_prefixlength(address: ipaddress.IPv6Address) -> int:
    """Count the set bits of *address* (the prefix length of a valid netmask)."""
    return bin(int(address)).count("1")

View File

@@ -1,135 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import ctypes
from ctypes import wintypes
if True: # pylint: disable=using-constant-test
from typing import Iterable, List
from . import _shared as shared
NO_ERROR = 0
ERROR_BUFFER_OVERFLOW = 111
MAX_ADAPTER_NAME_LENGTH = 256
MAX_ADAPTER_DESCRIPTION_LENGTH = 128
MAX_ADAPTER_ADDRESS_LENGTH = 8
AF_UNSPEC = 0
class SOCKET_ADDRESS(ctypes.Structure):
    # winapi SOCKET_ADDRESS: pointer to a sockaddr plus its byte length
    _fields_ = [
        ("lpSockaddr", ctypes.POINTER(shared.sockaddr)),
        ("iSockaddrLength", wintypes.INT),
    ]
class IP_ADAPTER_UNICAST_ADDRESS(ctypes.Structure):
    # forward-declared so Next can point at this same struct type
    pass
IP_ADAPTER_UNICAST_ADDRESS._fields_ = [
    ("Length", wintypes.ULONG),
    ("Flags", wintypes.DWORD),
    ("Next", ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)),
    ("Address", SOCKET_ADDRESS),
    ("PrefixOrigin", ctypes.c_uint),
    ("SuffixOrigin", ctypes.c_uint),
    ("DadState", ctypes.c_uint),
    ("ValidLifetime", wintypes.ULONG),
    ("PreferredLifetime", wintypes.ULONG),
    ("LeaseLifetime", wintypes.ULONG),
    ("OnLinkPrefixLength", ctypes.c_uint8),
]
class IP_ADAPTER_ADDRESSES(ctypes.Structure):
    # forward-declared so Next can point at this same struct type
    pass
IP_ADAPTER_ADDRESSES._fields_ = [
    ("Length", wintypes.ULONG),
    ("IfIndex", wintypes.DWORD),
    ("Next", ctypes.POINTER(IP_ADAPTER_ADDRESSES)),
    ("AdapterName", ctypes.c_char_p),
    ("FirstUnicastAddress", ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)),
    ("FirstAnycastAddress", ctypes.c_void_p),
    ("FirstMulticastAddress", ctypes.c_void_p),
    ("FirstDnsServerAddress", ctypes.c_void_p),
    ("DnsSuffix", ctypes.c_wchar_p),
    ("Description", ctypes.c_wchar_p),
    ("FriendlyName", ctypes.c_wchar_p),
]
iphlpapi = ctypes.windll.LoadLibrary("Iphlpapi") # type: ignore
def enumerate_interfaces_of_adapter(
    nice_name: str, address: IP_ADAPTER_UNICAST_ADDRESS
) -> Iterable[shared.IP]:
    """Yield an ifaddr.IP for every entry in the unicast-address linked list."""
    # Iterate through linked list and fill list
    addresses = []  # type: List[IP_ADAPTER_UNICAST_ADDRESS]
    while True:
        addresses.append(address)
        if not address.Next:
            break
        address = address.Next[0]
    for address in addresses:
        ip = shared.sockaddr_to_ip(address.Address.lpSockaddr)
        if ip is None:
            t = "sockaddr_to_ip({}) returned None"
            raise Exception(t.format(address.Address.lpSockaddr))
        network_prefix = address.OnLinkPrefixLength
        yield shared.IP(ip, network_prefix, nice_name)
def get_adapters(include_unconfigured: bool = False) -> Iterable[shared.Adapter]:
    """Enumerate adapters via GetAdaptersAddresses(), retrying with a larger
    buffer on overflow; unconfigured adapters are included only on request."""
    # Call GetAdaptersAddresses() with error and buffer size handling
    addressbuffersize = wintypes.ULONG(15 * 1024)
    retval = ERROR_BUFFER_OVERFLOW
    while retval == ERROR_BUFFER_OVERFLOW:
        # the API updates addressbuffersize to the size it actually needs
        addressbuffer = ctypes.create_string_buffer(addressbuffersize.value)
        retval = iphlpapi.GetAdaptersAddresses(
            wintypes.ULONG(AF_UNSPEC),
            wintypes.ULONG(0),
            None,
            ctypes.byref(addressbuffer),
            ctypes.byref(addressbuffersize),
        )
    if retval != NO_ERROR:
        raise ctypes.WinError()  # type: ignore
    # Iterate through adapters fill array
    address_infos = []  # type: List[IP_ADAPTER_ADDRESSES]
    address_info = IP_ADAPTER_ADDRESSES.from_buffer(addressbuffer)
    while True:
        address_infos.append(address_info)
        if not address_info.Next:
            break
        address_info = address_info.Next[0]
    # Iterate through unicast addresses
    result = []  # type: List[shared.Adapter]
    for adapter_info in address_infos:
        # We don't expect non-ascii characters here, so encoding shouldn't matter
        name = adapter_info.AdapterName.decode()
        nice_name = adapter_info.Description
        index = adapter_info.IfIndex
        if adapter_info.FirstUnicastAddress:
            ips = enumerate_interfaces_of_adapter(
                adapter_info.FriendlyName, adapter_info.FirstUnicastAddress[0]
            )
            ips = list(ips)
            result.append(shared.Adapter(name, nice_name, ips, index=index))
        elif include_unconfigured:
            result.append(shared.Adapter(name, nice_name, [], index=index))
    return result

View File

@@ -1,591 +0,0 @@
# coding: utf-8
# modified copy of Project Nayuki's qrcodegen (MIT-licensed);
# https://github.com/nayuki/QR-Code-generator/blob/daa3114/python/qrcodegen.py
# the original ^ is extremely well commented so refer to that for explanations
# hacks: binary-only, auto-ecc, render, py2-compat
from __future__ import print_function, unicode_literals
import collections
import itertools
if True: # pylint: disable=using-constant-test
from collections.abc import Sequence
from typing import Callable, List, Optional, Tuple, Union
def num_char_count_bits(ver: int) -> int:
    """Bit width of the byte-mode character-count field for QR version *ver*."""
    if (ver + 7) // 17:
        return 16
    return 8
class Ecc(object):
    """A QR error-correction level: its ordinal (0..3) and 2-bit format value."""

    ordinal: int
    formatbits: int

    LOW: "Ecc"
    MEDIUM: "Ecc"
    QUARTILE: "Ecc"
    HIGH: "Ecc"

    def __init__(self, i: int, fb: int) -> None:
        self.ordinal = i
        self.formatbits = fb


# the four standard levels as singletons
Ecc.LOW, Ecc.MEDIUM, Ecc.QUARTILE, Ecc.HIGH = (
    Ecc(0, 1),
    Ecc(1, 0),
    Ecc(2, 3),
    Ecc(3, 2),
)
class QrSegment(object):
    """A byte-mode data segment destined for a QR symbol."""
    @staticmethod
    def make_seg(data: Union[bytes, Sequence[int]]) -> "QrSegment":
        """Wrap raw bytes as a byte-mode segment (8 bits per input byte)."""
        bb = _BitBuffer()
        for b in data:
            bb.append_bits(b, 8)
        return QrSegment(len(data), bb)
    numchars: int  # num bytes, not the same as the data's bit length
    bitdata: List[int]  # The data bits of this segment
    def __init__(self, numch: int, bitdata: Sequence[int]) -> None:
        if numch < 0:
            raise ValueError()
        self.numchars = numch
        self.bitdata = list(bitdata)
    @staticmethod
    def get_total_bits(segs: Sequence["QrSegment"], ver: int) -> Optional[int]:
        """Total encoded bit cost of *segs* at version *ver*, or None when a
        segment's length does not fit the version's count field."""
        result = 0
        for seg in segs:
            ccbits: int = num_char_count_bits(ver)
            if seg.numchars >= (1 << ccbits):
                return None  # segment length doesn't fit the field's bit width
            # 4-bit mode indicator + count field + payload bits
            result += 4 + ccbits + len(seg.bitdata)
        return result
class QrCode(object):
    @staticmethod
    def encode_binary(data: Union[bytes, Sequence[int]]) -> "QrCode":
        """Encode raw bytes as a QR code consisting of one byte-mode segment."""
        return QrCode.encode_segments([QrSegment.make_seg(data)])
    @staticmethod
    def encode_segments(
        segs: Sequence[QrSegment],
        ecl: Ecc = Ecc.LOW,
        minver: int = 2,
        maxver: int = 40,
        mask: int = -1,
    ) -> "QrCode":
        """Build a QrCode from *segs*: pick the smallest fitting version in
        [minver, maxver], boost the ECC level as far as capacity allows,
        then pack, terminate and pad the bit stream."""
        # find the smallest version whose data capacity fits the payload
        for ver in range(minver, maxver + 1):
            datacapacitybits: int = QrCode._get_num_data_codewords(ver, ecl) * 8
            datausedbits: Optional[int] = QrSegment.get_total_bits(segs, ver)
            if (datausedbits is not None) and (datausedbits <= datacapacitybits):
                break
        assert datausedbits
        # raise ECC as far as the chosen version's capacity still fits the data
        for newecl in (
            Ecc.MEDIUM,
            Ecc.QUARTILE,
            Ecc.HIGH,
        ):
            if datausedbits <= QrCode._get_num_data_codewords(ver, newecl) * 8:
                ecl = newecl
        # Concatenate all segments to create the data bit string
        bb = _BitBuffer()
        for seg in segs:
            bb.append_bits(4, 4)  # byte-mode indicator
            bb.append_bits(seg.numchars, num_char_count_bits(ver))
            bb.extend(seg.bitdata)
        assert len(bb) == datausedbits
        # Add terminator and pad up to a byte if applicable
        datacapacitybits = QrCode._get_num_data_codewords(ver, ecl) * 8
        assert len(bb) <= datacapacitybits
        bb.append_bits(0, min(4, datacapacitybits - len(bb)))
        bb.append_bits(0, -len(bb) % 8)
        assert len(bb) % 8 == 0
        # Pad with alternating bytes until data capacity is reached
        for padbyte in itertools.cycle((0xEC, 0x11)):
            if len(bb) >= datacapacitybits:
                break
            bb.append_bits(padbyte, 8)
        # Pack bits into bytes in big endian
        datacodewords = bytearray([0] * (len(bb) // 8))
        for (i, bit) in enumerate(bb):
            datacodewords[i >> 3] |= bit << (7 - (i & 7))
        return QrCode(ver, ecl, datacodewords, mask)
ver: int
size: int # w/h; 21..177 (ver * 4 + 17)
ecclvl: Ecc
mask: int # 0..7
modules: List[List[bool]]
unmaskable: List[List[bool]]
    def __init__(
        self,
        ver: int,
        ecclvl: Ecc,
        datacodewords: Union[bytes, Sequence[int]],
        msk: int,
    ) -> None:
        """Draw the module grid for *datacodewords*; msk=-1 selects the mask
        with the lowest penalty score automatically."""
        self.ver = ver
        self.size = ver * 4 + 17
        self.ecclvl = ecclvl
        self.modules = [[False] * self.size for _ in range(self.size)]
        self.unmaskable = [[False] * self.size for _ in range(self.size)]
        # Compute ECC, draw modules
        self._draw_function_patterns()
        allcodewords: bytes = self._add_ecc_and_interleave(bytearray(datacodewords))
        self._draw_codewords(allcodewords)
        if msk == -1:  # automask
            minpenalty: int = 1 << 32
            for i in range(8):
                self._apply_mask(i)
                self._draw_format_bits(i)
                penalty = self._get_penalty_score()
                if penalty < minpenalty:
                    msk = i
                    minpenalty = penalty
                self._apply_mask(i)  # xor/undo
        assert 0 <= msk <= 7
        self.mask = msk
        self._apply_mask(msk)  # Apply the final choice of mask
        self._draw_format_bits(msk)  # Overwrite old format bits
def render(self, zoom=1, pad=4) -> str:
tab = self.modules
sz = self.size
if sz % 2 and zoom == 1:
tab.append([False] * sz)
tab = [[False] * sz] * pad + tab + [[False] * sz] * pad
tab = [[False] * pad + x + [False] * pad for x in tab]
rows: list[str] = []
if zoom == 1:
for y in range(0, len(tab), 2):
row = ""
for x in range(len(tab[y])):
v = 2 if tab[y][x] else 0
v += 1 if tab[y + 1][x] else 0
row += " ▄▀█"[v]
rows.append(row)
else:
for tr in tab:
row = ""
for zb in tr:
row += ""[int(zb)] * 2
rows.append(row)
return "\n".join(rows)
    def _draw_function_patterns(self) -> None:
        """Draw timing, finder and alignment patterns plus dummy format/version bits."""
        # Draw horizontal and vertical timing patterns
        for i in range(self.size):
            self._set_function_module(6, i, i % 2 == 0)
            self._set_function_module(i, 6, i % 2 == 0)
        # Draw 3 finder patterns (all corners except bottom right; overwrites some timing modules)
        self._draw_finder_pattern(3, 3)
        self._draw_finder_pattern(self.size - 4, 3)
        self._draw_finder_pattern(3, self.size - 4)
        # Draw numerous alignment patterns
        alignpatpos: List[int] = self._get_alignment_pattern_positions()
        numalign: int = len(alignpatpos)
        skips: Sequence[Tuple[int, int]] = (
            (0, 0),
            (0, numalign - 1),
            (numalign - 1, 0),
        )
        for i in range(numalign):
            for j in range(numalign):
                if (i, j) not in skips:  # avoid finder corners
                    self._draw_alignment_pattern(alignpatpos[i], alignpatpos[j])
        # draw config data with dummy mask value; ctor overwrites it
        self._draw_format_bits(0)
        self._draw_ver()
def _draw_format_bits(self, mask: int) -> None:
    """Draw both copies of the 15-bit format information (ECC level + mask id).

    Safe to call repeatedly: the constructor's automask loop redraws it
    for each candidate mask and once more for the final choice.
    """
    # Calculate error correction code and pack bits; ecclvl is uint2, mask is uint3
    data: int = self.ecclvl.formatbits << 3 | mask
    # BCH(15,5) remainder with generator polynomial 0x537
    rem: int = data
    for _ in range(10):
        rem = (rem << 1) ^ ((rem >> 9) * 0x537)
    bits: int = (data << 10 | rem) ^ 0x5412  # uint15, xor-masked per QR spec
    assert bits >> 15 == 0
    # first copy, wrapped around the top-left finder pattern
    for i in range(0, 6):
        self._set_function_module(8, i, _get_bit(bits, i))
    self._set_function_module(8, 7, _get_bit(bits, 6))
    self._set_function_module(8, 8, _get_bit(bits, 7))
    self._set_function_module(7, 8, _get_bit(bits, 8))
    for i in range(9, 15):
        self._set_function_module(14 - i, 8, _get_bit(bits, i))
    # second copy, split between the bottom-left and top-right finders
    for i in range(0, 8):
        self._set_function_module(self.size - 1 - i, 8, _get_bit(bits, i))
    for i in range(8, 15):
        self._set_function_module(8, self.size - 15 + i, _get_bit(bits, i))
    self._set_function_module(8, self.size - 8, True)  # Always dark
def _draw_ver(self) -> None:
    """Draw the two 18-bit version-information blocks (QR versions 7 and up)."""
    if self.ver < 7:
        return
    # BCH(18,6) error correction of the 6-bit version (generator 0x1F25)
    rem: int = self.ver
    for _ in range(12):
        rem = (rem << 1) ^ ((rem >> 11) * 0x1F25)
    bits: int = self.ver << 12 | rem  # uint18
    assert bits >> 18 == 0
    # two mirrored 3x6 copies near the bottom-left and top-right finders
    for i in range(18):
        dark: bool = _get_bit(bits, i)
        row, col = divmod(i, 3)
        a: int = self.size - 11 + col
        self._set_function_module(a, row, dark)
        self._set_function_module(row, a, dark)
def _draw_finder_pattern(self, x: int, y: int) -> None:
    """Draw a 7x7 finder pattern plus its light separator, centered at (x, y).

    Cells falling outside the symbol (the separator bleeding off the edge)
    are skipped.
    """
    for dy in range(-4, 5):
        yy = y + dy
        if not (0 <= yy < self.size):
            continue
        for dx in range(-4, 5):
            xx = x + dx
            if 0 <= xx < self.size:
                # Chebyshev (infinity-norm) rings 2 and 4 are light,
                # everything else dark: center 3x3, dark ring, light border
                ring = max(abs(dx), abs(dy))
                self._set_function_module(xx, yy, ring != 2 and ring != 4)
def _draw_alignment_pattern(self, x: int, y: int) -> None:
    """Draw a 5x5 alignment pattern (dark border, light ring, dark center) at (x, y)."""
    for dy in (-2, -1, 0, 1, 2):
        for dx in (-2, -1, 0, 1, 2):
            # only the ring at Chebyshev distance 1 is light
            self._set_function_module(x + dx, y + dy, max(abs(dx), abs(dy)) != 1)
def _set_function_module(self, x: int, y: int, isdark: bool) -> None:
    """Paint module (x, y) and flag it as a function module (exempt from masking)."""
    self.unmaskable[y][x] = True
    self.modules[y][x] = isdark
def _add_ecc_and_interleave(self, data: bytearray) -> bytes:
    """Split data into blocks, append Reed-Solomon ECC to each block, and
    interleave all blocks into the final codeword sequence.

    data must be exactly _get_num_data_codewords(ver, ecclvl) bytes long;
    the result is _get_num_raw_data_modules(ver) // 8 bytes.
    """
    ver: int = self.ver
    assert len(data) == QrCode._get_num_data_codewords(ver, self.ecclvl)
    # Calculate parameter numbers
    numblocks: int = QrCode._NUM_ERROR_CORRECTION_BLOCKS[self.ecclvl.ordinal][ver]
    blockecclen: int = QrCode._ECC_CODEWORDS_PER_BLOCK[self.ecclvl.ordinal][ver]
    rawcodewords: int = QrCode._get_num_raw_data_modules(ver) // 8
    numshortblocks: int = numblocks - rawcodewords % numblocks
    shortblocklen: int = rawcodewords // numblocks
    # Split data into blocks and append ECC to each block
    blocks: List[bytes] = []
    rsdiv: bytes = QrCode._reed_solomon_compute_divisor(blockecclen)
    k: int = 0
    for i in range(numblocks):
        # short blocks carry one data byte less than the long ones
        dat: bytearray = data[
            k : k + shortblocklen - blockecclen + (0 if i < numshortblocks else 1)
        ]
        k += len(dat)
        ecc: bytes = QrCode._reed_solomon_compute_remainder(dat, rsdiv)
        if i < numshortblocks:
            # pad so every block has equal length for the interleave below
            dat.append(0)
        blocks.append(dat + ecc)
    assert k == len(data)
    # Interleave (not concatenate) the bytes from every block into a single sequence
    result = bytearray()
    for i in range(len(blocks[0])):
        for (j, blk) in enumerate(blocks):
            # Skip the padding byte in short blocks
            if (i != shortblocklen - blockecclen) or (j >= numshortblocks):
                result.append(blk[i])
    assert len(result) == rawcodewords
    return result
def _draw_codewords(self, data: bytes) -> None:
    """Place the interleaved data+ECC codewords into the module matrix.

    Traverses the symbol in the zigzag order defined by the QR spec:
    two-module-wide column pairs from right to left, alternating
    upward/downward, skipping function modules.
    """
    assert len(data) == QrCode._get_num_raw_data_modules(self.ver) // 8
    i: int = 0  # Bit index into the data
    for right in range(self.size - 1, 0, -2):
        # idx of right column in each column pair
        if right <= 6:
            right -= 1  # hop over the vertical timing pattern at column 6
        for vert in range(self.size):  # Vertical counter
            for j in range(2):
                x: int = right - j
                upward: bool = (right + 1) & 2 == 0
                y: int = (self.size - 1 - vert) if upward else vert
                if (not self.unmaskable[y][x]) and (i < len(data) * 8):
                    # codeword bits are consumed MSB-first
                    self.modules[y][x] = _get_bit(data[i >> 3], 7 - (i & 7))
                    i += 1
    # any remainder bits (0..7) were set 0/false/light by ctor
    assert i == len(data) * 8
def _apply_mask(self, mask: int) -> None:
    """XOR the chosen mask pattern onto every non-function module.

    Masking is an involution: applying the same mask twice restores the
    matrix, which the constructor's automask loop relies on.
    """
    pattern: Callable[[int, int], int] = QrCode._MASK_PATTERNS[mask]
    for row in range(self.size):
        protected = self.unmaskable[row]
        cells = self.modules[row]
        for col in range(self.size):
            if pattern(col, row) == 0 and not protected[col]:
                cells[col] = not cells[col]
def _get_penalty_score(self) -> int:
    """Score the current module matrix per the QR spec's four penalty rules.

    Lower is better; the constructor picks the mask with the lowest score.
    Rules: N1 runs of >=5 same-colored modules, N2 2x2 same-colored blocks,
    N3 finder-like 1:1:3:1:1 patterns, N4 dark/light imbalance.
    """
    result: int = 0
    size: int = self.size
    modules: List[List[bool]] = self.modules
    # Adjacent modules in row having same color, and finder-like patterns
    for y in range(size):
        runcolor: bool = False
        runx: int = 0
        runhistory = collections.deque([0] * 7, 7)
        for x in range(size):
            if modules[y][x] == runcolor:
                runx += 1
                # N1: 5-run costs _PENALTY_N1, each extra module costs 1
                if runx == 5:
                    result += QrCode._PENALTY_N1
                elif runx > 5:
                    result += 1
            else:
                self._finder_penalty_add_history(runx, runhistory)
                # finder-like patterns are only checked when a dark run ends
                if not runcolor:
                    result += (
                        self._finder_penalty_count_patterns(runhistory)
                        * QrCode._PENALTY_N3
                    )
                runcolor = modules[y][x]
                runx = 1
        result += (
            self._finder_penalty_terminate_and_count(runcolor, runx, runhistory)
            * QrCode._PENALTY_N3
        )
    # Adjacent modules in column having same color, and finder-like patterns
    for x in range(size):
        runcolor = False
        runy = 0
        runhistory = collections.deque([0] * 7, 7)
        for y in range(size):
            if modules[y][x] == runcolor:
                runy += 1
                if runy == 5:
                    result += QrCode._PENALTY_N1
                elif runy > 5:
                    result += 1
            else:
                self._finder_penalty_add_history(runy, runhistory)
                if not runcolor:
                    result += (
                        self._finder_penalty_count_patterns(runhistory)
                        * QrCode._PENALTY_N3
                    )
                runcolor = modules[y][x]
                runy = 1
        result += (
            self._finder_penalty_terminate_and_count(runcolor, runy, runhistory)
            * QrCode._PENALTY_N3
        )
    # 2*2 blocks of modules having same color
    for y in range(size - 1):
        for x in range(size - 1):
            if (
                modules[y][x]
                == modules[y][x + 1]
                == modules[y + 1][x]
                == modules[y + 1][x + 1]
            ):
                result += QrCode._PENALTY_N2
    # Balance of dark and light modules
    dark: int = sum((1 if cell else 0) for row in modules for cell in row)
    total: int = size ** 2  # Note that size is odd, so dark/total != 1/2
    # Compute the smallest integer k >= 0 such that (45-5k)% <= dark/total <= (55+5k)%
    k: int = (abs(dark * 20 - total * 10) + total - 1) // total - 1
    assert 0 <= k <= 9
    result += k * QrCode._PENALTY_N4
    assert 0 <= result <= 2568888
    # ^ Non-tight upper bound based on default values of PENALTY_N1, ..., N4
    return result
def _get_alignment_pattern_positions(self) -> List[int]:
    """Return the ascending center coordinates of the alignment patterns
    (used for both axes) for this version; empty for version 1."""
    ver: int = self.ver
    if ver == 1:
        return []  # version 1 has no alignment patterns
    numalign: int = ver // 7 + 2
    if ver == 32:
        # the one version whose spacing deviates from the rounding formula
        step = 26
    else:
        step = (ver * 4 + numalign * 2 + 1) // (numalign * 2 - 2) * 2
    # fixed column 6, then the remaining centers counted down from the
    # right edge in equal steps
    coords: List[int] = [6]
    c = self.size - 7
    for _ in range(numalign - 1):
        coords.append(c)
        c -= step
    coords.sort()
    return coords
@staticmethod
def _get_num_raw_data_modules(ver: int) -> int:
    """Number of data-bearing modules for version ver (1..40): the whole
    symbol minus every function pattern (finder, timing, alignment,
    format, version)."""
    n: int = (16 * ver + 128) * ver + 64
    if ver >= 2:
        # subtract alignment patterns and their timing-pattern overlaps
        numalign = ver // 7 + 2
        n -= (25 * numalign - 10) * numalign - 55
        if ver >= 7:
            n -= 36  # the two 3x6 version-information blocks
    assert 208 <= n <= 29648
    return n
@staticmethod
def _get_num_data_codewords(ver: int, ecl: Ecc) -> int:
    """Number of 8-bit data codewords (excluding ECC) that fit in a
    version-ver symbol at error-correction level ecl."""
    total = QrCode._get_num_raw_data_modules(ver) // 8
    per_block = QrCode._ECC_CODEWORDS_PER_BLOCK[ecl.ordinal][ver]
    nblocks = QrCode._NUM_ERROR_CORRECTION_BLOCKS[ecl.ordinal][ver]
    return total - per_block * nblocks
@staticmethod
def _reed_solomon_compute_divisor(degree: int) -> bytes:
    """Return the Reed-Solomon generator polynomial of the given degree
    over GF(2^8/0x11D), as coefficient bytes (see comment below)."""
    if not (1 <= degree <= 255):
        raise ValueError("Degree out of range")
    # Polynomial coefficients are stored from highest to lowest power, excluding the leading term which is always 1.
    # For example the polynomial x^3 + 255x^2 + 8x + 93 is stored as the uint8 array [255, 8, 93].
    result = bytearray([0] * (degree - 1) + [1])  # start with monomial x^0
    # Compute the product polynomial (x - r^0) * (x - r^1) * (x - r^2) * ... * (x - r^{degree-1}),
    # and drop the highest monomial term which is always 1x^degree.
    # Note that r = 0x02, which is a generator element of this field GF(2^8/0x11D).
    root: int = 1
    for _ in range(degree):
        # Multiply the current product by (x - r^i)
        for j in range(degree):
            result[j] = QrCode._reed_solomon_multiply(result[j], root)
            if j + 1 < degree:
                result[j] ^= result[j + 1]
        root = QrCode._reed_solomon_multiply(root, 0x02)
    return result
@staticmethod
def _reed_solomon_compute_remainder(data: bytes, divisor: bytes) -> bytes:
    """Polynomial division of data by divisor over GF(2^8); the remainder
    is the block's ECC bytes (same length as divisor)."""
    rem = bytearray(len(divisor))  # zero-initialized accumulator
    for b in data:
        # shift one position and fold the outgoing byte back in
        factor = b ^ rem[0]
        del rem[0]
        rem.append(0)
        for i, coef in enumerate(divisor):
            rem[i] ^= QrCode._reed_solomon_multiply(coef, factor)
    return rem
@staticmethod
def _reed_solomon_multiply(x: int, y: int) -> int:
    """Product of field elements x and y in GF(2^8) with reducing
    polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11D).

    Raises ValueError if either operand does not fit in a byte.
    """
    if x >> 8 or y >> 8:
        raise ValueError("Byte out of range")
    # Russian peasant multiplication, consuming y's bits MSB-first
    z: int = 0
    for i in range(7, -1, -1):
        z = (z << 1) ^ ((z >> 7) * 0x11D)
        if (y >> i) & 1:
            z ^= x
    assert z >> 8 == 0
    return z
def _finder_penalty_count_patterns(self, runhistory: collections.deque[int]) -> int:
    """Count (0..2) finder-like 1:1:3:1:1 run patterns with a >=4-unit
    light margin on either side; runhistory[0] is the most recent run."""
    n: int = runhistory[1]
    assert n <= self.size * 3
    # core: the dark(n) light(n) dark(3n) light(n) dark(n) middle section
    core: bool = (
        n > 0
        and (runhistory[2] == runhistory[4] == runhistory[5] == n)
        and runhistory[3] == n * 3
    )
    # one point per side that has the wide light margin
    return (
        1 if (core and runhistory[0] >= n * 4 and runhistory[6] >= n) else 0
    ) + (1 if (core and runhistory[6] >= n * 4 and runhistory[0] >= n) else 0)
def _finder_penalty_terminate_and_count(
    self,
    currentruncolor: bool,
    currentrunlength: int,
    runhistory: collections.deque[int],
) -> int:
    """Flush the in-progress run at the end of a row/column, account for
    the implicit light quiet zone, and return the finder-pattern count."""
    if currentruncolor:  # Terminate dark run
        self._finder_penalty_add_history(currentrunlength, runhistory)
        currentrunlength = 0
    currentrunlength += self.size  # Add light border to final run
    self._finder_penalty_add_history(currentrunlength, runhistory)
    return self._finder_penalty_count_patterns(runhistory)
def _finder_penalty_add_history(
    self, currentrunlength: int, runhistory: collections.deque[int]
) -> None:
    """Push a finished run length onto the history (newest first)."""
    if not runhistory[0]:
        # very first run of the line: extend it by the light quiet zone
        currentrunlength += self.size
    runhistory.appendleft(currentrunlength)
# Penalty weights from the QR spec, used by _get_penalty_score:
_PENALTY_N1: int = 3   # run of 5+ same-colored modules in a row/column
_PENALTY_N2: int = 3   # 2x2 block of same-colored modules
_PENALTY_N3: int = 40  # finder-like 1:1:3:1:1 pattern in a row/column
_PENALTY_N4: int = 10  # per 5% step of dark/light imbalance
# Both tables below are indexed as [ecc_ordinal][version]; version 0 is
# invalid, hence the leading -1 sentinel in every row.
# fmt: off
_ECC_CODEWORDS_PER_BLOCK: Sequence[Sequence[int]] = (
    (-1, 7, 10, 15, 20, 26, 18, 20, 24, 30, 18, 20, 24, 26, 30, 22, 24, 28, 30, 28, 28, 28, 28, 30, 30, 26, 28, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30),  # noqa: E241 # L
    (-1, 10, 16, 26, 18, 24, 16, 18, 22, 22, 26, 30, 22, 22, 24, 24, 28, 28, 26, 26, 26, 26, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28),  # noqa: E241 # M
    (-1, 13, 22, 18, 26, 18, 24, 18, 22, 20, 24, 28, 26, 24, 20, 30, 24, 28, 28, 26, 30, 28, 30, 30, 30, 30, 28, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30),  # noqa: E241 # Q
    (-1, 17, 28, 22, 16, 22, 28, 26, 26, 24, 28, 24, 28, 22, 24, 24, 30, 28, 28, 26, 28, 30, 24, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30))  # noqa: E241 # H
_NUM_ERROR_CORRECTION_BLOCKS: Sequence[Sequence[int]] = (
    (-1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4, 4, 4, 4, 4, 6, 6, 6, 6, 7, 8, 8, 9, 9, 10, 12, 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 24, 25),  # noqa: E241 # L
    (-1, 1, 1, 1, 2, 2, 4, 4, 4, 5, 5, 5, 8, 9, 9, 10, 10, 11, 13, 14, 16, 17, 17, 18, 20, 21, 23, 25, 26, 28, 29, 31, 33, 35, 37, 38, 40, 43, 45, 47, 49),  # noqa: E241 # M
    (-1, 1, 1, 2, 2, 4, 4, 6, 6, 8, 8, 8, 10, 12, 16, 12, 17, 16, 18, 21, 20, 23, 23, 25, 27, 29, 34, 34, 35, 38, 40, 43, 45, 48, 51, 53, 56, 59, 62, 65, 68),  # noqa: E241 # Q
    (-1, 1, 1, 2, 4, 4, 4, 5, 6, 8, 8, 11, 11, 16, 16, 18, 16, 19, 21, 25, 25, 25, 34, 30, 32, 35, 37, 40, 42, 45, 48, 51, 54, 57, 60, 63, 66, 70, 74, 77, 81))  # noqa: E241 # H
# fmt: on
# The 8 mask-pattern predicates from the QR spec; a module at (x, y) is
# flipped by _apply_mask when its pattern evaluates to 0.
_MASK_PATTERNS: Sequence[Callable[[int, int], int]] = (
    (lambda x, y: (x + y) % 2),
    (lambda x, y: y % 2),
    (lambda x, y: x % 3),
    (lambda x, y: (x + y) % 3),
    (lambda x, y: (x // 3 + y // 2) % 2),
    (lambda x, y: x * y % 2 + x * y % 3),
    (lambda x, y: (x * y % 2 + x * y % 3) % 2),
    (lambda x, y: ((x + y) % 2 + x * y % 3) % 2),
)
class _BitBuffer(list):  # type: ignore
    """A plain list of bits (0/1 ints) with a big-endian bit appender."""

    def append_bits(self, val: int, n: int) -> None:
        """Append the n low-order bits of val, most significant bit first.

        Raises ValueError if n is negative or val does not fit in n bits.
        """
        if n < 0 or val >> n:
            raise ValueError("Value out of range")
        for shift in range(n - 1, -1, -1):
            self.append((val >> shift) & 1)
def _get_bit(x: int, i: int) -> bool:
    """Return True if bit i (0 = least significant) of x is set."""
    return bool((x >> i) & 1)
class DataTooLongError(ValueError):
    # NOTE(review): raise sites are outside this chunk; presumably thrown
    # by the encode helpers when the payload exceeds QR capacity — confirm.
    """The data payload is too large to fit in the allowed QR code sizes."""
    pass

View File

@@ -1,5 +1,3 @@
# coding: utf-8
""" """
This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
handler of Python 3. handler of Python 3.
@@ -12,16 +10,27 @@ Original source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/m
# This code is released under the Python license and the BSD 2-clause license # This code is released under the Python license and the BSD 2-clause license
import codecs
import platform import platform
import codecs
import sys import sys
PY3 = sys.version_info > (3,) PY3 = sys.version_info[0] > 2
WINDOWS = platform.system() == "Windows" WINDOWS = platform.system() == "Windows"
FS_ERRORS = "surrogateescape" FS_ERRORS = "surrogateescape"
if True: # pylint: disable=using-constant-test
from typing import Any def u(text):
if PY3:
return text
else:
return text.decode("unicode_escape")
def b(data):
if PY3:
return data.encode("latin1")
else:
return data
if PY3: if PY3:
@@ -32,7 +41,7 @@ else:
bytes_chr = chr bytes_chr = chr
def surrogateescape_handler(exc: Any) -> tuple[str, int]: def surrogateescape_handler(exc):
""" """
Pure Python implementation of the PEP 383: the "surrogateescape" error Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode handler of Python 3. Undecodable bytes will be replaced by a Unicode
@@ -63,7 +72,7 @@ class NotASurrogateError(Exception):
pass pass
def replace_surrogate_encode(mystring: str) -> str: def replace_surrogate_encode(mystring):
""" """
Returns a (unicode) string, not the more logical bytes, because the codecs Returns a (unicode) string, not the more logical bytes, because the codecs
register_error functionality expects this. register_error functionality expects this.
@@ -89,7 +98,7 @@ def replace_surrogate_encode(mystring: str) -> str:
return str().join(decoded) return str().join(decoded)
def replace_surrogate_decode(mybytes: bytes) -> str: def replace_surrogate_decode(mybytes):
""" """
Returns a (unicode) string Returns a (unicode) string
""" """
@@ -110,7 +119,7 @@ def replace_surrogate_decode(mybytes: bytes) -> str:
return str().join(decoded) return str().join(decoded)
def encodefilename(fn: str) -> bytes: def encodefilename(fn):
if FS_ENCODING == "ascii": if FS_ENCODING == "ascii":
# ASCII encoder of Python 2 expects that the error handler returns a # ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error # Unicode string encodable to ASCII, whereas our surrogateescape error
@@ -150,16 +159,19 @@ def encodefilename(fn: str) -> bytes:
return fn.encode(FS_ENCODING, FS_ERRORS) return fn.encode(FS_ENCODING, FS_ERRORS)
def decodefilename(fn: bytes) -> str: def decodefilename(fn):
return fn.decode(FS_ENCODING, FS_ERRORS) return fn.decode(FS_ENCODING, FS_ERRORS)
FS_ENCODING = sys.getfilesystemencoding() FS_ENCODING = sys.getfilesystemencoding()
# FS_ENCODING = "ascii"; fn = b("[abc\xff]"); encoded = u("[abc\udcff]")
# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]')
# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
if WINDOWS and not PY3: if WINDOWS and not PY3:
# py2 thinks win* is mbcs, probably a bug? anyways this works # py2 thinks win* is mbcs, probably a bug? anyways this works
FS_ENCODING = "utf-8" FS_ENCODING = 'utf-8'
# normalize the filesystem encoding name. # normalize the filesystem encoding name.
@@ -167,7 +179,7 @@ if WINDOWS and not PY3:
FS_ENCODING = codecs.lookup(FS_ENCODING).name FS_ENCODING = codecs.lookup(FS_ENCODING).name
def register_surrogateescape() -> None: def register_surrogateescape():
""" """
Registers the surrogateescape error handler on Python 2 (only) Registers the surrogateescape error handler on Python 2 (only)
""" """

View File

@@ -1,32 +1,13 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import os
import time
import tempfile import tempfile
from datetime import datetime from datetime import datetime
from .bos import bos
if True: # pylint: disable=using-constant-test def errdesc(errors):
from typing import Any, Generator, Optional
from .util import NamedLogger
class StreamArc(object):
def __init__(
self,
log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None],
**kwargs: Any
):
self.log = log
self.fgen = fgen
def gen(self) -> Generator[Optional[bytes], None, None]:
raise Exception("override me")
def errdesc(errors: list[tuple[str, str]]) -> tuple[dict[str, Any], list[str]]:
report = ["copyparty failed to add the following files to the archive:", ""] report = ["copyparty failed to add the following files to the archive:", ""]
for fn, err in errors: for fn, err in errors:
@@ -36,11 +17,12 @@ def errdesc(errors: list[tuple[str, str]]) -> tuple[dict[str, Any], list[str]]:
tf_path = tf.name tf_path = tf.name
tf.write("\r\n".join(report).encode("utf-8", "replace")) tf.write("\r\n".join(report).encode("utf-8", "replace"))
dt = datetime.utcnow().strftime("%Y-%m%d-%H%M%S") dt = datetime.utcfromtimestamp(time.time())
dt = dt.strftime("%Y-%m%d-%H%M%S")
bos.chmod(tf_path, 0o444) os.chmod(tf_path, 0o444)
return { return {
"vp": "archive-errors-{}.txt".format(dt), "vp": "archive-errors-{}.txt".format(dt),
"ap": tf_path, "ap": tf_path,
"st": bos.stat(tf_path), "st": os.stat(tf_path),
}, report }

View File

@@ -1,544 +1,90 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse
import base64
import calendar
import gzip
import logging
import os
import re import re
import shlex import os
import signal
import socket
import string
import sys import sys
import threading
import time import time
import threading
from datetime import datetime, timedelta from datetime import datetime, timedelta
import calendar
# from inspect import currentframe from .__init__ import PY2, WINDOWS, MACOS, VT100
# print(currentframe().f_lineno) from .util import mp
if True: # pylint: disable=using-constant-test
from types import FrameType
import typing
from typing import Any, Optional, Union
from .__init__ import ANYWIN, MACOS, TYPE_CHECKING, VT100, EnvParams, unicode
from .authsrv import AuthSrv from .authsrv import AuthSrv
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE
from .tcpsrv import TcpSrv from .tcpsrv import TcpSrv
from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv
from .up2k import Up2k from .up2k import Up2k
from .util import ( from .th_srv import ThumbSrv, HAVE_PIL, HAVE_WEBP
VERSIONS,
Daemon,
Garda,
HLog,
HMaccas,
alltrace,
ansi_re,
min_ex,
mp,
start_log_thrs,
start_stackmon,
)
if TYPE_CHECKING:
try:
from .mdns import MDNS
from .ssdp import SSDPd
except:
pass
class SvcHub(object): class SvcHub(object):
""" """
Hosts all services which cannot be parallelized due to reliance on monolithic resources. Hosts all services which cannot be parallelized due to reliance on monolithic resources.
Creates a Broker which does most of the heavy stuff; hosted services can use this to perform work: Creates a Broker which does most of the heavy stuff; hosted services can use this to perform work:
hub.broker.<say|ask>(destination, args_list). hub.broker.put(want_reply, destination, args_list).
Either BrokerThr (plain threads) or BrokerMP (multiprocessing) is used depending on configuration. Either BrokerThr (plain threads) or BrokerMP (multiprocessing) is used depending on configuration.
Nothing is returned synchronously; if you want any value returned from the call, Nothing is returned synchronously; if you want any value returned from the call,
put() can return a queue (if want_reply=True) which has a blocking get() with the response. put() can return a queue (if want_reply=True) which has a blocking get() with the response.
""" """
def __init__(self, args: argparse.Namespace, argv: list[str], printed: str) -> None: def __init__(self, args):
self.args = args self.args = args
self.argv = argv
self.E: EnvParams = args.E
self.logf: Optional[typing.TextIO] = None
self.logf_base_fn = ""
self.stop_req = False
self.stopping = False
self.stopped = False
self.reload_req = False
self.reloading = False
self.stop_cond = threading.Condition()
self.nsigs = 3
self.retcode = 0
self.httpsrv_up = 0
self.ansi_re = re.compile("\033\\[[^m]*m")
self.log_mutex = threading.Lock() self.log_mutex = threading.Lock()
self.next_day = 0 self.next_day = 0
self.tstack = 0.0
self.iphash = HMaccas(os.path.join(self.E.cfg, "iphash"), 8)
# for non-http clients (ftp)
self.bans: dict[str, int] = {}
self.gpwd = Garda(self.args.ban_pw)
self.g404 = Garda(self.args.ban_404)
if args.sss or args.s >= 3:
args.ss = True
args.no_dav = True
args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz"
args.ls = args.ls or "**,*,ln,p,r"
if args.ss or args.s >= 2:
args.s = True
args.no_logues = True
args.no_readme = True
args.unpost = 0
args.no_del = True
args.no_mv = True
args.hardlink = True
args.vague_403 = True
args.ban_404 = "50,60,1440"
args.nih = True
if args.s:
args.dotpart = True
args.no_thumb = True
args.no_mtag_ff = True
args.no_robots = True
args.force_js = True
self.log = self._log_disabled if args.q else self._log_enabled self.log = self._log_disabled if args.q else self._log_enabled
if args.lo:
self._setup_logfile(printed)
lg = logging.getLogger()
lh = HLog(self.log)
lg.handlers = [lh]
lg.setLevel(logging.DEBUG)
if args.stackmon:
start_stackmon(args.stackmon, 0)
if args.log_thrs:
start_log_thrs(self.log, args.log_thrs, 0)
if not args.use_fpool and args.j != 1:
args.no_fpool = True
t = "multithreading enabled with -j {}, so disabling fpool -- this can reduce upload performance on some filesystems"
self.log("root", t.format(args.j))
if not args.no_fpool and args.j != 1:
t = "WARNING: --use-fpool combined with multithreading is untested and can probably cause undefined behavior"
if ANYWIN:
t = 'windows cannot do multithreading without --no-fpool, so enabling that -- note that upload performance will suffer if you have microsoft defender "real-time protection" enabled, so you probably want to use -j 1 instead'
args.no_fpool = True
self.log("root", t, c=3)
bri = "zy"[args.theme % 2 :][:1]
ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)]
args.theme = "{0}{1} {0} {1}".format(ch, bri)
if not args.hardlink and args.never_symlink:
args.no_dedup = True
if args.log_fk:
args.log_fk = re.compile(args.log_fk)
# initiate all services to manage # initiate all services to manage
self.asrv = AuthSrv(self.args, self.log) self.asrv = AuthSrv(self.args, self.log, False)
if args.ls: if args.ls:
self.asrv.dbg_ls() self.asrv.dbg_ls()
if not ANYWIN:
self._setlimits()
self.log("root", "max clients: {}".format(self.args.nc))
if not self._process_config():
raise Exception("bad config")
self.tcpsrv = TcpSrv(self) self.tcpsrv = TcpSrv(self)
self.up2k = Up2k(self) self.up2k = Up2k(self)
decs = {k: 1 for k in self.args.th_dec.split(",")}
if not HAVE_VIPS:
decs.pop("vips", None)
if not HAVE_PIL:
decs.pop("pil", None)
if not HAVE_FFMPEG or not HAVE_FFPROBE:
decs.pop("ff", None)
self.args.th_dec = list(decs.keys())
self.thumbsrv = None self.thumbsrv = None
if not args.no_thumb: if not args.no_thumb:
t = ", ".join(self.args.th_dec) or "(None available)" if HAVE_PIL:
self.log("thumb", "decoder preference: {}".format(t)) if not HAVE_WEBP:
args.th_no_webp = True
msg = "setting --th-no-webp because either libwebp is not available or your Pillow is too old"
self.log("thumb", msg, c=3)
if "pil" in self.args.th_dec and not HAVE_WEBP:
msg = "disabling webp thumbnails because either libwebp is not available or your Pillow is too old"
self.log("thumb", msg, c=3)
if self.args.th_dec:
self.thumbsrv = ThumbSrv(self) self.thumbsrv = ThumbSrv(self)
else: else:
msg = "need either Pillow, pyvips, or FFmpeg to create thumbnails; for example:\n{0}{1} -m pip install --user Pillow\n{0}{1} -m pip install --user pyvips\n{0}apt install ffmpeg" msg = "need Pillow to create thumbnails; for example:\n{}{} -m pip install --user Pillow\n"
msg = msg.format(" " * 37, os.path.basename(sys.executable)) self.log(
self.log("thumb", msg, c=3) "thumb", msg.format(" " * 37, os.path.basename(sys.executable)), c=3
)
if not args.no_acode and args.no_thumb:
msg = "setting --no-acode because --no-thumb (sorry)"
self.log("thumb", msg, c=6)
args.no_acode = True
if not args.no_acode and (not HAVE_FFMPEG or not HAVE_FFPROBE):
msg = "setting --no-acode because either FFmpeg or FFprobe is not available"
self.log("thumb", msg, c=6)
args.no_acode = True
args.th_poke = min(args.th_poke, args.th_maxage, args.ac_maxage)
zms = ""
if not args.https_only:
zms += "d"
if not args.http_only:
zms += "D"
if args.ftp or args.ftps:
from .ftpd import Ftpd
self.ftpd = Ftpd(self)
zms += "f" if args.ftp else "F"
if args.smb:
# impacket.dcerpc is noisy about listen timeouts
sto = socket.getdefaulttimeout()
socket.setdefaulttimeout(None)
from .smbd import SMB
self.smbd = SMB(self)
socket.setdefaulttimeout(sto)
self.smbd.start()
zms += "s"
if not args.zms:
args.zms = zms
self.mdns: Optional["MDNS"] = None
self.ssdp: Optional["SSDPd"] = None
# decide which worker impl to use # decide which worker impl to use
if self.check_mp_enable(): if self.check_mp_enable():
from .broker_mp import BrokerMp as Broker from .broker_mp import BrokerMp as Broker
else: else:
from .broker_thr import BrokerThr as Broker # type: ignore self.log("root", "cannot efficiently use multiple CPU cores")
from .broker_thr import BrokerThr as Broker
self.broker = Broker(self) self.broker = Broker(self)
def thr_httpsrv_up(self) -> None: def run(self):
time.sleep(1 if self.args.ign_ebind_all else 5) thr = threading.Thread(target=self.tcpsrv.run, name="svchub-main")
expected = self.broker.num_workers * self.tcpsrv.nsrv thr.daemon = True
failed = expected - self.httpsrv_up thr.start()
if not failed:
return
if self.args.ign_ebind_all: # winxp/py2.7 support: thr.join() kills signals
if not self.tcpsrv.srv:
for _ in range(self.broker.num_workers):
self.broker.say("cb_httpsrv_up")
return
if self.args.ign_ebind and self.tcpsrv.srv:
return
t = "{}/{} workers failed to start"
t = t.format(failed, expected)
self.log("root", t, 1)
self.retcode = 1
self.sigterm()
def sigterm(self) -> None:
os.kill(os.getpid(), signal.SIGTERM)
def cb_httpsrv_up(self) -> None:
self.httpsrv_up += 1
if self.httpsrv_up != self.broker.num_workers:
return
time.sleep(0.1) # purely cosmetic dw
if self.tcpsrv.qr:
self.log("qr-code", self.tcpsrv.qr)
else:
self.log("root", "workers OK\n")
self.up2k.init_vols()
Daemon(self.sd_notify, "sd-notify")
def _process_config(self) -> bool:
al = self.args
al.zm_on = al.zm_on or al.z_on
al.zs_on = al.zs_on or al.z_on
al.zm_off = al.zm_off or al.z_off
al.zs_off = al.zs_off or al.z_off
for n in ("zm_on", "zm_off", "zs_on", "zs_off"):
vs = getattr(al, n).split(",")
vs = [x.strip() for x in vs]
vs = [x for x in vs if x]
setattr(al, n, vs)
R = al.rp_loc
if "//" in R or ":" in R:
t = "found URL in --rp-loc; it should be just the location, for example /foo/bar"
raise Exception(t)
al.R = R = R.strip("/")
al.SR = "/" + R if R else ""
al.RS = R + "/" if R else ""
return True
def _setlimits(self) -> None:
try: try:
import resource while True:
time.sleep(9001)
soft, hard = [ except KeyboardInterrupt:
x if x > 0 else 1024 * 1024 with self.log_mutex:
for x in list(resource.getrlimit(resource.RLIMIT_NOFILE)) print("OPYTHAT")
]
except:
self.log("root", "failed to read rlimits from os", 6)
return
if not soft or not hard:
t = "got bogus rlimits from os ({}, {})"
self.log("root", t.format(soft, hard), 6)
return
want = self.args.nc * 4
new_soft = min(hard, want)
if new_soft < soft:
return
# t = "requesting rlimit_nofile({}), have {}"
# self.log("root", t.format(new_soft, soft), 6)
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
soft = new_soft
except:
t = "rlimit denied; max open files: {}"
self.log("root", t.format(soft), 3)
return
if soft < want:
t = "max open files: {} (wanted {} for -nc {})"
self.log("root", t.format(soft, want, self.args.nc), 3)
self.args.nc = min(self.args.nc, soft // 2)
def _logname(self) -> str:
dt = datetime.utcnow()
fn = str(self.args.lo)
for fs in "YmdHMS":
fs = "%" + fs
if fs in fn:
fn = fn.replace(fs, dt.strftime(fs))
return fn
def _setup_logfile(self, printed: str) -> None:
base_fn = fn = sel_fn = self._logname()
if fn != self.args.lo:
ctr = 0
# yup this is a race; if started sufficiently concurrently, two
# copyparties can grab the same logfile (considered and ignored)
while os.path.exists(sel_fn):
ctr += 1
sel_fn = "{}.{}".format(fn, ctr)
fn = sel_fn
try:
if fn.lower().endswith(".xz"):
import lzma
lh = lzma.open(fn, "wt", encoding="utf-8", errors="replace", preset=0)
else:
lh = open(fn, "wt", encoding="utf-8", errors="replace")
except:
import codecs
lh = codecs.open(fn, "w", encoding="utf-8", errors="replace")
argv = [sys.executable] + self.argv
if hasattr(shlex, "quote"):
argv = [shlex.quote(x) for x in argv]
else:
argv = ['"{}"'.format(x) for x in argv]
msg = "[+] opened logfile [{}]\n".format(fn)
printed += msg
t = "t0: {:.3f}\nargv: {}\n\n{}"
lh.write(t.format(self.E.t0, " ".join(argv), printed))
self.logf = lh
self.logf_base_fn = base_fn
print(msg, end="")
def run(self) -> None:
self.tcpsrv.run()
if getattr(self.args, "zm", False):
try:
from .mdns import MDNS
self.mdns = MDNS(self)
Daemon(self.mdns.run, "mdns")
except:
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
if getattr(self.args, "zs", False):
try:
from .ssdp import SSDPd
self.ssdp = SSDPd(self)
Daemon(self.ssdp.run, "ssdp")
except:
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
Daemon(self.thr_httpsrv_up, "sig-hsrv-up2")
sigs = [signal.SIGINT, signal.SIGTERM]
if not ANYWIN:
sigs.append(signal.SIGUSR1)
for sig in sigs:
signal.signal(sig, self.signal_handler)
# macos hangs after shutdown on sigterm with while-sleep,
# windows cannot ^c stop_cond (and win10 does the macos thing but winxp is fine??)
# linux is fine with both,
# never lucky
if ANYWIN:
# msys-python probably fine but >msys-python
Daemon(self.stop_thr, "svchub-sig")
try:
while not self.stop_req:
time.sleep(1)
except:
pass
self.shutdown()
# cant join; eats signals on win10
while not self.stopped:
time.sleep(0.1)
else:
self.stop_thr()
def reload(self) -> str:
if self.reloading:
return "cannot reload; already in progress"
self.reloading = True
Daemon(self._reload, "reloading")
return "reload initiated"
def _reload(self) -> None:
self.log("root", "reload scheduled")
with self.up2k.mutex:
self.asrv.reload()
self.up2k.reload()
self.broker.reload()
self.reloading = False
def stop_thr(self) -> None:
while not self.stop_req:
with self.stop_cond:
self.stop_cond.wait(9001)
if self.reload_req:
self.reload_req = False
self.reload()
self.shutdown()
def kill9(self, delay: float = 0.0) -> None:
if delay > 0.01:
time.sleep(delay)
print("component stuck; issuing sigkill")
time.sleep(0.1)
if ANYWIN:
os.system("taskkill /f /pid {}".format(os.getpid()))
else:
os.kill(os.getpid(), signal.SIGKILL)
def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None:
if self.stopping:
if self.nsigs <= 0:
try:
threading.Thread(target=self.pr, args=("OMBO BREAKER",)).start()
time.sleep(0.1)
except:
pass
self.kill9()
else:
self.nsigs -= 1
return
if not ANYWIN and sig == signal.SIGUSR1:
self.reload_req = True
else:
self.stop_req = True
with self.stop_cond:
self.stop_cond.notify_all()
def shutdown(self) -> None:
if self.stopping:
return
# start_log_thrs(print, 0.1, 1)
self.stopping = True
self.stop_req = True
with self.stop_cond:
self.stop_cond.notify_all()
ret = 1
try:
self.pr("OPYTHAT")
slp = 0.0
if self.mdns:
Daemon(self.mdns.stop)
slp = time.time() + 0.5
if self.ssdp:
Daemon(self.ssdp.stop)
slp = time.time() + 0.5
self.broker.shutdown()
self.tcpsrv.shutdown() self.tcpsrv.shutdown()
self.up2k.shutdown() self.broker.shutdown()
if self.thumbsrv: if self.thumbsrv:
self.thumbsrv.shutdown() self.thumbsrv.shutdown()
@@ -548,79 +94,41 @@ class SvcHub(object):
break break
if n == 3: if n == 3:
self.pr("waiting for thumbsrv (10sec)...") print("waiting for thumbsrv (10sec)...")
if hasattr(self, "smbd"): print("nailed it", end="")
slp = max(slp, time.time() + 0.5)
Daemon(self.kill9, a=(1,))
Daemon(self.smbd.stop)
while time.time() < slp:
time.sleep(0.1)
self.pr("nailed it", end="")
ret = self.retcode
except:
self.pr("\033[31m[ error during shutdown ]\n{}\033[0m".format(min_ex()))
raise
finally: finally:
if self.args.wintitle: print("\033[0m")
print("\033]0;\033\\", file=sys.stderr, end="")
sys.stderr.flush()
self.pr("\033[0m") def _log_disabled(self, src, msg, c=0):
if self.logf: pass
self.logf.close()
self.stopped = True def _log_enabled(self, src, msg, c=0):
sys.exit(ret)
def _log_disabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
if not self.logf:
return
with self.log_mutex:
ts = datetime.utcnow().strftime("%Y-%m%d-%H%M%S.%f")[:-3]
self.logf.write("@{} [{}\033[0m] {}\n".format(ts, src, msg))
now = time.time()
if now >= self.next_day:
self._set_next_day()
def _set_next_day(self) -> None:
if self.next_day and self.logf and self.logf_base_fn != self._logname():
self.logf.close()
self._setup_logfile("")
dt = datetime.utcnow()
# unix timestamp of next 00:00:00 (leap-seconds safe)
day_now = dt.day
while dt.day == day_now:
dt += timedelta(hours=12)
dt = dt.replace(hour=0, minute=0, second=0)
self.next_day = calendar.timegm(dt.utctimetuple())
def _log_enabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
"""handles logging from all components""" """handles logging from all components"""
with self.log_mutex: with self.log_mutex:
now = time.time() now = time.time()
if now >= self.next_day: if now >= self.next_day:
dt = datetime.utcfromtimestamp(now) dt = datetime.utcfromtimestamp(now)
print("\033[36m{}\033[0m\n".format(dt.strftime("%Y-%m-%d")), end="") print("\033[36m{}\033[0m\n".format(dt.strftime("%Y-%m-%d")), end="")
self._set_next_day()
# unix timestamp of next 00:00:00 (leap-seconds safe)
day_now = dt.day
while dt.day == day_now:
dt += timedelta(hours=12)
dt = dt.replace(hour=0, minute=0, second=0)
self.next_day = calendar.timegm(dt.utctimetuple())
fmt = "\033[36m{} \033[33m{:21} \033[0m{}\n" fmt = "\033[36m{} \033[33m{:21} \033[0m{}\n"
if not VT100: if not VT100:
fmt = "{} {:21} {}\n" fmt = "{} {:21} {}\n"
if "\033" in msg: if "\033" in msg:
msg = ansi_re.sub("", msg) msg = self.ansi_re.sub("", msg)
if "\033" in src: if "\033" in src:
src = ansi_re.sub("", src) src = self.ansi_re.sub("", src)
elif c: elif c:
if isinstance(c, int): if isinstance(c, int):
msg = "\033[3{}m{}\033[0m".format(c, msg) msg = "\033[3{}m{}".format(c, msg)
elif "\033" not in c: elif "\033" not in c:
msg = "\033[{}m{}\033[0m".format(c, msg) msg = "\033[{}m{}\033[0m".format(c, msg)
else: else:
@@ -636,38 +144,38 @@ class SvcHub(object):
except: except:
print(msg.encode("ascii", "replace").decode(), end="") print(msg.encode("ascii", "replace").decode(), end="")
if self.logf: def check_mp_support(self):
self.logf.write(msg) vmin = sys.version_info[1]
if WINDOWS:
def pr(self, *a: Any, **ka: Any) -> None: msg = "need python 3.3 or newer for multiprocessing;"
with self.log_mutex: if PY2:
print(*a, **ka) # py2 pickler doesn't support winsock
return msg
def check_mp_support(self) -> str: elif vmin < 3:
if MACOS: return msg
elif MACOS:
return "multiprocessing is wonky on mac osx;" return "multiprocessing is wonky on mac osx;"
elif sys.version_info < (3, 3): else:
return "need python 3.3 or newer for multiprocessing;" msg = "need python 2.7 or 3.3+ for multiprocessing;"
if not PY2 and vmin < 3:
return msg
try: try:
x: mp.Queue[tuple[str, str]] = mp.Queue(1) x = mp.Queue(1)
x.put(("foo", "bar")) x.put(["foo", "bar"])
if x.get()[0] != "foo": if x.get()[0] != "foo":
raise Exception() raise Exception()
except: except:
return "multiprocessing is not supported on your platform;" return "multiprocessing is not supported on your platform;"
return "" return None
def check_mp_enable(self) -> bool: def check_mp_enable(self):
if self.args.j == 1: if self.args.j == 1:
self.log("root", "multiprocessing disabled by argument -j 1;")
return False return False
try: if mp.cpu_count() <= 1:
if mp.cpu_count() <= 1:
raise Exception()
except:
self.log("svchub", "only one CPU detected; multiprocessing disabled")
return False return False
try: try:
@@ -681,38 +189,5 @@ class SvcHub(object):
if not err: if not err:
return True return True
else: else:
self.log("svchub", err) self.log("root", err)
self.log("svchub", "cannot efficiently use multiple CPU cores")
return False return False
def sd_notify(self) -> None:
    """tell systemd we are ready (sends READY=1 on $NOTIFY_SOCKET); best-effort,
    any failure is logged and otherwise ignored"""
    try:
        zb = os.getenv("NOTIFY_SOCKET")
        if not zb:
            # not running under systemd (or Type!=notify); nothing to do
            return

        addr = unicode(zb)
        if addr.startswith("@"):
            # "@" prefix denotes the abstract socket namespace (leading NUL byte)
            addr = "\0" + addr[1:]

        t = "".join(x for x in addr if x in string.printable)
        self.log("sd_notify", t)

        sck = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            sck.connect(addr)
            sck.sendall(b"READY=1")
        finally:
            # fix: the socket was previously leaked; always release the fd
            sck.close()
    except:
        self.log("sd_notify", min_ex())
def log_stacks(self) -> None:
    """dump a gzip+base64 snapshot of all thread stacks into the log,
    rate-limited to once per 300 seconds"""
    elapsed = time.time() - self.tstack
    if elapsed < 300:
        self.log("stacks", "cooldown {}".format(elapsed))
        return

    self.tstack = time.time()
    report = "{}\n{}".format(VERSIONS, alltrace())
    packed = gzip.compress(report.encode("utf-8", "replace"))
    self.log("stacks", base64.b64encode(packed).decode("ascii"))

View File

@@ -1,23 +1,18 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import calendar import os
import time import time
import stat
import zlib import zlib
import struct
from datetime import datetime
from .bos import bos from .sutil import errdesc
from .sutil import StreamArc, errdesc from .util import yieldfile, sanitize_fn
from .util import min_ex, sanitize_fn, spack, sunpack, yieldfile
if True: # pylint: disable=using-constant-test
from typing import Any, Generator, Optional
from .util import NamedLogger
def dostime2unix(buf: bytes) -> int: def dostime2unix(buf):
t, d = sunpack(b"<HH", buf) t, d = struct.unpack("<HH", buf)
ts = (t & 0x1F) * 2 ts = (t & 0x1F) * 2
tm = (t >> 5) & 0x3F tm = (t >> 5) & 0x3F
@@ -31,38 +26,27 @@ def dostime2unix(buf: bytes) -> int:
tf = "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}" tf = "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}"
iso = tf.format(*tt) iso = tf.format(*tt)
dt = time.strptime(iso, "%Y-%m-%d %H:%M:%S") dt = datetime.strptime(iso, "%Y-%m-%d %H:%M:%S")
return int(calendar.timegm(dt)) return int(dt.timestamp())
def unixtime2dos(ts: int) -> bytes: def unixtime2dos(ts):
tt = time.gmtime(ts + 1) tt = time.gmtime(ts)
dy, dm, dd, th, tm, ts = list(tt)[:6] dy, dm, dd, th, tm, ts = list(tt)[:6]
bd = ((dy - 1980) << 9) + (dm << 5) + dd bd = ((dy - 1980) << 9) + (dm << 5) + dd
bt = (th << 11) + (tm << 5) + ts // 2 bt = (th << 11) + (tm << 5) + ts // 2
try: return struct.pack("<HH", bt, bd)
return spack(b"<HH", bt, bd)
except:
return b"\x00\x00\x21\x00"
def gen_fdesc(sz: int, crc32: int, z64: bool) -> bytes: def gen_fdesc(sz, crc32, z64):
ret = b"\x50\x4b\x07\x08" ret = b"\x50\x4b\x07\x08"
fmt = b"<LQQ" if z64 else b"<LLL" fmt = "<LQQ" if z64 else "<LLL"
ret += spack(fmt, crc32, sz, sz) ret += struct.pack(fmt, crc32, sz, sz)
return ret return ret
def gen_hdr( def gen_hdr(h_pos, fn, sz, lastmod, utf8, crc32, pre_crc):
h_pos: Optional[int],
fn: str,
sz: int,
lastmod: int,
utf8: bool,
icrc32: int,
pre_crc: bool,
) -> bytes:
""" """
does regular file headers does regular file headers
and the central directory meme if h_pos is set and the central directory meme if h_pos is set
@@ -81,8 +65,8 @@ def gen_hdr(
# confusingly this doesn't bump if h_pos # confusingly this doesn't bump if h_pos
req_ver = b"\x2d\x00" if z64 else b"\x0a\x00" req_ver = b"\x2d\x00" if z64 else b"\x0a\x00"
if icrc32: if crc32:
crc32 = spack(b"<L", icrc32) crc32 = struct.pack("<L", crc32)
else: else:
crc32 = b"\x00" * 4 crc32 = b"\x00" * 4
@@ -90,7 +74,7 @@ def gen_hdr(
# 4b magic, 2b min-ver # 4b magic, 2b min-ver
ret = b"\x50\x4b\x03\x04" + req_ver ret = b"\x50\x4b\x03\x04" + req_ver
else: else:
# 4b magic, 2b spec-ver (1b compat, 1b os (00 dos, 03 unix)), 2b min-ver # 4b magic, 2b spec-ver, 2b min-ver
ret = b"\x50\x4b\x01\x02\x1e\x03" + req_ver ret = b"\x50\x4b\x01\x02\x1e\x03" + req_ver
ret += b"\x00" if pre_crc else b"\x08" # streaming ret += b"\x00" if pre_crc else b"\x08" # streaming
@@ -103,49 +87,36 @@ def gen_hdr(
# however infozip does actual sz and it even works on winxp # however infozip does actual sz and it even works on winxp
# (same reasning for z64 extradata later) # (same reasning for z64 extradata later)
vsz = 0xFFFFFFFF if z64 else sz vsz = 0xFFFFFFFF if z64 else sz
ret += spack(b"<LL", vsz, vsz) ret += struct.pack("<LL", vsz, vsz)
# windows support (the "?" replace below too) # windows support (the "?" replace below too)
fn = sanitize_fn(fn, "/", []) fn = sanitize_fn(fn, ok="/")
bfn = fn.encode("utf-8" if utf8 else "cp437", "replace").replace(b"?", b"_") bfn = fn.encode("utf-8" if utf8 else "cp437", "replace").replace(b"?", b"_")
# add ntfs (0x24) and/or unix (0x10) extrafields for utc, add z64 if requested
z64_len = len(z64v) * 8 + 4 if z64v else 0 z64_len = len(z64v) * 8 + 4 if z64v else 0
ret += spack(b"<HH", len(bfn), 0x10 + z64_len) ret += struct.pack("<HH", len(bfn), z64_len)
if h_pos is not None: if h_pos is not None:
# 2b comment, 2b diskno # 2b comment, 2b diskno
ret += b"\x00" * 4 ret += b"\x00" * 4
# 2b internal.attr, 4b external.attr # 2b internal.attr, 4b external.attr
# infozip-macos: 0100 0000 a481 (spec-ver 1e03) file:644 # infozip-macos: 0100 0000 a481 file:644
# infozip-macos: 0100 0100 0080 (spec-ver 1e03) file:000 # infozip-macos: 0100 0100 0080 file:000
# win10-zip: 0000 2000 0000 (spec-ver xx00) FILE_ATTRIBUTE_ARCHIVE ret += b"\x01\x00\x00\x00\xa4\x81"
ret += b"\x00\x00\x00\x00\xa4\x81" # unx
# ret += b"\x00\x00\x20\x00\x00\x00" # fat
# 4b local-header-ofs # 4b local-header-ofs
ret += spack(b"<L", min(h_pos, 0xFFFFFFFF)) ret += struct.pack("<L", min(h_pos, 0xFFFFFFFF))
ret += bfn ret += bfn
# ntfs: type 0a, size 20, rsvd, attr1, len 18, mtime, atime, ctime
# b"\xa3\x2f\x82\x41\x55\x68\xd8\x01" 1652616838.798941100 ~5.861518 132970904387989411 ~58615181
# nt = int((lastmod + 11644473600) * 10000000)
# ret += spack(b"<HHLHHQQQ", 0xA, 0x20, 0, 1, 0x18, nt, nt, nt)
# unix: type 0d, size 0c, atime, mtime, uid, gid
ret += spack(b"<HHLLHH", 0xD, 0xC, int(lastmod), int(lastmod), 1000, 1000)
if z64v: if z64v:
ret += spack(b"<HH" + b"Q" * len(z64v), 1, len(z64v) * 8, *z64v) ret += struct.pack("<HH" + "Q" * len(z64v), 1, len(z64v) * 8, *z64v)
return ret return ret
def gen_ecdr( def gen_ecdr(items, cdir_pos, cdir_end):
items: list[tuple[str, int, int, int, int]], cdir_pos: int, cdir_end: int
) -> tuple[bytes, bool]:
""" """
summary of all file headers, summary of all file headers,
usually the zipfile footer unless something clamps usually the zipfile footer unless something clamps
@@ -165,17 +136,15 @@ def gen_ecdr(
need_64 = nitems == 0xFFFF or 0xFFFFFFFF in [csz, cpos] need_64 = nitems == 0xFFFF or 0xFFFFFFFF in [csz, cpos]
# 2b tnfiles, 2b dnfiles, 4b dir sz, 4b dir pos # 2b tnfiles, 2b dnfiles, 4b dir sz, 4b dir pos
ret += spack(b"<HHLL", nitems, nitems, csz, cpos) ret += struct.pack("<HHLL", nitems, nitems, csz, cpos)
# 2b comment length # 2b comment length
ret += b"\x00\x00" ret += b"\x00\x00"
return ret, need_64 return [ret, need_64]
def gen_ecdr64( def gen_ecdr64(items, cdir_pos, cdir_end):
items: list[tuple[str, int, int, int, int]], cdir_pos: int, cdir_end: int
) -> bytes:
""" """
z64 end of central directory z64 end of central directory
added when numfiles or a headerptr clamps added when numfiles or a headerptr clamps
@@ -194,12 +163,12 @@ def gen_ecdr64(
# 8b tnfiles, 8b dnfiles, 8b dir sz, 8b dir pos # 8b tnfiles, 8b dnfiles, 8b dir sz, 8b dir pos
cdir_sz = cdir_end - cdir_pos cdir_sz = cdir_end - cdir_pos
ret += spack(b"<QQQQ", len(items), len(items), cdir_sz, cdir_pos) ret += struct.pack("<QQQQ", len(items), len(items), cdir_sz, cdir_pos)
return ret return ret
def gen_ecdr64_loc(ecdr64_pos: int) -> bytes: def gen_ecdr64_loc(ecdr64_pos):
""" """
z64 end of central directory locator z64 end of central directory locator
points to ecdr64 points to ecdr64
@@ -209,44 +178,35 @@ def gen_ecdr64_loc(ecdr64_pos: int) -> bytes:
ret = b"\x50\x4b\x06\x07" ret = b"\x50\x4b\x06\x07"
# 4b cdisk, 8b start of ecdr64, 4b ndisks # 4b cdisk, 8b start of ecdr64, 4b ndisks
ret += spack(b"<LQL", 0, ecdr64_pos, 1) ret += struct.pack("<LQL", 0, ecdr64_pos, 1)
return ret return ret
class StreamZip(StreamArc): class StreamZip(object):
def __init__( def __init__(self, fgen, utf8=False, pre_crc=False):
self, self.fgen = fgen
log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None],
utf8: bool = False,
pre_crc: bool = False,
) -> None:
super(StreamZip, self).__init__(log, fgen)
self.utf8 = utf8 self.utf8 = utf8
self.pre_crc = pre_crc self.pre_crc = pre_crc
self.pos = 0 self.pos = 0
self.items: list[tuple[str, int, int, int, int]] = [] self.items = []
def _ct(self, buf: bytes) -> bytes: def _ct(self, buf):
self.pos += len(buf) self.pos += len(buf)
return buf return buf
def ser(self, f: dict[str, Any]) -> Generator[bytes, None, None]: def ser(self, f):
name = f["vp"] name = f["vp"]
src = f["ap"] src = f["ap"]
st = f["st"] st = f["st"]
if stat.S_ISDIR(st.st_mode):
return
sz = st.st_size sz = st.st_size
ts = st.st_mtime ts = st.st_mtime + 1
crc = 0 crc = None
if self.pre_crc: if self.pre_crc:
crc = 0
for buf in yieldfile(src): for buf in yieldfile(src):
crc = zlib.crc32(buf, crc) crc = zlib.crc32(buf, crc)
@@ -256,6 +216,7 @@ class StreamZip(StreamArc):
buf = gen_hdr(None, name, sz, ts, self.utf8, crc, self.pre_crc) buf = gen_hdr(None, name, sz, ts, self.utf8, crc, self.pre_crc)
yield self._ct(buf) yield self._ct(buf)
crc = crc or 0
for buf in yieldfile(src): for buf in yieldfile(src):
if not self.pre_crc: if not self.pre_crc:
crc = zlib.crc32(buf, crc) crc = zlib.crc32(buf, crc)
@@ -264,7 +225,7 @@ class StreamZip(StreamArc):
crc &= 0xFFFFFFFF crc &= 0xFFFFFFFF
self.items.append((name, sz, ts, crc, h_pos)) self.items.append([name, sz, ts, crc, h_pos])
z64 = sz >= 4 * 1024 * 1024 * 1024 z64 = sz >= 4 * 1024 * 1024 * 1024
@@ -272,47 +233,42 @@ class StreamZip(StreamArc):
buf = gen_fdesc(sz, crc, z64) buf = gen_fdesc(sz, crc, z64)
yield self._ct(buf) yield self._ct(buf)
def gen(self) -> Generator[bytes, None, None]: def gen(self):
errf: dict[str, Any] = {}
errors = [] errors = []
try: for f in self.fgen:
for f in self.fgen: if "err" in f:
if "err" in f: errors.append([f["vp"], f["err"]])
errors.append((f["vp"], f["err"])) continue
continue
try: try:
for x in self.ser(f): for x in self.ser(f):
yield x
except GeneratorExit:
raise
except:
ex = min_ex(5, True).replace("\n", "\n-- ")
errors.append((f["vp"], ex))
if errors:
errf, txt = errdesc(errors)
self.log("\n".join(([repr(errf)] + txt[1:])))
for x in self.ser(errf):
yield x yield x
except Exception as ex:
errors.append([f["vp"], repr(ex)])
cdir_pos = self.pos if errors:
for name, sz, ts, crc, h_pos in self.items: errf = errdesc(errors)
buf = gen_hdr(h_pos, name, sz, ts, self.utf8, crc, self.pre_crc) print(repr(errf))
yield self._ct(buf) for x in self.ser(errf):
cdir_end = self.pos yield x
_, need_64 = gen_ecdr(self.items, cdir_pos, cdir_end) cdir_pos = self.pos
if need_64: for name, sz, ts, crc, h_pos in self.items:
ecdir64_pos = self.pos buf = gen_hdr(h_pos, name, sz, ts, self.utf8, crc, self.pre_crc)
buf = gen_ecdr64(self.items, cdir_pos, cdir_end) yield self._ct(buf)
yield self._ct(buf) cdir_end = self.pos
buf = gen_ecdr64_loc(ecdir64_pos) _, need_64 = gen_ecdr(self.items, cdir_pos, cdir_end)
yield self._ct(buf) if need_64:
ecdir64_pos = self.pos
buf = gen_ecdr64(self.items, cdir_pos, cdir_end)
yield self._ct(buf)
ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end) buf = gen_ecdr64_loc(ecdir64_pos)
yield self._ct(ecdr) yield self._ct(buf)
finally:
if errf: ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end)
bos.unlink(errf["ap"]) yield self._ct(ecdr)
if errors:
os.unlink(errf["ap"])

Some files were not shown because too many files have changed in this diff Show More