Compare commits
193 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fbc2424e8f | ||
|
|
94cd13e8b8 | ||
|
|
447ed5ab37 | ||
|
|
af59808611 | ||
|
|
e3406a9f86 | ||
|
|
7fd1d6a4e8 | ||
|
|
0ab2a665de | ||
|
|
3895575bc2 | ||
|
|
138c2bbcbb | ||
|
|
bc7af1d1c8 | ||
|
|
19cd96e392 | ||
|
|
db194ab519 | ||
|
|
02ad4bfab2 | ||
|
|
56b73dcc8a | ||
|
|
7704b9c8a2 | ||
|
|
999b7ae919 | ||
|
|
252b5a88b1 | ||
|
|
01e2681a07 | ||
|
|
aa32f30202 | ||
|
|
195eb53995 | ||
|
|
06fa78f54a | ||
|
|
7a57c9dbf1 | ||
|
|
bb657bfa85 | ||
|
|
87181726b0 | ||
|
|
f1477a1c14 | ||
|
|
4f94a9e38b | ||
|
|
fbed322d3b | ||
|
|
9b0f519e4e | ||
|
|
6cd6dadd06 | ||
|
|
9a28afcb48 | ||
|
|
45b701801d | ||
|
|
062246fb12 | ||
|
|
416ebfdd68 | ||
|
|
731eb92f33 | ||
|
|
dbe2aec79c | ||
|
|
cd9cafe3a1 | ||
|
|
067cc23346 | ||
|
|
c573a780e9 | ||
|
|
8ef4a0aa71 | ||
|
|
89ba12065c | ||
|
|
99efc290df | ||
|
|
2fbdc0a85e | ||
|
|
4242422898 | ||
|
|
008d9b1834 | ||
|
|
7c76d08958 | ||
|
|
89c9f45fd0 | ||
|
|
f107497a94 | ||
|
|
b5dcf30e53 | ||
|
|
0cef062084 | ||
|
|
5c30148be4 | ||
|
|
3a800585bc | ||
|
|
29c212a60e | ||
|
|
2997baa7cb | ||
|
|
dc6bde594d | ||
|
|
e357aa546c | ||
|
|
d3fe19c5aa | ||
|
|
bd24bf9bae | ||
|
|
ee141544aa | ||
|
|
db6f6e6a23 | ||
|
|
c7d950dd5e | ||
|
|
6a96c62fde | ||
|
|
36dc8cd686 | ||
|
|
7622601a77 | ||
|
|
cfd41fcf41 | ||
|
|
f39e370e2a | ||
|
|
c1315a3b39 | ||
|
|
53b32f97e8 | ||
|
|
6c962ec7d3 | ||
|
|
6bc1bc542f | ||
|
|
f0e78a6826 | ||
|
|
e53531a9fb | ||
|
|
5cd9d11329 | ||
|
|
5a3e504ec4 | ||
|
|
d6e09c3880 | ||
|
|
04f44c3c7c | ||
|
|
ec587423e8 | ||
|
|
f57b31146d | ||
|
|
35175fd685 | ||
|
|
d326ba9723 | ||
|
|
ab655a56af | ||
|
|
d1eb113ea8 | ||
|
|
74effa9b8d | ||
|
|
bba4b1c663 | ||
|
|
8709d4dba0 | ||
|
|
4ad4657774 | ||
|
|
5abe0c955c | ||
|
|
0cedaf4fa9 | ||
|
|
0aa7d12704 | ||
|
|
a234aa1f7e | ||
|
|
9f68287846 | ||
|
|
cd2513ec16 | ||
|
|
91d132c2b4 | ||
|
|
97ff0ebd06 | ||
|
|
8829f56d4c | ||
|
|
37c1cab726 | ||
|
|
b3eb117e87 | ||
|
|
fc0a941508 | ||
|
|
c72753c5da | ||
|
|
e442cb677a | ||
|
|
450121eac9 | ||
|
|
b2ab8f971e | ||
|
|
e9c6268568 | ||
|
|
2170ee8da4 | ||
|
|
357e7333cc | ||
|
|
8bb4f02601 | ||
|
|
4213efc7a6 | ||
|
|
67a744c3e8 | ||
|
|
98818e7d63 | ||
|
|
8650ce1295 | ||
|
|
9638267b4c | ||
|
|
304e053155 | ||
|
|
89d1f52235 | ||
|
|
3312c6f5bd | ||
|
|
d4ba644d07 | ||
|
|
b9a504fd3a | ||
|
|
cebac523dc | ||
|
|
c2f4090318 | ||
|
|
d562956809 | ||
|
|
62499f9b71 | ||
|
|
89cf7608f9 | ||
|
|
dd26b8f183 | ||
|
|
79303dac6d | ||
|
|
4203fc161b | ||
|
|
f8a31cc24f | ||
|
|
fc5bfe81a0 | ||
|
|
aae14de796 | ||
|
|
54e1c8d261 | ||
|
|
a0cc4ca4b7 | ||
|
|
2701108c5b | ||
|
|
73bd2df2c6 | ||
|
|
0063021012 | ||
|
|
1c3e4750b3 | ||
|
|
edad3246e0 | ||
|
|
3411b0993f | ||
|
|
097b5609dc | ||
|
|
a42af7655e | ||
|
|
69f78b86af | ||
|
|
5f60c509c6 | ||
|
|
75e5e53276 | ||
|
|
4b2b4ed52d | ||
|
|
fb21bfd6d6 | ||
|
|
f14369e038 | ||
|
|
ff04b72f62 | ||
|
|
4535a81617 | ||
|
|
cce57b700b | ||
|
|
5b6194d131 | ||
|
|
2701238cea | ||
|
|
835f8a20e6 | ||
|
|
f3a501db30 | ||
|
|
4bcd30da6b | ||
|
|
947dbb6f8a | ||
|
|
1c2fedd2bf | ||
|
|
32e826efbc | ||
|
|
138b932c6a | ||
|
|
6da2f53aad | ||
|
|
20eeacaac3 | ||
|
|
81d896be9f | ||
|
|
c003dfab03 | ||
|
|
20c6b82bec | ||
|
|
046b494b53 | ||
|
|
f0e98d6e0d | ||
|
|
fe57321853 | ||
|
|
8510804e57 | ||
|
|
acd32abac5 | ||
|
|
2b47c96cf2 | ||
|
|
1027378bda | ||
|
|
e979d30659 | ||
|
|
574db704cc | ||
|
|
fdb969ea89 | ||
|
|
08977854b3 | ||
|
|
cecac64b68 | ||
|
|
7dabdade2a | ||
|
|
e788f098e2 | ||
|
|
69406d4344 | ||
|
|
d16dd26c65 | ||
|
|
12219c1bea | ||
|
|
118bdcc26e | ||
|
|
78fa96f0f4 | ||
|
|
c7deb63a04 | ||
|
|
4f811eb9e9 | ||
|
|
0b265bd673 | ||
|
|
ee67fabbeb | ||
|
|
b213de7e62 | ||
|
|
7c01505750 | ||
|
|
ae28dfd020 | ||
|
|
2a5a4e785f | ||
|
|
d8bddede6a | ||
|
|
b8a93e74bf | ||
|
|
e60ec94d35 | ||
|
|
84af5fd0a3 | ||
|
|
dbb3edec77 | ||
|
|
d284b46a3e | ||
|
|
9fcb4d222b |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -22,6 +22,7 @@ copyparty.egg-info/
|
|||||||
*.bak
|
*.bak
|
||||||
|
|
||||||
# derived
|
# derived
|
||||||
|
copyparty/res/COPYING.txt
|
||||||
copyparty/web/deps/
|
copyparty/web/deps/
|
||||||
srv/
|
srv/
|
||||||
|
|
||||||
|
|||||||
8
.vscode/launch.py
vendored
Normal file → Executable file
8
.vscode/launch.py
vendored
Normal file → Executable file
@@ -1,3 +1,5 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
# takes arguments from launch.json
|
# takes arguments from launch.json
|
||||||
# is used by no_dbg in tasks.json
|
# is used by no_dbg in tasks.json
|
||||||
# launches 10x faster than mspython debugpy
|
# launches 10x faster than mspython debugpy
|
||||||
@@ -9,15 +11,15 @@ import sys
|
|||||||
|
|
||||||
print(sys.executable)
|
print(sys.executable)
|
||||||
|
|
||||||
|
import json5
|
||||||
import shlex
|
import shlex
|
||||||
import jstyleson
|
|
||||||
import subprocess as sp
|
import subprocess as sp
|
||||||
|
|
||||||
|
|
||||||
with open(".vscode/launch.json", "r", encoding="utf-8") as f:
|
with open(".vscode/launch.json", "r", encoding="utf-8") as f:
|
||||||
tj = f.read()
|
tj = f.read()
|
||||||
|
|
||||||
oj = jstyleson.loads(tj)
|
oj = json5.loads(tj)
|
||||||
argv = oj["configurations"][0]["args"]
|
argv = oj["configurations"][0]["args"]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -28,6 +30,8 @@ except:
|
|||||||
|
|
||||||
argv = [os.path.expanduser(x) if x.startswith("~") else x for x in argv]
|
argv = [os.path.expanduser(x) if x.startswith("~") else x for x in argv]
|
||||||
|
|
||||||
|
argv += sys.argv[1:]
|
||||||
|
|
||||||
if re.search(" -j ?[0-9]", " ".join(argv)):
|
if re.search(" -j ?[0-9]", " ".join(argv)):
|
||||||
argv = [sys.executable, "-m", "copyparty"] + argv
|
argv = [sys.executable, "-m", "copyparty"] + argv
|
||||||
sp.check_call(argv)
|
sp.check_call(argv)
|
||||||
|
|||||||
548
README.md
548
README.md
@@ -8,9 +8,9 @@
|
|||||||
|
|
||||||
turn your phone or raspi into a portable file server with resumable uploads/downloads using *any* web browser
|
turn your phone or raspi into a portable file server with resumable uploads/downloads using *any* web browser
|
||||||
|
|
||||||
* server only needs `py2.7` or `py3.3+`, all dependencies optional
|
* server only needs Python (`2.7` or `3.3+`), all dependencies optional
|
||||||
* browse/upload with [IE4](#browser-support) / netscape4.0 on win3.11 (heh)
|
* browse/upload with [IE4](#browser-support) / netscape4.0 on win3.11 (heh)
|
||||||
* *resumable* uploads need `firefox 34+` / `chrome 41+` / `safari 7+`
|
* protocols: [http](#the-browser) // [ftp](#ftp-server) // [webdav](#webdav-server) // [smb/cifs](#smb-server)
|
||||||
|
|
||||||
try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running from a basement in finland
|
try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running from a basement in finland
|
||||||
|
|
||||||
@@ -30,16 +30,17 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
|
|||||||
* [quickstart](#quickstart) - download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set!
|
* [quickstart](#quickstart) - download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set!
|
||||||
* [on servers](#on-servers) - you may also want these, especially on servers
|
* [on servers](#on-servers) - you may also want these, especially on servers
|
||||||
* [on debian](#on-debian) - recommended additional steps on debian
|
* [on debian](#on-debian) - recommended additional steps on debian
|
||||||
* [notes](#notes) - general notes
|
* [features](#features)
|
||||||
* [status](#status) - feature summary
|
|
||||||
* [testimonials](#testimonials) - small collection of user feedback
|
* [testimonials](#testimonials) - small collection of user feedback
|
||||||
* [motivations](#motivations) - project goals / philosophy
|
* [motivations](#motivations) - project goals / philosophy
|
||||||
* [future plans](#future-plans) - some improvement ideas
|
* [notes](#notes) - general notes
|
||||||
* [bugs](#bugs)
|
* [bugs](#bugs)
|
||||||
* [general bugs](#general-bugs)
|
* [general bugs](#general-bugs)
|
||||||
* [not my bugs](#not-my-bugs)
|
* [not my bugs](#not-my-bugs)
|
||||||
|
* [breaking changes](#breaking-changes) - upgrade notes
|
||||||
* [FAQ](#FAQ) - "frequently" asked questions
|
* [FAQ](#FAQ) - "frequently" asked questions
|
||||||
* [accounts and volumes](#accounts-and-volumes) - per-folder, per-user permissions
|
* [accounts and volumes](#accounts-and-volumes) - per-folder, per-user permissions
|
||||||
|
* [shadowing](#shadowing) - hiding specific subfolders
|
||||||
* [the browser](#the-browser) - accessing a copyparty server using a web-browser
|
* [the browser](#the-browser) - accessing a copyparty server using a web-browser
|
||||||
* [tabs](#tabs) - the main tabs in the ui
|
* [tabs](#tabs) - the main tabs in the ui
|
||||||
* [hotkeys](#hotkeys) - the browser has the following hotkeys
|
* [hotkeys](#hotkeys) - the browser has the following hotkeys
|
||||||
@@ -56,7 +57,14 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
|
|||||||
* [other tricks](#other-tricks)
|
* [other tricks](#other-tricks)
|
||||||
* [searching](#searching) - search by size, date, path/name, mp3-tags, ...
|
* [searching](#searching) - search by size, date, path/name, mp3-tags, ...
|
||||||
* [server config](#server-config) - using arguments or config files, or a mix of both
|
* [server config](#server-config) - using arguments or config files, or a mix of both
|
||||||
* [ftp-server](#ftp-server) - an FTP server can be started using `--ftp 3921`
|
* [zeroconf](#zeroconf) - announce enabled services on the LAN
|
||||||
|
* [mdns](#mdns) - LAN domain-name and feature announcer
|
||||||
|
* [ssdp](#ssdp) - windows-explorer announcer
|
||||||
|
* [qr-code](#qr-code) - print a qr-code [(screenshot)](https://user-images.githubusercontent.com/241032/194728533-6f00849b-c6ac-43c6-9359-83e454d11e00.png) for quick access
|
||||||
|
* [ftp server](#ftp-server) - an FTP server can be started using `--ftp 3921`
|
||||||
|
* [webdav server](#webdav-server) - with read-write support
|
||||||
|
* [connecting to webdav from windows](#connecting-to-webdav-from-windows) - using the GUI
|
||||||
|
* [smb server](#smb-server) - unsafe, slow, not recommended for wan
|
||||||
* [file indexing](#file-indexing) - enables dedup and music search ++
|
* [file indexing](#file-indexing) - enables dedup and music search ++
|
||||||
* [exclude-patterns](#exclude-patterns) - to save some time
|
* [exclude-patterns](#exclude-patterns) - to save some time
|
||||||
* [filesystem guards](#filesystem-guards) - avoid traversing into other filesystems
|
* [filesystem guards](#filesystem-guards) - avoid traversing into other filesystems
|
||||||
@@ -66,15 +74,15 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
|
|||||||
* [other flags](#other-flags)
|
* [other flags](#other-flags)
|
||||||
* [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else
|
* [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else
|
||||||
* [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload
|
* [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload
|
||||||
* [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags, also see [./bin/mtag/README.md](./bin/mtag/README.md)
|
* [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags
|
||||||
* [upload events](#upload-events) - trigger a script/program on each upload
|
* [upload events](#upload-events) - trigger a script/program on each upload
|
||||||
* [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed
|
* [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed
|
||||||
* [themes](#themes)
|
* [themes](#themes)
|
||||||
* [complete examples](#complete-examples)
|
* [complete examples](#complete-examples)
|
||||||
|
* [reverse-proxy](#reverse-proxy) - running copyparty next to other websites
|
||||||
* [browser support](#browser-support) - TLDR: yes
|
* [browser support](#browser-support) - TLDR: yes
|
||||||
* [client examples](#client-examples) - interact with copyparty using non-browser clients
|
* [client examples](#client-examples) - interact with copyparty using non-browser clients
|
||||||
* [up2k](#up2k) - quick outline of the up2k protocol, see [uploading](#uploading) for the web-client
|
* [mount as drive](#mount-as-drive) - a remote copyparty server as a local filesystem
|
||||||
* [why chunk-hashes](#why-chunk-hashes) - a single sha512 would be better, right?
|
|
||||||
* [performance](#performance) - defaults are usually fine - expect `8 GiB/s` download, `1 GiB/s` upload
|
* [performance](#performance) - defaults are usually fine - expect `8 GiB/s` download, `1 GiB/s` upload
|
||||||
* [client-side](#client-side) - when uploading files
|
* [client-side](#client-side) - when uploading files
|
||||||
* [security](#security) - some notes on hardening
|
* [security](#security) - some notes on hardening
|
||||||
@@ -82,40 +90,32 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
|
|||||||
* [recovering from crashes](#recovering-from-crashes)
|
* [recovering from crashes](#recovering-from-crashes)
|
||||||
* [client crashes](#client-crashes)
|
* [client crashes](#client-crashes)
|
||||||
* [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads
|
* [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads
|
||||||
* [HTTP API](#HTTP-API)
|
* [HTTP API](#HTTP-API) - see [devnotes](#./docs/devnotes.md#http-api)
|
||||||
* [read](#read)
|
|
||||||
* [write](#write)
|
|
||||||
* [admin](#admin)
|
|
||||||
* [general](#general)
|
|
||||||
* [dependencies](#dependencies) - mandatory deps
|
* [dependencies](#dependencies) - mandatory deps
|
||||||
* [optional dependencies](#optional-dependencies) - install these to enable bonus features
|
* [optional dependencies](#optional-dependencies) - install these to enable bonus features
|
||||||
* [install recommended deps](#install-recommended-deps)
|
* [install recommended deps](#install-recommended-deps)
|
||||||
* [optional gpl stuff](#optional-gpl-stuff)
|
* [optional gpl stuff](#optional-gpl-stuff)
|
||||||
* [sfx](#sfx) - the self-contained "binary"
|
* [sfx](#sfx) - the self-contained "binary"
|
||||||
* [sfx repack](#sfx-repack) - reduce the size of an sfx by removing features
|
* [copyparty.exe](#copypartyexe) - download [copyparty.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty.exe) or [copyparty64.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty64.exe)
|
||||||
* [install on android](#install-on-android)
|
* [install on android](#install-on-android)
|
||||||
* [reporting bugs](#reporting-bugs) - ideas for context to include in bug reports
|
* [reporting bugs](#reporting-bugs) - ideas for context to include in bug reports
|
||||||
* [building](#building)
|
* [devnotes](#devnotes) - for build instructions etc, see [./docs/devnotes.md](./docs/devnotes.md)
|
||||||
* [dev env setup](#dev-env-setup)
|
|
||||||
* [just the sfx](#just-the-sfx)
|
|
||||||
* [complete release](#complete-release)
|
|
||||||
* [todo](#todo) - roughly sorted by priority
|
|
||||||
* [discarded ideas](#discarded-ideas)
|
|
||||||
|
|
||||||
|
|
||||||
## quickstart
|
## quickstart
|
||||||
|
|
||||||
download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set!
|
download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set!
|
||||||
|
|
||||||
|
if you cannot install python, you can use [copyparty.exe](#copypartyexe) instead
|
||||||
|
|
||||||
running the sfx without arguments (for example doubleclicking it on Windows) will give everyone read/write access to the current folder; you may want [accounts and volumes](#accounts-and-volumes)
|
running the sfx without arguments (for example doubleclicking it on Windows) will give everyone read/write access to the current folder; you may want [accounts and volumes](#accounts-and-volumes)
|
||||||
|
|
||||||
some recommended options:
|
some recommended options:
|
||||||
* `-e2dsa` enables general [file indexing](#file-indexing)
|
* `-e2dsa` enables general [file indexing](#file-indexing)
|
||||||
* `-e2ts` enables audio metadata indexing (needs either FFprobe or Mutagen), see [optional dependencies](#optional-dependencies)
|
* `-e2ts` enables audio metadata indexing (needs either FFprobe or Mutagen), see [optional dependencies](#optional-dependencies) to enable thumbnails and more
|
||||||
* `-v /mnt/music:/music:r:rw,foo -a foo:bar` shares `/mnt/music` as `/music`, `r`eadable by anyone, and read-write for user `foo`, password `bar`
|
* `-v /mnt/music:/music:r:rw,foo -a foo:bar` shares `/mnt/music` as `/music`, `r`eadable by anyone, and read-write for user `foo`, password `bar`
|
||||||
* replace `:r:rw,foo` with `:r,foo` to only make the folder readable by `foo` and nobody else
|
* replace `:r:rw,foo` with `:r,foo` to only make the folder readable by `foo` and nobody else
|
||||||
* see [accounts and volumes](#accounts-and-volumes) for the syntax and other permissions (`r`ead, `w`rite, `m`ove, `d`elete, `g`et)
|
* see [accounts and volumes](#accounts-and-volumes) for the syntax and other permissions (`r`ead, `w`rite, `m`ove, `d`elete, `g`et, up`G`et)
|
||||||
* `--ls '**,*,ln,p,r'` to crash on startup if any of the volumes contain a symlink which point outside the volume, as that could give users unintended access (see `--help-ls`)
|
|
||||||
|
|
||||||
|
|
||||||
### on servers
|
### on servers
|
||||||
@@ -124,8 +124,16 @@ you may also want these, especially on servers:
|
|||||||
|
|
||||||
* [contrib/systemd/copyparty.service](contrib/systemd/copyparty.service) to run copyparty as a systemd service
|
* [contrib/systemd/copyparty.service](contrib/systemd/copyparty.service) to run copyparty as a systemd service
|
||||||
* [contrib/systemd/prisonparty.service](contrib/systemd/prisonparty.service) to run it in a chroot (for extra security)
|
* [contrib/systemd/prisonparty.service](contrib/systemd/prisonparty.service) to run it in a chroot (for extra security)
|
||||||
* [contrib/nginx/copyparty.conf](contrib/nginx/copyparty.conf) to reverse-proxy behind nginx (for better https)
|
* [contrib/nginx/copyparty.conf](contrib/nginx/copyparty.conf) to [reverse-proxy](#reverse-proxy) behind nginx (for better https)
|
||||||
|
|
||||||
|
and remember to open the ports you want; here's a complete example including every feature copyparty has to offer:
|
||||||
|
```
|
||||||
|
firewall-cmd --permanent --add-port={80,443,3921,3923,3945,3990}/tcp # --zone=libvirt
|
||||||
|
firewall-cmd --permanent --add-port=12000-12099/tcp --permanent # --zone=libvirt
|
||||||
|
firewall-cmd --permanent --add-port={1900,5353}/udp # --zone=libvirt
|
||||||
|
firewall-cmd --reload
|
||||||
|
```
|
||||||
|
(1900:ssdp, 3921:ftp, 3923:http/https, 3945:smb, 3990:ftps, 5353:mdns, 12000:passive-ftp)
|
||||||
|
|
||||||
### on debian
|
### on debian
|
||||||
|
|
||||||
@@ -140,30 +148,18 @@ recommended additional steps on debian which enable audio metadata and thumbnai
|
|||||||
(skipped `pyheif-pillow-opener` because apparently debian is too old to build it)
|
(skipped `pyheif-pillow-opener` because apparently debian is too old to build it)
|
||||||
|
|
||||||
|
|
||||||
## notes
|
## features
|
||||||
|
|
||||||
general notes:
|
|
||||||
* paper-printing is affected by dark/light-mode! use lightmode for color, darkmode for grayscale
|
|
||||||
* because no browsers currently implement the media-query to do this properly orz
|
|
||||||
|
|
||||||
browser-specific:
|
|
||||||
* iPhone/iPad: use Firefox to download files
|
|
||||||
* Android-Chrome: increase "parallel uploads" for higher speed (android bug)
|
|
||||||
* Android-Firefox: takes a while to select files (their fix for ☝️)
|
|
||||||
* Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now*
|
|
||||||
* Desktop-Firefox: may stop you from deleting files you've uploaded until you visit `about:memory` and click `Minimize memory usage`
|
|
||||||
|
|
||||||
|
|
||||||
## status
|
|
||||||
|
|
||||||
feature summary
|
|
||||||
|
|
||||||
* backend stuff
|
* backend stuff
|
||||||
* ☑ sanic multipart parser
|
* ☑ IPv6
|
||||||
* ☑ multiprocessing (actual multithreading)
|
* ☑ [multiprocessing](#performance) (actual multithreading)
|
||||||
* ☑ volumes (mountpoints)
|
* ☑ volumes (mountpoints)
|
||||||
* ☑ [accounts](#accounts-and-volumes)
|
* ☑ [accounts](#accounts-and-volumes)
|
||||||
* ☑ [ftp-server](#ftp-server)
|
* ☑ [ftp server](#ftp-server)
|
||||||
|
* ☑ [webdav server](#webdav-server)
|
||||||
|
* ☑ [smb/cifs server](#smb-server)
|
||||||
|
* ☑ [qr-code](#qr-code) for quick access
|
||||||
|
* ☑ [upnp / zeroconf / mdns / ssdp](#zeroconf)
|
||||||
* upload
|
* upload
|
||||||
* ☑ basic: plain multipart, ie6 support
|
* ☑ basic: plain multipart, ie6 support
|
||||||
* ☑ [up2k](#uploading): js, resumable, multithreaded
|
* ☑ [up2k](#uploading): js, resumable, multithreaded
|
||||||
@@ -174,7 +170,7 @@ feature summary
|
|||||||
* download
|
* download
|
||||||
* ☑ single files in browser
|
* ☑ single files in browser
|
||||||
* ☑ [folders as zip / tar files](#zip-downloads)
|
* ☑ [folders as zip / tar files](#zip-downloads)
|
||||||
* ☑ [FUSE client](https://github.com/9001/copyparty/tree/hovudstraum/bin#copyparty-fusepy) (read-only)
|
* ☑ [FUSE client](https://github.com/9001/copyparty/tree/hovudstraum/bin#partyfusepy) (read-only)
|
||||||
* browser
|
* browser
|
||||||
* ☑ [navpane](#navpane) (directory tree sidebar)
|
* ☑ [navpane](#navpane) (directory tree sidebar)
|
||||||
* ☑ file manager (cut/paste, delete, [batch-rename](#batch-rename))
|
* ☑ file manager (cut/paste, delete, [batch-rename](#batch-rename))
|
||||||
@@ -223,20 +219,21 @@ project goals / philosophy
|
|||||||
* no build steps; modify the js/python without needing node.js or anything like that
|
* no build steps; modify the js/python without needing node.js or anything like that
|
||||||
|
|
||||||
|
|
||||||
## future plans
|
## notes
|
||||||
|
|
||||||
some improvement ideas
|
general notes:
|
||||||
|
* paper-printing is affected by dark/light-mode! use lightmode for color, darkmode for grayscale
|
||||||
|
* because no browsers currently implement the media-query to do this properly orz
|
||||||
|
|
||||||
* the JS is a mess -- a preact rewrite would be nice
|
browser-specific:
|
||||||
* preferably without build dependencies like webpack/babel/node.js, maybe a python thing to assemble js files into main.js
|
* iPhone/iPad: use Firefox to download files
|
||||||
* good excuse to look at using virtual lists (browsers start to struggle when folders contain over 5000 files)
|
* Android-Chrome: increase "parallel uploads" for higher speed (android bug)
|
||||||
* the UX is a mess -- a proper design would be nice
|
* Android-Firefox: takes a while to select files (their fix for ☝️)
|
||||||
* very organic (much like the python/js), everything was an afterthought
|
* Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now*
|
||||||
* true for both the layout and the visual flair
|
* Desktop-Firefox: may stop you from deleting files you've uploaded until you visit `about:memory` and click `Minimize memory usage`
|
||||||
* something like the tron board-room ui (or most other hollywood ones, like ironman) would be :100:
|
|
||||||
* some of the python files are way too big
|
server-os-specific:
|
||||||
* `up2k.py` ended up doing all the file indexing / db management
|
* RHEL8 / Rocky8: you can run copyparty using `/usr/libexec/platform-python`
|
||||||
* `httpcli.py` should be separated into modules in general
|
|
||||||
|
|
||||||
|
|
||||||
# bugs
|
# bugs
|
||||||
@@ -286,6 +283,15 @@ some improvement ideas
|
|||||||
* due to snap security policies -- see `snap connections firefox` for the allowlist, `removable-media` permits all of `/mnt` and `/media` apparently
|
* due to snap security policies -- see `snap connections firefox` for the allowlist, `removable-media` permits all of `/mnt` and `/media` apparently
|
||||||
|
|
||||||
|
|
||||||
|
# breaking changes
|
||||||
|
|
||||||
|
upgrade notes
|
||||||
|
|
||||||
|
* `1.5.0` (2022-12-03): [new chunksize formula](https://github.com/9001/copyparty/commit/54e1c8d261df) for files larger than 128 GiB
|
||||||
|
* **users:** upgrade to the latest [cli uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) if you use that
|
||||||
|
* **devs:** update third-party up2k clients (if those even exist)
|
||||||
|
|
||||||
|
|
||||||
# FAQ
|
# FAQ
|
||||||
|
|
||||||
"frequently" asked questions
|
"frequently" asked questions
|
||||||
@@ -318,6 +324,7 @@ permissions:
|
|||||||
* `m` (move): move files/folders *from* this folder
|
* `m` (move): move files/folders *from* this folder
|
||||||
* `d` (delete): delete files/folders
|
* `d` (delete): delete files/folders
|
||||||
* `g` (get): only download files, cannot see folder contents or zip/tar
|
* `g` (get): only download files, cannot see folder contents or zip/tar
|
||||||
|
* `G` (upget): same as `g` except uploaders get to see their own filekeys (see `fk` in examples below)
|
||||||
|
|
||||||
examples:
|
examples:
|
||||||
* add accounts named u1, u2, u3 with passwords p1, p2, p3: `-a u1:p1 -a u2:p2 -a u3:p3`
|
* add accounts named u1, u2, u3 with passwords p1, p2, p3: `-a u1:p1 -a u2:p2 -a u3:p3`
|
||||||
@@ -328,14 +335,23 @@ examples:
|
|||||||
* unauthorized users accessing the webroot can see that the `inc` folder exists, but cannot open it
|
* unauthorized users accessing the webroot can see that the `inc` folder exists, but cannot open it
|
||||||
* `u1` can open the `inc` folder, but cannot see the contents, only upload new files to it
|
* `u1` can open the `inc` folder, but cannot see the contents, only upload new files to it
|
||||||
* `u2` can browse it and move files *from* `/inc` into any folder where `u2` has write-access
|
* `u2` can browse it and move files *from* `/inc` into any folder where `u2` has write-access
|
||||||
* make folder `/mnt/ss` available at `/i`, read-write for u1, get-only for everyone else, and enable accesskeys: `-v /mnt/ss:i:rw,u1:g:c,fk=4`
|
* make folder `/mnt/ss` available at `/i`, read-write for u1, get-only for everyone else, and enable filekeys: `-v /mnt/ss:i:rw,u1:g:c,fk=4`
|
||||||
* `c,fk=4` sets the `fk` volflag to 4, meaning each file gets a 4-character accesskey
|
* `c,fk=4` sets the `fk` (filekey) volflag to 4, meaning each file gets a 4-character accesskey
|
||||||
* `u1` can upload files, browse the folder, and see the generated accesskeys
|
* `u1` can upload files, browse the folder, and see the generated filekeys
|
||||||
* other users cannot browse the folder, but can access the files if they have the full file URL with the accesskey
|
* other users cannot browse the folder, but can access the files if they have the full file URL with the filekey
|
||||||
|
* replacing the `g` permission with `wg` would let anonymous users upload files, but not see the required filekey to access it
|
||||||
|
* replacing the `g` permission with `wG` would let anonymous users upload files, receiving a working direct link in return
|
||||||
|
|
||||||
anyone trying to bruteforce a password gets banned according to `--ban-pw`; default is 24h ban for 9 failed attempts in 1 hour
|
anyone trying to bruteforce a password gets banned according to `--ban-pw`; default is 24h ban for 9 failed attempts in 1 hour
|
||||||
|
|
||||||
|
|
||||||
|
## shadowing
|
||||||
|
|
||||||
|
hiding specific subfolders by mounting another volume on top of them
|
||||||
|
|
||||||
|
for example `-v /mnt::r -v /var/empty:web/certs:r` mounts the server folder `/mnt` as the webroot, but another volume is mounted at `/web/certs` -- so visitors can only see the contents of `/mnt` and `/mnt/web` (at URLs `/` and `/web`), but not `/mnt/web/certs` because URL `/web/certs` is mapped to `/var/empty`
|
||||||
|
|
||||||
|
|
||||||
# the browser
|
# the browser
|
||||||
|
|
||||||
accessing a copyparty server using a web-browser
|
accessing a copyparty server using a web-browser
|
||||||
@@ -359,6 +375,7 @@ the main tabs in the ui
|
|||||||
## hotkeys
|
## hotkeys
|
||||||
|
|
||||||
the browser has the following hotkeys (always qwerty)
|
the browser has the following hotkeys (always qwerty)
|
||||||
|
* `?` show hotkeys help
|
||||||
* `B` toggle breadcrumbs / [navpane](#navpane)
|
* `B` toggle breadcrumbs / [navpane](#navpane)
|
||||||
* `I/K` prev/next folder
|
* `I/K` prev/next folder
|
||||||
* `M` parent folder (or unexpand current)
|
* `M` parent folder (or unexpand current)
|
||||||
@@ -366,8 +383,10 @@ the browser has the following hotkeys (always qwerty)
|
|||||||
* `G` toggle list / [grid view](#thumbnails) -- same as `田` bottom-right
|
* `G` toggle list / [grid view](#thumbnails) -- same as `田` bottom-right
|
||||||
* `T` toggle thumbnails / icons
|
* `T` toggle thumbnails / icons
|
||||||
* `ESC` close various things
|
* `ESC` close various things
|
||||||
|
* `ctrl-K` delete selected files/folders
|
||||||
* `ctrl-X` cut selected files/folders
|
* `ctrl-X` cut selected files/folders
|
||||||
* `ctrl-V` paste
|
* `ctrl-V` paste
|
||||||
|
* `Y` download selected files
|
||||||
* `F2` [rename](#batch-rename) selected file/folder
|
* `F2` [rename](#batch-rename) selected file/folder
|
||||||
* when a file/folder is selected (in not-grid-view):
|
* when a file/folder is selected (in not-grid-view):
|
||||||
* `Up/Down` move cursor
|
* `Up/Down` move cursor
|
||||||
@@ -670,12 +689,56 @@ for the above example to work, add the commandline argument `-e2ts` to also scan
|
|||||||
# server config
|
# server config
|
||||||
|
|
||||||
using arguments or config files, or a mix of both:
|
using arguments or config files, or a mix of both:
|
||||||
* config files (`-c some.conf`) can set additional commandline arguments; see [./docs/example.conf](docs/example.conf)
|
* config files (`-c some.conf`) can set additional commandline arguments; see [./docs/example.conf](docs/example.conf) and [./docs/example2.conf](docs/example2.conf)
|
||||||
* `kill -s USR1` (same as `systemctl reload copyparty`) to reload accounts and volumes from config files without restarting
|
* `kill -s USR1` (same as `systemctl reload copyparty`) to reload accounts and volumes from config files without restarting
|
||||||
* or click the `[reload cfg]` button in the control-panel when logged in as admin
|
* or click the `[reload cfg]` button in the control-panel when logged in as admin
|
||||||
|
|
||||||
|
|
||||||
## ftp-server
|
## zeroconf
|
||||||
|
|
||||||
|
announce enabled services on the LAN if you specify the `-z` option, which enables [mdns](#mdns) and [ssdp](#ssdp)
|
||||||
|
|
||||||
|
* `--z-on` / `--z-off` limits the feature to certain networks
|
||||||
|
|
||||||
|
|
||||||
|
### mdns
|
||||||
|
|
||||||
|
LAN domain-name and feature announcer
|
||||||
|
|
||||||
|
uses [multicast dns](https://en.wikipedia.org/wiki/Multicast_DNS) to give copyparty a domain which any machine on the LAN can use to access it
|
||||||
|
|
||||||
|
all enabled services ([webdav](#webdav-server), [ftp](#ftp-server), [smb](#smb-server)) will appear in mDNS-aware file managers (KDE, gnome, macOS, ...)
|
||||||
|
|
||||||
|
the domain will be http://partybox.local if the machine's hostname is `partybox` unless `--name` specifies something else
|
||||||
|
|
||||||
|
|
||||||
|
### ssdp
|
||||||
|
|
||||||
|
windows-explorer announcer
|
||||||
|
|
||||||
|
uses [ssdp](https://en.wikipedia.org/wiki/Simple_Service_Discovery_Protocol) to make copyparty appear in the windows file explorer on all machines on the LAN
|
||||||
|
|
||||||
|
doubleclicking the icon opens the "connect" page which explains how to mount copyparty as a local filesystem
|
||||||
|
|
||||||
|
if copyparty does not appear in windows explorer, use `--zsv` to see why:
|
||||||
|
|
||||||
|
* maybe the discovery multicast was sent from an IP which does not intersect with the server subnets
|
||||||
|
|
||||||
|
|
||||||
|
## qr-code
|
||||||
|
|
||||||
|
print a qr-code [(screenshot)](https://user-images.githubusercontent.com/241032/194728533-6f00849b-c6ac-43c6-9359-83e454d11e00.png) for quick access, great between phones on android hotspots which keep changing the subnet
|
||||||
|
|
||||||
|
* `--qr` enables it
|
||||||
|
* `--qrs` does https instead of http
|
||||||
|
* `--qrl lootbox/?pw=hunter2` appends to the url, linking to the `lootbox` folder with password `hunter2`
|
||||||
|
* `--qrz 1` forces 1x zoom instead of autoscaling to fit the terminal size
|
||||||
|
* 1x may render incorrectly on some terminals/fonts, but 2x should always work
|
||||||
|
|
||||||
|
it uses the server hostname if [mdns](#mdns) is enabled, otherwise it'll use your external ip (default route) unless `--qri` specifies a specific ip-prefix or domain
|
||||||
|
|
||||||
|
|
||||||
|
## ftp server
|
||||||
|
|
||||||
an FTP server can be started using `--ftp 3921`, and/or `--ftps` for explicit TLS (ftpes)
|
an FTP server can be started using `--ftp 3921`, and/or `--ftps` for explicit TLS (ftpes)
|
||||||
|
|
||||||
@@ -685,6 +748,79 @@ an FTP server can be started using `--ftp 3921`, and/or `--ftps` for explicit T
|
|||||||
* runs in active mode by default, you probably want `--ftp-pr 12000-13000`
|
* runs in active mode by default, you probably want `--ftp-pr 12000-13000`
|
||||||
* if you enable both `ftp` and `ftps`, the port-range will be divided in half
|
* if you enable both `ftp` and `ftps`, the port-range will be divided in half
|
||||||
* some older software (filezilla on debian-stable) cannot passive-mode with TLS
|
* some older software (filezilla on debian-stable) cannot passive-mode with TLS
|
||||||
|
* login with any username + your password, or put your password in the username field
|
||||||
|
|
||||||
|
|
||||||
|
## webdav server
|
||||||
|
|
||||||
|
with read-write support, supports winXP and later, macos, nautilus/gvfs
|
||||||
|
|
||||||
|
click the [connect](http://127.0.0.1:3923/?hc) button in the control-panel to see connection instructions for windows, linux, macos
|
||||||
|
|
||||||
|
general usage:
|
||||||
|
* login with any username + your password, or put your password in the username field (password field can be empty/whatever)
|
||||||
|
|
||||||
|
on macos, connect from finder:
|
||||||
|
* [Go] -> [Connect to Server...] -> http://192.168.123.1:3923/
|
||||||
|
|
||||||
|
|
||||||
|
### connecting to webdav from windows
|
||||||
|
|
||||||
|
using the GUI (winXP or later):
|
||||||
|
* rightclick [my computer] -> [map network drive] -> Folder: `http://192.168.123.1:3923/`
|
||||||
|
* on winXP only, click the `Sign up for online storage` hyperlink instead and put the URL there
|
||||||
|
* providing your password as the username is recommended; the password field can be anything or empty
|
||||||
|
|
||||||
|
known client bugs:
|
||||||
|
* win7+ doesn't actually send the password to the server when reauthenticating after a reboot unless you first try to login with an incorrect password and then switch to the correct password
|
||||||
|
* or just type your password into the username field instead to get around it entirely
|
||||||
|
* connecting to a folder which allows anonymous read will make writing impossible, as windows has decided it doesn't need to login
|
||||||
|
* workaround: connect twice; first to a folder which requires auth, then to the folder you actually want, and leave both of those mounted
|
||||||
|
* win7+ may open a new tcp connection for every file and sometimes forgets to close them, eventually needing a reboot
|
||||||
|
* maybe NIC-related (??), happens with win10-ltsc on e1000e but not virtio
|
||||||
|
* windows cannot access folders which contain filenames with invalid unicode or forbidden characters (`<>:"/\|?*`), or names ending with `.`
|
||||||
|
* winxp cannot show unicode characters outside of *some range*
|
||||||
|
* latin-1 is fine, hiragana is not (not even as shift-jis on japanese xp)
|
||||||
|
|
||||||
|
|
||||||
|
## smb server
|
||||||
|
|
||||||
|
unsafe, slow, not recommended for wan, enable with `--smb` for read-only or `--smbw` for read-write
|
||||||
|
|
||||||
|
click the [connect](http://127.0.0.1:3923/?hc) button in the control-panel to see connection instructions for windows, linux, macos
|
||||||
|
|
||||||
|
dependencies: `python3 -m pip install --user -U impacket==0.10.0`
|
||||||
|
* newer versions of impacket will hopefully work just fine but there is monkeypatching so maybe not
|
||||||
|
|
||||||
|
some **BIG WARNINGS** specific to SMB/CIFS, in decreasing importance:
|
||||||
|
* not entirely confident that read-only is read-only
|
||||||
|
* the smb backend is not fully integrated with vfs, meaning there could be security issues (path traversal). Please use `--smb-port` (see below) and [prisonparty](./bin/prisonparty.sh)
|
||||||
|
* account passwords work per-volume as expected, but account permissions are coalesced; all accounts have read-access to all volumes, and if a single account has write-access to some volume then all other accounts also do
|
||||||
|
* if no accounts have write-access to a specific volume, or if `--smbw` is not set, then writing to that volume from smb *should* be impossible
|
||||||
|
* will be fixed once [impacket v0.11.0](https://github.com/SecureAuthCorp/impacket/commit/d923c00f75d54b972bca573a211a82f09b55261a) is released
|
||||||
|
* [shadowing](#shadowing) probably works as expected but no guarantees
|
||||||
|
|
||||||
|
and some minor issues,
|
||||||
|
* clients only see the first ~400 files in big folders; [impacket#1433](https://github.com/SecureAuthCorp/impacket/issues/1433)
|
||||||
|
* hot-reload of server config (`/?reload=cfg`) only works for volumes, not account passwords
|
||||||
|
* listens on the first IPv4 `-i` interface only (default = :: = 0.0.0.0 = all)
|
||||||
|
* login doesn't work on winxp, but anonymous access is ok -- remove all accounts from copyparty config for that to work
|
||||||
|
* win10 onwards does not allow connecting anonymously / without accounts
|
||||||
|
* on windows, creating a new file through rightclick --> new --> textfile throws an error due to impacket limitations -- hit OK and F5 to get your file
|
||||||
|
* python3 only
|
||||||
|
* slow
|
||||||
|
|
||||||
|
known client bugs:
|
||||||
|
* on win7 only, `--smb1` is much faster than smb2 (default) because it keeps rescanning folders on smb2
|
||||||
|
* however smb1 is buggy and is not enabled by default on win10 onwards
|
||||||
|
* windows cannot access folders which contain filenames with invalid unicode or forbidden characters (`<>:"/\|?*`), or names ending with `.`
|
||||||
|
|
||||||
|
the smb protocol listens on TCP port 445, which is a privileged port on linux and macos, which would require running copyparty as root. However, this can be avoided by listening on another port using `--smb-port 3945` and then using NAT to forward the traffic from 445 to there;
|
||||||
|
* on linux: `iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 445 -j REDIRECT --to-port 3945`
|
||||||
|
|
||||||
|
authenticate with one of the following:
|
||||||
|
* username `$username`, password `$password`
|
||||||
|
* username `$password`, password `k`
|
||||||
|
|
||||||
|
|
||||||
## file indexing
|
## file indexing
|
||||||
@@ -703,6 +839,7 @@ through arguments:
|
|||||||
* `-e2v` verifies file integrity at startup, comparing hashes from the db
|
* `-e2v` verifies file integrity at startup, comparing hashes from the db
|
||||||
* `-e2vu` patches the database with the new hashes from the filesystem
|
* `-e2vu` patches the database with the new hashes from the filesystem
|
||||||
* `-e2vp` panics and kills copyparty instead
|
* `-e2vp` panics and kills copyparty instead
|
||||||
|
* `--xlink` enables deduplication across volumes
|
||||||
|
|
||||||
the same arguments can be set as volflags, in addition to `d2d`, `d2ds`, `d2t`, `d2ts`, `d2v` for disabling:
|
the same arguments can be set as volflags, in addition to `d2d`, `d2ds`, `d2t`, `d2ts`, `d2v` for disabling:
|
||||||
* `-v ~/music::r:c,e2dsa,e2tsr` does a full reindex of everything on startup
|
* `-v ~/music::r:c,e2dsa,e2tsr` does a full reindex of everything on startup
|
||||||
@@ -931,6 +1068,21 @@ see the top of [./copyparty/web/browser.css](./copyparty/web/browser.css) where
|
|||||||
`-lo log/cpp-%Y-%m%d-%H%M%S.txt.xz`
|
`-lo log/cpp-%Y-%m%d-%H%M%S.txt.xz`
|
||||||
|
|
||||||
|
|
||||||
|
## reverse-proxy
|
||||||
|
|
||||||
|
running copyparty next to other websites hosted on an existing webserver such as nginx or apache
|
||||||
|
|
||||||
|
you can either:
|
||||||
|
* give copyparty its own domain or subdomain (recommended)
|
||||||
|
* or do location-based proxying, using `--rp-loc=/stuff` to tell copyparty where it is mounted -- has a slight performance cost and higher chance of bugs
|
||||||
|
* if copyparty says `incorrect --rp-loc or webserver config; expected vpath starting with [...]` it's likely because the webserver is stripping away the proxy location from the request URLs -- see the `ProxyPass` in the apache example below
|
||||||
|
|
||||||
|
example webserver configs:
|
||||||
|
|
||||||
|
* [nginx config](contrib/nginx/copyparty.conf) -- entire domain/subdomain
|
||||||
|
* [apache2 config](contrib/apache/copyparty.conf) -- location-based
|
||||||
|
|
||||||
|
|
||||||
# browser support
|
# browser support
|
||||||
|
|
||||||
TLDR: yes
|
TLDR: yes
|
||||||
@@ -1002,11 +1154,13 @@ interact with copyparty using non-browser clients
|
|||||||
* `(printf 'PUT / HTTP/1.1\r\n\r\n'; cat movie.mkv) >/dev/tcp/127.0.0.1/3923`
|
* `(printf 'PUT / HTTP/1.1\r\n\r\n'; cat movie.mkv) >/dev/tcp/127.0.0.1/3923`
|
||||||
|
|
||||||
* python: [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) is a command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
|
* python: [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) is a command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
|
||||||
* file uploads, file-search, autoresume of aborted/broken uploads
|
* file uploads, file-search, folder sync, autoresume of aborted/broken uploads
|
||||||
|
* can be downloaded from copyparty: controlpanel -> connect -> [up2k.py](http://127.0.0.1:3923/.cpr/a/up2k.py)
|
||||||
* see [./bin/README.md#up2kpy](bin/README.md#up2kpy)
|
* see [./bin/README.md#up2kpy](bin/README.md#up2kpy)
|
||||||
|
|
||||||
* FUSE: mount a copyparty server as a local filesystem
|
* FUSE: mount a copyparty server as a local filesystem
|
||||||
* cross-platform python client available in [./bin/](bin/)
|
* cross-platform python client available in [./bin/](bin/)
|
||||||
|
* can be downloaded from copyparty: controlpanel -> connect -> [partyfuse.py](http://127.0.0.1:3923/.cpr/a/partyfuse.py)
|
||||||
* [rclone](https://rclone.org/) as client can give ~5x performance, see [./docs/rclone.md](docs/rclone.md)
|
* [rclone](https://rclone.org/) as client can give ~5x performance, see [./docs/rclone.md](docs/rclone.md)
|
||||||
|
|
||||||
* sharex (screenshot utility): see [./contrib/sharex.sxcu](contrib/#sharexsxcu)
|
* sharex (screenshot utility): see [./contrib/sharex.sxcu](contrib/#sharexsxcu)
|
||||||
@@ -1021,47 +1175,22 @@ you can provide passwords using cookie `cppwd=hunter2`, as a url-param `?pw=hunt
|
|||||||
NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename
|
NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename
|
||||||
|
|
||||||
|
|
||||||
# up2k
|
## mount as drive
|
||||||
|
|
||||||
quick outline of the up2k protocol, see [uploading](#uploading) for the web-client
|
a remote copyparty server as a local filesystem; go to the control-panel and click `connect` to see a list of commands to do that
|
||||||
* the up2k client splits a file into an "optimal" number of chunks
|
|
||||||
* 1 MiB each, unless that becomes more than 256 chunks
|
|
||||||
* tries 1.5M, 2M, 3, 4, 6, ... until <= 256 chunks or size >= 32M
|
|
||||||
* client posts the list of hashes, filename, size, last-modified
|
|
||||||
* server creates the `wark`, an identifier for this upload
|
|
||||||
* `sha512( salt + filesize + chunk_hashes )`
|
|
||||||
* and a sparse file is created for the chunks to drop into
|
|
||||||
* client uploads each chunk
|
|
||||||
* header entries for the chunk-hash and wark
|
|
||||||
* server writes chunks into place based on the hash
|
|
||||||
* client does another handshake with the hashlist; server replies with OK or a list of chunks to reupload
|
|
||||||
|
|
||||||
up2k has saved a few uploads from becoming corrupted in-transfer already;
|
alternatively, some alternatives roughly sorted by speed (unreproducible benchmark), best first:
|
||||||
* caught an android phone on wifi redhanded in wireshark with a bitflip, however bup with https would *probably* have noticed as well (thanks to tls also functioning as an integrity check)
|
|
||||||
* also stopped someone from uploading because their ram was bad
|
|
||||||
|
|
||||||
regarding the frequent server log message during uploads;
|
* [rclone-http](./docs/rclone.md) (25s), read-only
|
||||||
`6.0M 106M/s 2.77G 102.9M/s n948 thank 4/0/3/1 10042/7198 00:01:09`
|
* [rclone-ftp](./docs/rclone.md) (47s), read/WRITE
|
||||||
* this chunk was `6 MiB`, uploaded at `106 MiB/s`
|
* [rclone-webdav](./docs/rclone.md) (51s), read/WRITE
|
||||||
* on this http connection, `2.77 GiB` transferred, `102.9 MiB/s` average, `948` chunks handled
|
* copyparty-1.5.0's webdav server is faster than rclone-1.60.0 (69s)
|
||||||
* client says `4` uploads OK, `0` failed, `3` busy, `1` queued, `10042 MiB` total size, `7198 MiB` and `00:01:09` left
|
* [partyfuse.py](./bin/#partyfusepy) (71s), read-only
|
||||||
|
* davfs2 (103s), read/WRITE, *very fast* on small files
|
||||||
|
* [win10-webdav](#webdav-server) (138s), read/WRITE
|
||||||
|
* [win10-smb2](#smb-server) (387s), read/WRITE
|
||||||
|
|
||||||
|
most clients will fail to mount the root of a copyparty server unless there is a root volume (so you get the admin-panel instead of a browser when accessing it) -- in that case, mount a specific volume instead
|
||||||
## why chunk-hashes
|
|
||||||
|
|
||||||
a single sha512 would be better, right?
|
|
||||||
|
|
||||||
this is due to `crypto.subtle` [not yet](https://github.com/w3c/webcrypto/issues/73) providing a streaming api (or the option to seed the sha512 hasher with a starting hash)
|
|
||||||
|
|
||||||
as a result, the hashes are much less useful than they could have been (search the server by sha512, provide the sha512 in the response http headers, ...)
|
|
||||||
|
|
||||||
however it allows for hashing multiple chunks in parallel, greatly increasing upload speed from fast storage (NVMe, raid-0 and such)
|
|
||||||
|
|
||||||
* both the [browser uploader](#uploading) and the [commandline one](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) does this now, allowing for fast uploading even from plaintext http
|
|
||||||
|
|
||||||
hashwasm would solve the streaming issue but reduces hashing speed for sha512 (xxh128 does 6 GiB/s), and it would make old browsers and [iphones](https://bugs.webkit.org/show_bug.cgi?id=228552) unsupported
|
|
||||||
|
|
||||||
* blake2 might be a better choice since xxh is non-cryptographic, but that gets ~15 MiB/s on slower androids
|
|
||||||
|
|
||||||
|
|
||||||
# performance
|
# performance
|
||||||
@@ -1074,7 +1203,7 @@ below are some tweaks roughly ordered by usefulness:
|
|||||||
* `--http-only` or `--https-only` (unless you want to support both protocols) will reduce the delay before a new connection is established
|
* `--http-only` or `--https-only` (unless you want to support both protocols) will reduce the delay before a new connection is established
|
||||||
* `--hist` pointing to a fast location (ssd) will make directory listings and searches faster when `-e2d` or `-e2t` is set
|
* `--hist` pointing to a fast location (ssd) will make directory listings and searches faster when `-e2d` or `-e2t` is set
|
||||||
* `--no-hash .` when indexing a network-disk if you don't care about the actual filehashes and only want the names/tags searchable
|
* `--no-hash .` when indexing a network-disk if you don't care about the actual filehashes and only want the names/tags searchable
|
||||||
* `--no-htp --hash-mt=0 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger)
|
* `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger)
|
||||||
* `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example:
|
* `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example:
|
||||||
* huge amount of short-lived connections
|
* huge amount of short-lived connections
|
||||||
* really heavy traffic (downloads/uploads)
|
* really heavy traffic (downloads/uploads)
|
||||||
@@ -1113,19 +1242,21 @@ some notes on hardening
|
|||||||
* `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support
|
* `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support
|
||||||
* `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance
|
* `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance
|
||||||
* however note if you edit one file it will also affect the other copies
|
* however note if you edit one file it will also affect the other copies
|
||||||
* `--vague-403` returns a "404 not found" instead of "403 forbidden" which is a common enterprise meme
|
* `--vague-401` returns a "404 not found" instead of "401 unauthorized" which is a common enterprise meme
|
||||||
* `--ban-404=50,60,1440` ban client for 1440min (24h) if they hit 50 404's in 60min
|
* `--ban-404=50,60,1440` ban client for 1440min (24h) if they hit 50 404's in 60min
|
||||||
* **NB:** will ban anyone who enables up2k turbo
|
* **NB:** will ban anyone who enables up2k turbo
|
||||||
* `--nih` removes the server hostname from directory listings
|
* `--nih` removes the server hostname from directory listings
|
||||||
|
|
||||||
* option `-sss` is a shortcut for the above plus:
|
* option `-sss` is a shortcut for the above plus:
|
||||||
|
* `--no-dav` disables webdav support
|
||||||
* `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk
|
* `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk
|
||||||
* `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks
|
* `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks
|
||||||
|
|
||||||
other misc notes:
|
other misc notes:
|
||||||
|
|
||||||
* you can disable directory listings by giving permission `g` instead of `r`, only accepting direct URLs to files
|
* you can disable directory listings by giving permission `g` instead of `r`, only accepting direct URLs to files
|
||||||
* combine this with volflag `c,fk` to generate per-file accesskeys; users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404
|
* combine this with volflag `c,fk` to generate filekeys (per-file accesskeys); users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404
|
||||||
|
* permissions `wG` lets users upload files and receive their own filekeys, still without being able to see other uploads
|
||||||
|
|
||||||
|
|
||||||
## gotchas
|
## gotchas
|
||||||
@@ -1157,90 +1288,7 @@ however you can hit `F12` in the up2k tab and use the devtools to see how far yo
|
|||||||
|
|
||||||
# HTTP API
|
# HTTP API
|
||||||
|
|
||||||
* table-column `params` = URL parameters; `?foo=bar&qux=...`
|
see [devnotes](./docs/devnotes.md#http-api)
|
||||||
* table-column `body` = POST payload
|
|
||||||
* method `jPOST` = json post
|
|
||||||
* method `mPOST` = multipart post
|
|
||||||
* method `uPOST` = url-encoded post
|
|
||||||
* `FILE` = conventional HTTP file upload entry (rfc1867 et al, filename in `Content-Disposition`)
|
|
||||||
|
|
||||||
authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
|
|
||||||
|
|
||||||
## read
|
|
||||||
|
|
||||||
| method | params | result |
|
|
||||||
|--|--|--|
|
|
||||||
| GET | `?ls` | list files/folders at URL as JSON |
|
|
||||||
| GET | `?ls&dots` | list files/folders at URL as JSON, including dotfiles |
|
|
||||||
| GET | `?ls=t` | list files/folders at URL as plaintext |
|
|
||||||
| GET | `?ls=v` | list files/folders at URL, terminal-formatted |
|
|
||||||
| GET | `?b` | list files/folders at URL as simplified HTML |
|
|
||||||
| GET | `?tree=.` | list one level of subdirectories inside URL |
|
|
||||||
| GET | `?tree` | list one level of subdirectories for each level until URL |
|
|
||||||
| GET | `?tar` | download everything below URL as a tar file |
|
|
||||||
| GET | `?zip=utf-8` | download everything below URL as a zip file |
|
|
||||||
| GET | `?ups` | show recent uploads from your IP |
|
|
||||||
| GET | `?ups&filter=f` | ...where URL contains `f` |
|
|
||||||
| GET | `?mime=foo` | specify return mimetype `foo` |
|
|
||||||
| GET | `?raw` | get markdown file at URL as plaintext |
|
|
||||||
| GET | `?txt` | get file at URL as plaintext |
|
|
||||||
| GET | `?txt=iso-8859-1` | ...with specific charset |
|
|
||||||
| GET | `?th` | get image/video at URL as thumbnail |
|
|
||||||
| GET | `?th=opus` | convert audio file to 128kbps opus |
|
|
||||||
| GET | `?th=caf` | ...in the iOS-proprietary container |
|
|
||||||
|
|
||||||
| method | body | result |
|
|
||||||
|--|--|--|
|
|
||||||
| jPOST | `{"q":"foo"}` | do a server-wide search; see the `[🔎]` search tab `raw` field for syntax |
|
|
||||||
|
|
||||||
| method | params | body | result |
|
|
||||||
|--|--|--|--|
|
|
||||||
| jPOST | `?tar` | `["foo","bar"]` | download folders `foo` and `bar` inside URL as a tar file |
|
|
||||||
|
|
||||||
## write
|
|
||||||
|
|
||||||
| method | params | result |
|
|
||||||
|--|--|--|
|
|
||||||
| GET | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar |
|
|
||||||
|
|
||||||
| method | params | body | result |
|
|
||||||
|--|--|--|--|
|
|
||||||
| PUT | | (binary data) | upload into file at URL |
|
|
||||||
| PUT | `?gz` | (binary data) | compress with gzip and write into file at URL |
|
|
||||||
| PUT | `?xz` | (binary data) | compress with xz and write into file at URL |
|
|
||||||
| mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL |
|
|
||||||
| mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json |
|
|
||||||
| mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL |
|
|
||||||
| GET | `?delete` | | delete URL recursively |
|
|
||||||
| jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively |
|
|
||||||
| uPOST | | `msg=foo` | send message `foo` into server log |
|
|
||||||
| mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL |
|
|
||||||
|
|
||||||
upload modifiers:
|
|
||||||
|
|
||||||
| http-header | url-param | effect |
|
|
||||||
|--|--|--|
|
|
||||||
| `Accept: url` | `want=url` | return just the file URL |
|
|
||||||
| `Rand: 4` | `rand=4` | generate random filename with 4 characters |
|
|
||||||
| `Life: 30` | `life=30` | delete file after 30 seconds |
|
|
||||||
|
|
||||||
* `life` only has an effect if the volume has a lifetime, and the volume lifetime must be greater than the file's
|
|
||||||
|
|
||||||
* server behavior of `msg` can be reconfigured with `--urlform`
|
|
||||||
|
|
||||||
## admin
|
|
||||||
|
|
||||||
| method | params | result |
|
|
||||||
|--|--|--|
|
|
||||||
| GET | `?reload=cfg` | reload config files and rescan volumes |
|
|
||||||
| GET | `?scan` | initiate a rescan of the volume which provides URL |
|
|
||||||
| GET | `?stack` | show a stacktrace of all threads |
|
|
||||||
|
|
||||||
## general
|
|
||||||
|
|
||||||
| method | params | result |
|
|
||||||
|--|--|--|
|
|
||||||
| GET | `?pw=x` | logout |
|
|
||||||
|
|
||||||
|
|
||||||
# dependencies
|
# dependencies
|
||||||
@@ -1268,6 +1316,9 @@ enable [thumbnails](#thumbnails) of...
|
|||||||
* **AVIF pictures:** `pyvips` or `ffmpeg` or `pillow-avif-plugin`
|
* **AVIF pictures:** `pyvips` or `ffmpeg` or `pillow-avif-plugin`
|
||||||
* **JPEG XL pictures:** `pyvips` or `ffmpeg`
|
* **JPEG XL pictures:** `pyvips` or `ffmpeg`
|
||||||
|
|
||||||
|
enable [smb](#smb-server) support:
|
||||||
|
* `impacket==0.10.0`
|
||||||
|
|
||||||
`pyvips` gives higher quality thumbnails than `Pillow` and is 320% faster, using 270% more ram: `sudo apt install libvips42 && python3 -m pip install --user -U pyvips`
|
`pyvips` gives higher quality thumbnails than `Pillow` and is 320% faster, using 270% more ram: `sudo apt install libvips42 && python3 -m pip install --user -U pyvips`
|
||||||
|
|
||||||
|
|
||||||
@@ -1288,40 +1339,39 @@ these are standalone programs and will never be imported / evaluated by copypart
|
|||||||
|
|
||||||
the self-contained "binary" [copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py) will unpack itself and run copyparty, assuming you have python installed of course
|
the self-contained "binary" [copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py) will unpack itself and run copyparty, assuming you have python installed of course
|
||||||
|
|
||||||
|
you can reduce the sfx size by repacking it; see [./docs/devnotes.md#sfx-repack](./docs/devnotes.md#sfx-repack)
|
||||||
|
|
||||||
## sfx repack
|
|
||||||
|
|
||||||
reduce the size of an sfx by removing features
|
## copyparty.exe
|
||||||
|
|
||||||
if you don't need all the features, you can repack the sfx and save a bunch of space; all you need is an sfx and a copy of this repo (nothing else to download or build, except if you're on windows then you need msys2 or WSL)
|
download [copyparty.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty.exe) or [copyparty64.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty64.exe)
|
||||||
* `393k` size of original sfx.py as of v1.1.3
|
|
||||||
* `310k` after `./scripts/make-sfx.sh re no-cm`
|
|
||||||
* `269k` after `./scripts/make-sfx.sh re no-cm no-hl`
|
|
||||||
|
|
||||||
the features you can opt to drop are
|

|
||||||
* `cm`/easymde, the "fancy" markdown editor, saves ~82k
|
|
||||||
* `hl`, prism, the syntax hilighter, saves ~41k
|
|
||||||
* `fnt`, source-code-pro, the monospace font, saves ~9k
|
|
||||||
* `dd`, the custom mouse cursor for the media player tray tab, saves ~2k
|
|
||||||
|
|
||||||
for the `re`pack to work, first run one of the sfx'es once to unpack it
|
can be convenient on old machines where installing python is problematic, however is **not recommended** and should be considered a last resort -- if possible, please use **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** instead
|
||||||
|
|
||||||
**note:** you can also just download and run [scripts/copyparty-repack.sh](scripts/copyparty-repack.sh) -- this will grab the latest copyparty release from github and do a few repacks; works on linux/macos (and windows with msys2 or WSL)
|
* [copyparty.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty.exe) is compatible with 32bit windows7, which means it uses an ancient copy of python (3.7.9) which cannot be upgraded and will definitely become a security hazard at some point
|
||||||
|
|
||||||
|
* [copyparty64.exe](https://github.com/9001/copyparty/releases/latest/download/copyparty64.exe) is identical except 64bit so it [works in WinPE](https://user-images.githubusercontent.com/241032/205454984-e6b550df-3c49-486d-9267-1614078dd0dd.png)
|
||||||
|
|
||||||
|
meanwhile [copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py) instead relies on your system python which gives better performance and will stay safe as long as you keep your python install up-to-date
|
||||||
|
|
||||||
|
then again, if you are already into downloading shady binaries from the internet, you may also want my [minimal builds](./scripts/pyinstaller#ffmpeg) of [ffmpeg](https://ocv.me/stuff/bin/ffmpeg.exe) and [ffprobe](https://ocv.me/stuff/bin/ffprobe.exe) which enables copyparty to extract multimedia-info, do audio-transcoding, and thumbnails/spectrograms/waveforms, however it's much better to instead grab a [recent official build](https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-win64-gpl.zip) every once in a while if you can afford the size
|
||||||
|
|
||||||
|
|
||||||
# install on android
|
# install on android
|
||||||
|
|
||||||
install [Termux](https://termux.com/) (see [ocv.me/termux](https://ocv.me/termux/)) and then copy-paste this into Termux (long-tap) all at once:
|
install [Termux](https://termux.com/) + its companion app `Termux:API` (see [ocv.me/termux](https://ocv.me/termux/)) and then copy-paste this into Termux (long-tap) all at once:
|
||||||
```sh
|
```sh
|
||||||
apt update && apt -y full-upgrade && apt update && termux-setup-storage && apt -y install python && python -m ensurepip && python -m pip install --user -U copyparty
|
yes | pkg upgrade && termux-setup-storage && yes | pkg install python termux-api && python -m ensurepip && python -m pip install --user -U copyparty && { grep -qE 'PATH=.*\.local/bin' ~/.bashrc 2>/dev/null || { echo 'PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc && . ~/.bashrc; }; }
|
||||||
echo $?
|
echo $?
|
||||||
```
|
```
|
||||||
|
|
||||||
after the initial setup, you can launch copyparty at any time by running `copyparty` anywhere in Termux
|
after the initial setup, you can launch copyparty at any time by running `copyparty` anywhere in Termux -- and if you run it with `--qr` you'll get a [neat qr-code](#qr-code) pointing to your external ip
|
||||||
|
|
||||||
if you want thumbnails, `apt -y install ffmpeg`
|
if you want thumbnails (photos+videos) and you're okay with spending another 132 MiB of storage, `pkg install ffmpeg && python3 -m pip install --user -U pillow`
|
||||||
|
|
||||||
* or if you want to use vips instead, `apt -y install libvips && python -m pip install --user -U wheel && python -m pip install --user -U pyvips && (cd /data/data/com.termux/files/usr/lib/; ln -s libgobject-2.0.so{,.0}; ln -s libvips.so{,.42})`
|
* or if you want to use `vips` for photo-thumbs instead, `pkg install libvips && python -m pip install --user -U wheel && python -m pip install --user -U pyvips && (cd /data/data/com.termux/files/usr/lib/; ln -s libgobject-2.0.so{,.0}; ln -s libvips.so{,.42})`
|
||||||
|
|
||||||
|
|
||||||
# reporting bugs
|
# reporting bugs
|
||||||
@@ -1338,86 +1388,6 @@ journalctl -aS '48 hour ago' -u copyparty | grep -C10 FILENAME | tee bug.log
|
|||||||
if there's a wall of base64 in the log (thread stacks) then please include that, especially if you run into something freezing up or getting stuck, for example `OperationalError('database is locked')` -- alternatively you can visit `/?stack` to see the stacks live, so http://127.0.0.1:3923/?stack for example
|
if there's a wall of base64 in the log (thread stacks) then please include that, especially if you run into something freezing up or getting stuck, for example `OperationalError('database is locked')` -- alternatively you can visit `/?stack` to see the stacks live, so http://127.0.0.1:3923/?stack for example
|
||||||
|
|
||||||
|
|
||||||
# building
|
# devnotes
|
||||||
|
|
||||||
## dev env setup
|
for build instructions etc, see [./docs/devnotes.md](./docs/devnotes.md)
|
||||||
|
|
||||||
you need python 3.9 or newer due to type hints
|
|
||||||
|
|
||||||
the rest is mostly optional; if you need a working env for vscode or similar
|
|
||||||
|
|
||||||
```sh
|
|
||||||
python3 -m venv .venv
|
|
||||||
. .venv/bin/activate
|
|
||||||
pip install jinja2 strip_hints # MANDATORY
|
|
||||||
pip install mutagen # audio metadata
|
|
||||||
pip install pyftpdlib # ftp server
|
|
||||||
pip install Pillow pyheif-pillow-opener pillow-avif-plugin # thumbnails
|
|
||||||
pip install black==21.12b0 click==8.0.2 bandit pylint flake8 isort mypy # vscode tooling
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## just the sfx
|
|
||||||
|
|
||||||
first grab the web-dependencies from a previous sfx (assuming you don't need to modify something in those):
|
|
||||||
|
|
||||||
```sh
|
|
||||||
rm -rf copyparty/web/deps
|
|
||||||
curl -L https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py >x.py
|
|
||||||
python3 x.py -h
|
|
||||||
rm x.py
|
|
||||||
mv /tmp/pe-copyparty/copyparty/web/deps/ copyparty/web/deps/
|
|
||||||
```
|
|
||||||
|
|
||||||
then build the sfx using any of the following examples:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./scripts/make-sfx.sh # regular edition
|
|
||||||
./scripts/make-sfx.sh gz no-cm # gzip-compressed + no fancy markdown editor
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## complete release
|
|
||||||
|
|
||||||
also builds the sfx so skip the sfx section above
|
|
||||||
|
|
||||||
in the `scripts` folder:
|
|
||||||
|
|
||||||
* run `make -C deps-docker` to build all dependencies
|
|
||||||
* run `./rls.sh 1.2.3` which uploads to pypi + creates github release + sfx
|
|
||||||
|
|
||||||
|
|
||||||
# todo
|
|
||||||
|
|
||||||
roughly sorted by priority
|
|
||||||
|
|
||||||
* nothing! currently
|
|
||||||
|
|
||||||
|
|
||||||
## discarded ideas
|
|
||||||
|
|
||||||
* reduce up2k roundtrips
|
|
||||||
* start from a chunk index and just go
|
|
||||||
* terminate client on bad data
|
|
||||||
* not worth the effort, just throw enough conncetions at it
|
|
||||||
* single sha512 across all up2k chunks?
|
|
||||||
* crypto.subtle cannot into streaming, would have to use hashwasm, expensive
|
|
||||||
* separate sqlite table per tag
|
|
||||||
* performance fixed by skipping some indexes (`+mt.k`)
|
|
||||||
* audio fingerprinting
|
|
||||||
* only makes sense if there can be a wasm client and that doesn't exist yet (except for olaf which is agpl hence counts as not existing)
|
|
||||||
* `os.copy_file_range` for up2k cloning
|
|
||||||
* almost never hit this path anyways
|
|
||||||
* up2k partials ui
|
|
||||||
* feels like there isn't much point
|
|
||||||
* cache sha512 chunks on client
|
|
||||||
* too dangerous -- overtaken by turbo mode
|
|
||||||
* comment field
|
|
||||||
* nah
|
|
||||||
* look into android thumbnail cache file format
|
|
||||||
* absolutely not
|
|
||||||
* indexedDB for hashes, cfg enable/clear/sz, 2gb avail, ~9k for 1g, ~4k for 100m, 500k items before autoeviction
|
|
||||||
* blank hashlist when up-ok to skip handshake
|
|
||||||
* too many confusing side-effects
|
|
||||||
* hls framework for Someone Else to drop code into :^)
|
|
||||||
* probably not, too much stuff to consider -- seeking, start at offset, task stitching (probably np-hard), conditional passthru, rate-control (especially multi-consumer), session keepalive, cache mgmt...
|
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
# [`up2k.py`](up2k.py)
|
# [`up2k.py`](up2k.py)
|
||||||
* command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
|
* command-line up2k client [(webm)](https://ocv.me/stuff/u2cli.webm)
|
||||||
* file uploads, file-search, autoresume of aborted/broken uploads
|
* file uploads, file-search, autoresume of aborted/broken uploads
|
||||||
* faster than browsers
|
* sync local folder to server
|
||||||
|
* generally faster than browsers
|
||||||
* if something breaks just restart it
|
* if something breaks just restart it
|
||||||
|
|
||||||
|
|
||||||
@@ -11,7 +12,7 @@ produces a chronological list of all uploads by collecting info from up2k databa
|
|||||||
* optional mapping from IP-addresses to nicknames
|
* optional mapping from IP-addresses to nicknames
|
||||||
|
|
||||||
|
|
||||||
# [`copyparty-fuse.py`](copyparty-fuse.py)
|
# [`partyfuse.py`](partyfuse.py)
|
||||||
* mount a copyparty server as a local filesystem (read-only)
|
* mount a copyparty server as a local filesystem (read-only)
|
||||||
* **supports Windows!** -- expect `194 MiB/s` sequential read
|
* **supports Windows!** -- expect `194 MiB/s` sequential read
|
||||||
* **supports Linux** -- expect `117 MiB/s` sequential read
|
* **supports Linux** -- expect `117 MiB/s` sequential read
|
||||||
@@ -30,19 +31,19 @@ also consider using [../docs/rclone.md](../docs/rclone.md) instead for 5x perfor
|
|||||||
* install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/)
|
* install [winfsp](https://github.com/billziss-gh/winfsp/releases/latest) and [python 3](https://www.python.org/downloads/)
|
||||||
* [x] add python 3.x to PATH (it asks during install)
|
* [x] add python 3.x to PATH (it asks during install)
|
||||||
* `python -m pip install --user fusepy`
|
* `python -m pip install --user fusepy`
|
||||||
* `python ./copyparty-fuse.py n: http://192.168.1.69:3923/`
|
* `python ./partyfuse.py n: http://192.168.1.69:3923/`
|
||||||
|
|
||||||
10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled:
|
10% faster in [msys2](https://www.msys2.org/), 700% faster if debug prints are enabled:
|
||||||
* `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}`
|
* `pacman -S mingw64/mingw-w64-x86_64-python{,-pip}`
|
||||||
* `/mingw64/bin/python3 -m pip install --user fusepy`
|
* `/mingw64/bin/python3 -m pip install --user fusepy`
|
||||||
* `/mingw64/bin/python3 ./copyparty-fuse.py [...]`
|
* `/mingw64/bin/python3 ./partyfuse.py [...]`
|
||||||
|
|
||||||
you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE)
|
you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releases/latest), let me know if you [figure out how](https://github.com/dokan-dev/dokany/wiki/FUSE)
|
||||||
(winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine)
|
(winfsp's sshfs leaks, doesn't look like winfsp itself does, should be fine)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# [`copyparty-fuse🅱️.py`](copyparty-fuseb.py)
|
# [`partyfuse2.py`](partyfuse2.py)
|
||||||
* mount a copyparty server as a local filesystem (read-only)
|
* mount a copyparty server as a local filesystem (read-only)
|
||||||
* does the same thing except more correct, `samba` approves
|
* does the same thing except more correct, `samba` approves
|
||||||
* **supports Linux** -- expect `18 MiB/s` (wait what)
|
* **supports Linux** -- expect `18 MiB/s` (wait what)
|
||||||
@@ -50,7 +51,7 @@ you could replace winfsp with [dokan](https://github.com/dokan-dev/dokany/releas
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
# [`copyparty-fuse-streaming.py`](copyparty-fuse-streaming.py)
|
# [`partyfuse-streaming.py`](partyfuse-streaming.py)
|
||||||
* pretend this doesn't exist
|
* pretend this doesn't exist
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ set -e
|
|||||||
#
|
#
|
||||||
# linux/alpine: requires gcc g++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-dev py3-{wheel,pip} py3-numpy{,-dev}
|
# linux/alpine: requires gcc g++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-dev py3-{wheel,pip} py3-numpy{,-dev}
|
||||||
# linux/debian: requires libav{codec,device,filter,format,resample,util}-dev {libfftw3,python3,libsndfile1}-dev python3-{numpy,pip} vamp-{plugin-sdk,examples} patchelf cmake
|
# linux/debian: requires libav{codec,device,filter,format,resample,util}-dev {libfftw3,python3,libsndfile1}-dev python3-{numpy,pip} vamp-{plugin-sdk,examples} patchelf cmake
|
||||||
|
# linux/fedora: requires gcc gcc-c++ make cmake patchelf {python3,ffmpeg,fftw,libsndfile}-devel python3-numpy vamp-plugin-sdk qm-vamp-plugins
|
||||||
# win64: requires msys2-mingw64 environment
|
# win64: requires msys2-mingw64 environment
|
||||||
# macos: requires macports
|
# macos: requires macports
|
||||||
#
|
#
|
||||||
@@ -160,12 +161,12 @@ install_keyfinder() {
|
|||||||
|
|
||||||
h="$HOME"
|
h="$HOME"
|
||||||
so="lib/libkeyfinder.so"
|
so="lib/libkeyfinder.so"
|
||||||
memes=()
|
memes=(-DBUILD_TESTING=OFF)
|
||||||
|
|
||||||
[ $win ] &&
|
[ $win ] &&
|
||||||
so="bin/libkeyfinder.dll" &&
|
so="bin/libkeyfinder.dll" &&
|
||||||
h="$(printf '%s\n' "$USERPROFILE" | tr '\\' '/')" &&
|
h="$(printf '%s\n' "$USERPROFILE" | tr '\\' '/')" &&
|
||||||
memes+=(-G "MinGW Makefiles" -DBUILD_TESTING=OFF)
|
memes+=(-G "MinGW Makefiles")
|
||||||
|
|
||||||
[ $mac ] &&
|
[ $mac ] &&
|
||||||
so="lib/libkeyfinder.dylib"
|
so="lib/libkeyfinder.dylib"
|
||||||
@@ -185,7 +186,7 @@ install_keyfinder() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# rm -rf /Users/ed/Library/Python/3.9/lib/python/site-packages/*keyfinder*
|
# rm -rf /Users/ed/Library/Python/3.9/lib/python/site-packages/*keyfinder*
|
||||||
CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include" \
|
CFLAGS="-I$h/pe/keyfinder/include -I/opt/local/include -I/usr/include/ffmpeg" \
|
||||||
LDFLAGS="-L$h/pe/keyfinder/lib -L$h/pe/keyfinder/lib64 -L/opt/local/lib" \
|
LDFLAGS="-L$h/pe/keyfinder/lib -L$h/pe/keyfinder/lib64 -L/opt/local/lib" \
|
||||||
PKG_CONFIG_PATH=/c/msys64/mingw64/lib/pkgconfig \
|
PKG_CONFIG_PATH=/c/msys64/mingw64/lib/pkgconfig \
|
||||||
$pybin -m pip install --user keyfinder
|
$pybin -m pip install --user keyfinder
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from __future__ import print_function, unicode_literals
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
"""copyparty-fuse-streaming: remote copyparty as a local filesystem"""
|
"""partyfuse-streaming: remote copyparty as a local filesystem"""
|
||||||
__author__ = "ed <copyparty@ocv.me>"
|
__author__ = "ed <copyparty@ocv.me>"
|
||||||
__copyright__ = 2020
|
__copyright__ = 2020
|
||||||
__license__ = "MIT"
|
__license__ = "MIT"
|
||||||
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
|
|||||||
mount a copyparty server (local or remote) as a filesystem
|
mount a copyparty server (local or remote) as a filesystem
|
||||||
|
|
||||||
usage:
|
usage:
|
||||||
python copyparty-fuse-streaming.py http://192.168.1.69:3923/ ./music
|
python partyfuse-streaming.py http://192.168.1.69:3923/ ./music
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
python3 -m pip install --user fusepy
|
python3 -m pip install --user fusepy
|
||||||
@@ -21,7 +21,7 @@ dependencies:
|
|||||||
+ on Windows: https://github.com/billziss-gh/winfsp/releases/latest
|
+ on Windows: https://github.com/billziss-gh/winfsp/releases/latest
|
||||||
|
|
||||||
this was a mistake:
|
this was a mistake:
|
||||||
fork of copyparty-fuse.py with a streaming cache rather than readahead,
|
fork of partyfuse.py with a streaming cache rather than readahead,
|
||||||
thought this was gonna be way faster (and it kind of is)
|
thought this was gonna be way faster (and it kind of is)
|
||||||
except the overhead of reopening connections on trunc totally kills it
|
except the overhead of reopening connections on trunc totally kills it
|
||||||
"""
|
"""
|
||||||
@@ -62,12 +62,12 @@ except:
|
|||||||
else:
|
else:
|
||||||
libfuse = "apt install libfuse\n modprobe fuse"
|
libfuse = "apt install libfuse\n modprobe fuse"
|
||||||
|
|
||||||
print(
|
m = """\033[33m
|
||||||
"\n could not import fuse; these may help:"
|
could not import fuse; these may help:
|
||||||
+ "\n python3 -m pip install --user fusepy\n "
|
{} -m pip install --user fusepy
|
||||||
+ libfuse
|
{}
|
||||||
+ "\n"
|
\033[0m"""
|
||||||
)
|
print(m.format(sys.executable, libfuse))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
@@ -154,7 +154,7 @@ def dewin(txt):
|
|||||||
class RecentLog(object):
|
class RecentLog(object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.mtx = threading.Lock()
|
self.mtx = threading.Lock()
|
||||||
self.f = None # open("copyparty-fuse.log", "wb")
|
self.f = None # open("partyfuse.log", "wb")
|
||||||
self.q = []
|
self.q = []
|
||||||
|
|
||||||
thr = threading.Thread(target=self.printer)
|
thr = threading.Thread(target=self.printer)
|
||||||
@@ -185,9 +185,9 @@ class RecentLog(object):
|
|||||||
print("".join(q), end="")
|
print("".join(q), end="")
|
||||||
|
|
||||||
|
|
||||||
# [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
|
||||||
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
|
||||||
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/
|
||||||
#
|
#
|
||||||
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
|
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
|
||||||
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
|
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from __future__ import print_function, unicode_literals
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
"""copyparty-fuse: remote copyparty as a local filesystem"""
|
"""partyfuse: remote copyparty as a local filesystem"""
|
||||||
__author__ = "ed <copyparty@ocv.me>"
|
__author__ = "ed <copyparty@ocv.me>"
|
||||||
__copyright__ = 2019
|
__copyright__ = 2019
|
||||||
__license__ = "MIT"
|
__license__ = "MIT"
|
||||||
@@ -12,7 +12,7 @@ __url__ = "https://github.com/9001/copyparty/"
|
|||||||
mount a copyparty server (local or remote) as a filesystem
|
mount a copyparty server (local or remote) as a filesystem
|
||||||
|
|
||||||
usage:
|
usage:
|
||||||
python copyparty-fuse.py http://192.168.1.69:3923/ ./music
|
python partyfuse.py http://192.168.1.69:3923/ ./music
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
python3 -m pip install --user fusepy
|
python3 -m pip install --user fusepy
|
||||||
@@ -74,12 +74,12 @@ except:
|
|||||||
else:
|
else:
|
||||||
libfuse = "apt install libfuse3-3\n modprobe fuse"
|
libfuse = "apt install libfuse3-3\n modprobe fuse"
|
||||||
|
|
||||||
print(
|
m = """\033[33m
|
||||||
"\n could not import fuse; these may help:"
|
could not import fuse; these may help:
|
||||||
+ "\n python3 -m pip install --user fusepy\n "
|
{} -m pip install --user fusepy
|
||||||
+ libfuse
|
{}
|
||||||
+ "\n"
|
\033[0m"""
|
||||||
)
|
print(m.format(sys.executable, libfuse))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
@@ -166,7 +166,7 @@ def dewin(txt):
|
|||||||
class RecentLog(object):
|
class RecentLog(object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.mtx = threading.Lock()
|
self.mtx = threading.Lock()
|
||||||
self.f = None # open("copyparty-fuse.log", "wb")
|
self.f = None # open("partyfuse.log", "wb")
|
||||||
self.q = []
|
self.q = []
|
||||||
|
|
||||||
thr = threading.Thread(target=self.printer)
|
thr = threading.Thread(target=self.printer)
|
||||||
@@ -197,9 +197,9 @@ class RecentLog(object):
|
|||||||
print("".join(q), end="")
|
print("".join(q), end="")
|
||||||
|
|
||||||
|
|
||||||
# [windows/cmd/cpy3] python dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/cmd/cpy3] python dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
|
||||||
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/cmd/msys2] C:\msys64\mingw64\bin\python3 dev\copyparty\bin\partyfuse.py q: http://192.168.1.159:1234/
|
||||||
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/copyparty-fuse.py q: http://192.168.1.159:1234/
|
# [windows/mty/msys2] /mingw64/bin/python3 /c/Users/ed/dev/copyparty/bin/partyfuse.py q: http://192.168.1.159:1234/
|
||||||
#
|
#
|
||||||
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
|
# [windows] find /q/music/albums/Phant*24bit -printf '%s %p\n' | sort -n | tail -n 8 | sed -r 's/^[0-9]+ //' | while IFS= read -r x; do dd if="$x" of=/dev/null bs=4k count=8192 & done
|
||||||
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
|
# [alpine] ll t; for x in t/2020_0724_16{2,3}*; do dd if="$x" of=/dev/null bs=4k count=10240 & done
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from __future__ import print_function, unicode_literals
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
"""copyparty-fuseb: remote copyparty as a local filesystem"""
|
"""partyfuse2: remote copyparty as a local filesystem"""
|
||||||
__author__ = "ed <copyparty@ocv.me>"
|
__author__ = "ed <copyparty@ocv.me>"
|
||||||
__copyright__ = 2020
|
__copyright__ = 2020
|
||||||
__license__ = "MIT"
|
__license__ = "MIT"
|
||||||
@@ -32,9 +32,19 @@ try:
|
|||||||
if not hasattr(fuse, "__version__"):
|
if not hasattr(fuse, "__version__"):
|
||||||
raise Exception("your fuse-python is way old")
|
raise Exception("your fuse-python is way old")
|
||||||
except:
|
except:
|
||||||
print(
|
if WINDOWS:
|
||||||
"\n could not import fuse; these may help:\n python3 -m pip install --user fuse-python\n apt install libfuse\n modprobe fuse\n"
|
libfuse = "install https://github.com/billziss-gh/winfsp/releases/latest"
|
||||||
)
|
elif MACOS:
|
||||||
|
libfuse = "install https://osxfuse.github.io/"
|
||||||
|
else:
|
||||||
|
libfuse = "apt install libfuse\n modprobe fuse"
|
||||||
|
|
||||||
|
m = """\033[33m
|
||||||
|
could not import fuse; these may help:
|
||||||
|
{} -m pip install --user fuse-python
|
||||||
|
{}
|
||||||
|
\033[0m"""
|
||||||
|
print(m.format(sys.executable, libfuse))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
@@ -42,13 +52,13 @@ except:
|
|||||||
mount a copyparty server (local or remote) as a filesystem
|
mount a copyparty server (local or remote) as a filesystem
|
||||||
|
|
||||||
usage:
|
usage:
|
||||||
python ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas
|
python ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
sudo apk add fuse-dev python3-dev
|
sudo apk add fuse-dev python3-dev
|
||||||
python3 -m pip install --user fuse-python
|
python3 -m pip install --user fuse-python
|
||||||
|
|
||||||
fork of copyparty-fuse.py based on fuse-python which
|
fork of partyfuse.py based on fuse-python which
|
||||||
appears to be more compliant than fusepy? since this works with samba
|
appears to be more compliant than fusepy? since this works with samba
|
||||||
(probably just my garbage code tbh)
|
(probably just my garbage code tbh)
|
||||||
"""
|
"""
|
||||||
@@ -639,7 +649,7 @@ def main():
|
|||||||
print(" need argument: mount-path")
|
print(" need argument: mount-path")
|
||||||
print("example:")
|
print("example:")
|
||||||
print(
|
print(
|
||||||
" ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas"
|
" ./partyfuse2.py -f -o allow_other,auto_unmount,nonempty,pw=wark,url=http://192.168.1.69:3923 /mnt/nas"
|
||||||
)
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
275
bin/up2k.py
275
bin/up2k.py
@@ -3,14 +3,12 @@ from __future__ import print_function, unicode_literals
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
up2k.py: upload to copyparty
|
up2k.py: upload to copyparty
|
||||||
2022-09-05, v0.19, ed <irc.rizon.net>, MIT-Licensed
|
2022-12-12, v1.0, ed <irc.rizon.net>, MIT-Licensed
|
||||||
https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py
|
https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py
|
||||||
|
|
||||||
- dependencies: requests
|
- dependencies: requests
|
||||||
- supports python 2.6, 2.7, and 3.3 through 3.11
|
- supports python 2.6, 2.7, and 3.3 through 3.12
|
||||||
|
- if something breaks just try again and it'll autoresume
|
||||||
- almost zero error-handling
|
|
||||||
- but if something breaks just try again and it'll autoresume
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -42,6 +40,7 @@ except ImportError:
|
|||||||
m = "requests/2.18.4 urllib3/1.23 chardet/3.0.4 certifi/2020.4.5.1 idna/2.7"
|
m = "requests/2.18.4 urllib3/1.23 chardet/3.0.4 certifi/2020.4.5.1 idna/2.7"
|
||||||
m = [" https://pypi.org/project/" + x + "/#files" for x in m.split()]
|
m = [" https://pypi.org/project/" + x + "/#files" for x in m.split()]
|
||||||
m = "\n ERROR: need these:\n" + "\n".join(m) + "\n"
|
m = "\n ERROR: need these:\n" + "\n".join(m) + "\n"
|
||||||
|
m += "\n for f in *.whl; do unzip $f; done; rm -r *.dist-info\n"
|
||||||
|
|
||||||
print(m.format(sys.executable))
|
print(m.format(sys.executable))
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@@ -51,8 +50,7 @@ except ImportError:
|
|||||||
PY2 = sys.version_info < (3,)
|
PY2 = sys.version_info < (3,)
|
||||||
if PY2:
|
if PY2:
|
||||||
from Queue import Queue
|
from Queue import Queue
|
||||||
from urllib import unquote
|
from urllib import quote, unquote
|
||||||
from urllib import quote
|
|
||||||
|
|
||||||
sys.dont_write_bytecode = True
|
sys.dont_write_bytecode = True
|
||||||
bytes = str
|
bytes = str
|
||||||
@@ -69,6 +67,14 @@ VT100 = platform.system() != "Windows"
|
|||||||
req_ses = requests.Session()
|
req_ses = requests.Session()
|
||||||
|
|
||||||
|
|
||||||
|
class Daemon(threading.Thread):
|
||||||
|
def __init__(self, target, name=None, a=None):
|
||||||
|
# type: (Any, Any, Any) -> None
|
||||||
|
threading.Thread.__init__(self, target=target, args=a or (), name=name)
|
||||||
|
self.daemon = True
|
||||||
|
self.start()
|
||||||
|
|
||||||
|
|
||||||
class File(object):
|
class File(object):
|
||||||
"""an up2k upload task; represents a single file"""
|
"""an up2k upload task; represents a single file"""
|
||||||
|
|
||||||
@@ -86,6 +92,7 @@ class File(object):
|
|||||||
self.kchunks = {} # type: dict[str, tuple[int, int]] # hash: [ ofs, sz ]
|
self.kchunks = {} # type: dict[str, tuple[int, int]] # hash: [ ofs, sz ]
|
||||||
|
|
||||||
# set by handshake
|
# set by handshake
|
||||||
|
self.recheck = False # duplicate; redo handshake after all files done
|
||||||
self.ucids = [] # type: list[str] # chunks which need to be uploaded
|
self.ucids = [] # type: list[str] # chunks which need to be uploaded
|
||||||
self.wark = None # type: str
|
self.wark = None # type: str
|
||||||
self.url = None # type: str
|
self.url = None # type: str
|
||||||
@@ -154,10 +161,7 @@ class MTHash(object):
|
|||||||
self.done_q = Queue()
|
self.done_q = Queue()
|
||||||
self.thrs = []
|
self.thrs = []
|
||||||
for _ in range(cores):
|
for _ in range(cores):
|
||||||
t = threading.Thread(target=self.worker)
|
self.thrs.append(Daemon(self.worker))
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
self.thrs.append(t)
|
|
||||||
|
|
||||||
def hash(self, f, fsz, chunksz, pcb=None, pcb_opaque=None):
|
def hash(self, f, fsz, chunksz, pcb=None, pcb_opaque=None):
|
||||||
with self.omutex:
|
with self.omutex:
|
||||||
@@ -257,10 +261,10 @@ def termsize():
|
|||||||
try:
|
try:
|
||||||
import fcntl, termios, struct
|
import fcntl, termios, struct
|
||||||
|
|
||||||
cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
|
r = struct.unpack(b"hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, b"AAAA"))
|
||||||
|
return r[::-1]
|
||||||
except:
|
except:
|
||||||
return
|
return None
|
||||||
return cr
|
|
||||||
|
|
||||||
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
|
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
|
||||||
if not cr:
|
if not cr:
|
||||||
@@ -270,12 +274,11 @@ def termsize():
|
|||||||
os.close(fd)
|
os.close(fd)
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
if not cr:
|
|
||||||
try:
|
try:
|
||||||
cr = (env["LINES"], env["COLUMNS"])
|
return cr or (int(env["COLUMNS"]), int(env["LINES"]))
|
||||||
except:
|
except:
|
||||||
cr = (25, 80)
|
return 80, 25
|
||||||
return int(cr[1]), int(cr[0])
|
|
||||||
|
|
||||||
|
|
||||||
class CTermsize(object):
|
class CTermsize(object):
|
||||||
@@ -290,9 +293,7 @@ class CTermsize(object):
|
|||||||
except:
|
except:
|
||||||
return
|
return
|
||||||
|
|
||||||
thr = threading.Thread(target=self.worker)
|
Daemon(self.worker)
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
def worker(self):
|
def worker(self):
|
||||||
while True:
|
while True:
|
||||||
@@ -359,26 +360,29 @@ def walkdir(err, top, seen):
|
|||||||
|
|
||||||
seen = seen[:] + [atop]
|
seen = seen[:] + [atop]
|
||||||
for ap, inf in sorted(statdir(err, top)):
|
for ap, inf in sorted(statdir(err, top)):
|
||||||
|
yield ap, inf
|
||||||
if stat.S_ISDIR(inf.st_mode):
|
if stat.S_ISDIR(inf.st_mode):
|
||||||
try:
|
try:
|
||||||
for x in walkdir(err, ap, seen):
|
for x in walkdir(err, ap, seen):
|
||||||
yield x
|
yield x
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
err.append((ap, str(ex)))
|
err.append((ap, str(ex)))
|
||||||
else:
|
|
||||||
yield ap, inf
|
|
||||||
|
|
||||||
|
|
||||||
def walkdirs(err, tops):
|
def walkdirs(err, tops):
|
||||||
"""recursive statdir for a list of tops, yields [top, relpath, stat]"""
|
"""recursive statdir for a list of tops, yields [top, relpath, stat]"""
|
||||||
sep = "{0}".format(os.sep).encode("ascii")
|
sep = "{0}".format(os.sep).encode("ascii")
|
||||||
for top in tops:
|
for top in tops:
|
||||||
|
isdir = os.path.isdir(top)
|
||||||
if top[-1:] == sep:
|
if top[-1:] == sep:
|
||||||
stop = top.rstrip(sep)
|
stop = top.rstrip(sep)
|
||||||
|
yield stop, b"", os.stat(stop)
|
||||||
else:
|
else:
|
||||||
stop = os.path.dirname(top)
|
stop, dn = os.path.split(top)
|
||||||
|
if isdir:
|
||||||
|
yield stop, dn, os.stat(stop)
|
||||||
|
|
||||||
if os.path.isdir(top):
|
if isdir:
|
||||||
for ap, inf in walkdir(err, top, []):
|
for ap, inf in walkdir(err, top, []):
|
||||||
yield stop, ap[len(stop) :].lstrip(sep), inf
|
yield stop, ap[len(stop) :].lstrip(sep), inf
|
||||||
else:
|
else:
|
||||||
@@ -420,7 +424,7 @@ def up2k_chunksize(filesize):
|
|||||||
while True:
|
while True:
|
||||||
for mul in [1, 2]:
|
for mul in [1, 2]:
|
||||||
nchunks = math.ceil(filesize * 1.0 / chunksize)
|
nchunks = math.ceil(filesize * 1.0 / chunksize)
|
||||||
if nchunks <= 256 or chunksize >= 32 * 1024 * 1024:
|
if nchunks <= 256 or (chunksize >= 32 * 1024 * 1024 and nchunks < 4096):
|
||||||
return chunksize
|
return chunksize
|
||||||
|
|
||||||
chunksize += stepsize
|
chunksize += stepsize
|
||||||
@@ -469,14 +473,17 @@ def get_hashlist(file, pcb, mth):
|
|||||||
file.kchunks[k] = [v1, v2]
|
file.kchunks[k] = [v1, v2]
|
||||||
|
|
||||||
|
|
||||||
def handshake(req_ses, url, file, pw, search):
|
def handshake(ar, file, search):
|
||||||
# type: (requests.Session, str, File, any, bool) -> list[str]
|
# type: (argparse.Namespace, File, bool) -> tuple[list[str], bool]
|
||||||
"""
|
"""
|
||||||
performs a handshake with the server; reply is:
|
performs a handshake with the server; reply is:
|
||||||
if search, a list of search results
|
if search, a list of search results
|
||||||
otherwise, a list of chunks to upload
|
otherwise, a list of chunks to upload
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
url = ar.url
|
||||||
|
pw = ar.a
|
||||||
|
|
||||||
req = {
|
req = {
|
||||||
"hash": [x[0] for x in file.cids],
|
"hash": [x[0] for x in file.cids],
|
||||||
"name": file.name,
|
"name": file.name,
|
||||||
@@ -485,11 +492,14 @@ def handshake(req_ses, url, file, pw, search):
|
|||||||
}
|
}
|
||||||
if search:
|
if search:
|
||||||
req["srch"] = 1
|
req["srch"] = 1
|
||||||
|
elif ar.dr:
|
||||||
|
req["replace"] = True
|
||||||
|
|
||||||
headers = {"Content-Type": "text/plain"} # wtf ed
|
headers = {"Content-Type": "text/plain"} # <=1.5.1 compat
|
||||||
if pw:
|
if pw:
|
||||||
headers["Cookie"] = "=".join(["cppwd", pw])
|
headers["Cookie"] = "=".join(["cppwd", pw])
|
||||||
|
|
||||||
|
file.recheck = False
|
||||||
if file.url:
|
if file.url:
|
||||||
url = file.url
|
url = file.url
|
||||||
elif b"/" in file.rel:
|
elif b"/" in file.rel:
|
||||||
@@ -504,6 +514,17 @@ def handshake(req_ses, url, file, pw, search):
|
|||||||
eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em))
|
eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
|
sc = r.status_code
|
||||||
|
if sc >= 400:
|
||||||
|
txt = r.text
|
||||||
|
if sc == 422 or "<pre>partial upload exists at a different" in txt:
|
||||||
|
file.recheck = True
|
||||||
|
return [], False
|
||||||
|
elif sc == 409 or "<pre>upload rejected, file already exists" in txt:
|
||||||
|
return [], False
|
||||||
|
|
||||||
|
raise Exception("http {0}: {1}".format(sc, txt))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
r = r.json()
|
r = r.json()
|
||||||
except:
|
except:
|
||||||
@@ -525,8 +546,8 @@ def handshake(req_ses, url, file, pw, search):
|
|||||||
return r["hash"], r["sprs"]
|
return r["hash"], r["sprs"]
|
||||||
|
|
||||||
|
|
||||||
def upload(req_ses, file, cid, pw):
|
def upload(file, cid, pw):
|
||||||
# type: (requests.Session, File, str, any) -> None
|
# type: (File, str, str) -> None
|
||||||
"""upload one specific chunk, `cid` (a chunk-hash)"""
|
"""upload one specific chunk, `cid` (a chunk-hash)"""
|
||||||
|
|
||||||
headers = {
|
headers = {
|
||||||
@@ -548,35 +569,22 @@ def upload(req_ses, file, cid, pw):
|
|||||||
f.f.close()
|
f.f.close()
|
||||||
|
|
||||||
|
|
||||||
class Daemon(threading.Thread):
|
|
||||||
def __init__(self, *a, **ka):
|
|
||||||
threading.Thread.__init__(self, *a, **ka)
|
|
||||||
self.daemon = True
|
|
||||||
|
|
||||||
|
|
||||||
class Ctl(object):
|
class Ctl(object):
|
||||||
"""
|
"""
|
||||||
this will be the coordinator which runs everything in parallel
|
the coordinator which runs everything in parallel
|
||||||
(hashing, handshakes, uploads) but right now it's p dumb
|
(hashing, handshakes, uploads)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, ar):
|
def __init__(self, ar):
|
||||||
self.ar = ar
|
|
||||||
ar.files = [
|
|
||||||
os.path.abspath(os.path.realpath(x.encode("utf-8")))
|
|
||||||
+ (x[-1:] if x[-1:] == os.sep else "").encode("utf-8")
|
|
||||||
for x in ar.files
|
|
||||||
]
|
|
||||||
ar.url = ar.url.rstrip("/") + "/"
|
|
||||||
if "://" not in ar.url:
|
|
||||||
ar.url = "http://" + ar.url
|
|
||||||
|
|
||||||
eprint("\nscanning {0} locations\n".format(len(ar.files)))
|
eprint("\nscanning {0} locations\n".format(len(ar.files)))
|
||||||
|
self.ar = ar
|
||||||
nfiles = 0
|
nfiles = 0
|
||||||
nbytes = 0
|
nbytes = 0
|
||||||
err = []
|
err = []
|
||||||
for _, _, inf in walkdirs(err, ar.files):
|
for _, _, inf in walkdirs(err, ar.files):
|
||||||
|
if stat.S_ISDIR(inf.st_mode):
|
||||||
|
continue
|
||||||
|
|
||||||
nfiles += 1
|
nfiles += 1
|
||||||
nbytes += inf.st_size
|
nbytes += inf.st_size
|
||||||
|
|
||||||
@@ -629,8 +637,8 @@ class Ctl(object):
|
|||||||
|
|
||||||
self.mutex = threading.Lock()
|
self.mutex = threading.Lock()
|
||||||
self.q_handshake = Queue() # type: Queue[File]
|
self.q_handshake = Queue() # type: Queue[File]
|
||||||
self.q_recheck = Queue() # type: Queue[File] # partial upload exists [...]
|
|
||||||
self.q_upload = Queue() # type: Queue[tuple[File, str]]
|
self.q_upload = Queue() # type: Queue[tuple[File, str]]
|
||||||
|
self.recheck = [] # type: list[File]
|
||||||
|
|
||||||
self.st_hash = [None, "(idle, starting...)"] # type: tuple[File, int]
|
self.st_hash = [None, "(idle, starting...)"] # type: tuple[File, int]
|
||||||
self.st_up = [None, "(idle, starting...)"] # type: tuple[File, int]
|
self.st_up = [None, "(idle, starting...)"] # type: tuple[File, int]
|
||||||
@@ -643,6 +651,9 @@ class Ctl(object):
|
|||||||
"""minimal basic slow boring fallback codepath"""
|
"""minimal basic slow boring fallback codepath"""
|
||||||
search = self.ar.s
|
search = self.ar.s
|
||||||
for nf, (top, rel, inf) in enumerate(self.filegen):
|
for nf, (top, rel, inf) in enumerate(self.filegen):
|
||||||
|
if stat.S_ISDIR(inf.st_mode) or not rel:
|
||||||
|
continue
|
||||||
|
|
||||||
file = File(top, rel, inf.st_size, inf.st_mtime)
|
file = File(top, rel, inf.st_size, inf.st_mtime)
|
||||||
upath = file.abs.decode("utf-8", "replace")
|
upath = file.abs.decode("utf-8", "replace")
|
||||||
|
|
||||||
@@ -652,7 +663,7 @@ class Ctl(object):
|
|||||||
burl = self.ar.url[:12] + self.ar.url[8:].split("/")[0] + "/"
|
burl = self.ar.url[:12] + self.ar.url[8:].split("/")[0] + "/"
|
||||||
while True:
|
while True:
|
||||||
print(" hs...")
|
print(" hs...")
|
||||||
hs, _ = handshake(req_ses, self.ar.url, file, self.ar.a, search)
|
hs, _ = handshake(self.ar, file, search)
|
||||||
if search:
|
if search:
|
||||||
if hs:
|
if hs:
|
||||||
for hit in hs:
|
for hit in hs:
|
||||||
@@ -669,19 +680,28 @@ class Ctl(object):
|
|||||||
ncs = len(hs)
|
ncs = len(hs)
|
||||||
for nc, cid in enumerate(hs):
|
for nc, cid in enumerate(hs):
|
||||||
print(" {0} up {1}".format(ncs - nc, cid))
|
print(" {0} up {1}".format(ncs - nc, cid))
|
||||||
upload(req_ses, file, cid, self.ar.a)
|
upload(file, cid, self.ar.a)
|
||||||
|
|
||||||
print(" ok!")
|
print(" ok!")
|
||||||
|
if file.recheck:
|
||||||
|
self.recheck.append(file)
|
||||||
|
|
||||||
|
if not self.recheck:
|
||||||
|
return
|
||||||
|
|
||||||
|
eprint("finalizing {0} duplicate files".format(len(self.recheck)))
|
||||||
|
for file in self.recheck:
|
||||||
|
handshake(self.ar, file, search)
|
||||||
|
|
||||||
def _fancy(self):
|
def _fancy(self):
|
||||||
if VT100:
|
if VT100:
|
||||||
atexit.register(self.cleanup_vt100)
|
atexit.register(self.cleanup_vt100)
|
||||||
ss.scroll_region(3)
|
ss.scroll_region(3)
|
||||||
|
|
||||||
Daemon(target=self.hasher).start()
|
Daemon(self.hasher)
|
||||||
for _ in range(self.ar.j):
|
for _ in range(self.ar.j):
|
||||||
Daemon(target=self.handshaker).start()
|
Daemon(self.handshaker)
|
||||||
Daemon(target=self.uploader).start()
|
Daemon(self.uploader)
|
||||||
|
|
||||||
idles = 0
|
idles = 0
|
||||||
while idles < 3:
|
while idles < 3:
|
||||||
@@ -743,6 +763,13 @@ class Ctl(object):
|
|||||||
t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft)
|
t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft)
|
||||||
eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail))
|
eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail))
|
||||||
|
|
||||||
|
if not self.recheck:
|
||||||
|
return
|
||||||
|
|
||||||
|
eprint("finalizing {0} duplicate files".format(len(self.recheck)))
|
||||||
|
for file in self.recheck:
|
||||||
|
handshake(self.ar, file, False)
|
||||||
|
|
||||||
def cleanup_vt100(self):
|
def cleanup_vt100(self):
|
||||||
ss.scroll_region(None)
|
ss.scroll_region(None)
|
||||||
eprint("\033[J\033]0;\033\\")
|
eprint("\033[J\033]0;\033\\")
|
||||||
@@ -754,8 +781,10 @@ class Ctl(object):
|
|||||||
prd = None
|
prd = None
|
||||||
ls = {}
|
ls = {}
|
||||||
for top, rel, inf in self.filegen:
|
for top, rel, inf in self.filegen:
|
||||||
if self.ar.z:
|
isdir = stat.S_ISDIR(inf.st_mode)
|
||||||
rd = os.path.dirname(rel)
|
if self.ar.z or self.ar.drd:
|
||||||
|
rd = rel if isdir else os.path.dirname(rel)
|
||||||
|
srd = rd.decode("utf-8", "replace").replace("\\", "/")
|
||||||
if prd != rd:
|
if prd != rd:
|
||||||
prd = rd
|
prd = rd
|
||||||
headers = {}
|
headers = {}
|
||||||
@@ -764,19 +793,34 @@ class Ctl(object):
|
|||||||
|
|
||||||
ls = {}
|
ls = {}
|
||||||
try:
|
try:
|
||||||
print(" ls ~{0}".format(rd.decode("utf-8", "replace")))
|
print(" ls ~{0}".format(srd))
|
||||||
r = req_ses.get(
|
zb = self.ar.url.encode("utf-8")
|
||||||
self.ar.url.encode("utf-8") + quotep(rd) + b"?ls",
|
zb += quotep(rd.replace(b"\\", b"/"))
|
||||||
headers=headers,
|
r = req_ses.get(zb + b"?ls&dots", headers=headers)
|
||||||
)
|
j = r.json()
|
||||||
for f in r.json()["files"]:
|
for f in j["dirs"] + j["files"]:
|
||||||
rfn = f["href"].split("?")[0].encode("utf-8", "replace")
|
rfn = f["href"].split("?")[0].rstrip("/")
|
||||||
ls[unquote(rfn)] = f
|
ls[unquote(rfn.encode("utf-8", "replace"))] = f
|
||||||
except:
|
except Exception as ex:
|
||||||
print(" mkdir ~{0}".format(rd.decode("utf-8", "replace")))
|
print(" mkdir ~{0} ({1})".format(srd, ex))
|
||||||
|
|
||||||
|
if self.ar.drd:
|
||||||
|
dp = os.path.join(top, rd)
|
||||||
|
lnodes = set(os.listdir(dp))
|
||||||
|
bnames = [x for x in ls if x not in lnodes]
|
||||||
|
if bnames:
|
||||||
|
vpath = self.ar.url.split("://")[-1].split("/", 1)[-1]
|
||||||
|
names = [x.decode("utf-8", "replace") for x in bnames]
|
||||||
|
locs = [vpath + srd + "/" + x for x in names]
|
||||||
|
print("DELETING ~{0}/#{1}".format(srd, len(names)))
|
||||||
|
req_ses.post(self.ar.url + "?delete", json=locs)
|
||||||
|
|
||||||
|
if isdir:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.ar.z:
|
||||||
rf = ls.get(os.path.basename(rel), None)
|
rf = ls.get(os.path.basename(rel), None)
|
||||||
if rf and rf["sz"] == inf.st_size and abs(rf["ts"] - inf.st_mtime) <= 1:
|
if rf and rf["sz"] == inf.st_size and abs(rf["ts"] - inf.st_mtime) <= 2:
|
||||||
self.nfiles -= 1
|
self.nfiles -= 1
|
||||||
self.nbytes -= inf.st_size
|
self.nbytes -= inf.st_size
|
||||||
continue
|
continue
|
||||||
@@ -785,16 +829,18 @@ class Ctl(object):
|
|||||||
while True:
|
while True:
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
if (
|
if (
|
||||||
self.hash_b - self.up_b < 1024 * 1024 * 128
|
self.hash_f - self.up_f == 1
|
||||||
and self.hash_c - self.up_c < 64
|
or (
|
||||||
and (
|
self.hash_b - self.up_b < 1024 * 1024 * 1024
|
||||||
|
and self.hash_c - self.up_c < 512
|
||||||
|
)
|
||||||
|
) and (
|
||||||
not self.ar.nh
|
not self.ar.nh
|
||||||
or (
|
or (
|
||||||
self.q_upload.empty()
|
self.q_upload.empty()
|
||||||
and self.q_handshake.empty()
|
and self.q_handshake.empty()
|
||||||
and not self.uploader_busy
|
and not self.uploader_busy
|
||||||
)
|
)
|
||||||
)
|
|
||||||
):
|
):
|
||||||
break
|
break
|
||||||
|
|
||||||
@@ -813,16 +859,10 @@ class Ctl(object):
|
|||||||
|
|
||||||
def handshaker(self):
|
def handshaker(self):
|
||||||
search = self.ar.s
|
search = self.ar.s
|
||||||
q = self.q_handshake
|
|
||||||
burl = self.ar.url[:8] + self.ar.url[8:].split("/")[0] + "/"
|
burl = self.ar.url[:8] + self.ar.url[8:].split("/")[0] + "/"
|
||||||
while True:
|
while True:
|
||||||
file = q.get()
|
file = self.q_handshake.get()
|
||||||
if not file:
|
if not file:
|
||||||
if q == self.q_handshake:
|
|
||||||
q = self.q_recheck
|
|
||||||
q.put(None)
|
|
||||||
continue
|
|
||||||
|
|
||||||
self.q_upload.put(None)
|
self.q_upload.put(None)
|
||||||
break
|
break
|
||||||
|
|
||||||
@@ -830,16 +870,7 @@ class Ctl(object):
|
|||||||
self.handshaker_busy += 1
|
self.handshaker_busy += 1
|
||||||
|
|
||||||
upath = file.abs.decode("utf-8", "replace")
|
upath = file.abs.decode("utf-8", "replace")
|
||||||
|
hs, sprs = handshake(self.ar, file, search)
|
||||||
try:
|
|
||||||
hs, sprs = handshake(req_ses, self.ar.url, file, self.ar.a, search)
|
|
||||||
except Exception as ex:
|
|
||||||
if q == self.q_handshake and "<pre>partial upload exists" in str(ex):
|
|
||||||
self.q_recheck.put(file)
|
|
||||||
hs = []
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
if search:
|
if search:
|
||||||
if hs:
|
if hs:
|
||||||
for hit in hs:
|
for hit in hs:
|
||||||
@@ -856,8 +887,11 @@ class Ctl(object):
|
|||||||
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
if file.recheck:
|
||||||
|
self.recheck.append(file)
|
||||||
|
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
if not sprs and not self.serialized:
|
if hs and not sprs and not self.serialized:
|
||||||
t = "server filesystem does not support sparse files; serializing uploads\n"
|
t = "server filesystem does not support sparse files; serializing uploads\n"
|
||||||
eprint(t)
|
eprint(t)
|
||||||
self.serialized = True
|
self.serialized = True
|
||||||
@@ -869,6 +903,9 @@ class Ctl(object):
|
|||||||
self.up_c += len(file.cids) - file.up_c
|
self.up_c += len(file.cids) - file.up_c
|
||||||
self.up_b += file.size - file.up_b
|
self.up_b += file.size - file.up_b
|
||||||
|
|
||||||
|
if not file.recheck:
|
||||||
|
self.up_done(file)
|
||||||
|
|
||||||
if hs and file.up_c:
|
if hs and file.up_c:
|
||||||
# some chunks failed
|
# some chunks failed
|
||||||
self.up_c -= len(hs)
|
self.up_c -= len(hs)
|
||||||
@@ -900,10 +937,10 @@ class Ctl(object):
|
|||||||
|
|
||||||
file, cid = task
|
file, cid = task
|
||||||
try:
|
try:
|
||||||
upload(req_ses, file, cid, self.ar.a)
|
upload(file, cid, self.ar.a)
|
||||||
except:
|
except:
|
||||||
eprint("upload failed, retrying: {0} #{1}\n".format(file.name, cid[:8]))
|
eprint("upload failed, retrying: {0} #{1}\n".format(file.name, cid[:8]))
|
||||||
pass # handshake will fix it
|
# handshake will fix it
|
||||||
|
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
sz = file.kchunks[cid][1]
|
sz = file.kchunks[cid][1]
|
||||||
@@ -919,6 +956,10 @@ class Ctl(object):
|
|||||||
self.up_c += 1
|
self.up_c += 1
|
||||||
self.uploader_busy -= 1
|
self.uploader_busy -= 1
|
||||||
|
|
||||||
|
def up_done(self, file):
|
||||||
|
if self.ar.dl:
|
||||||
|
os.unlink(file.abs)
|
||||||
|
|
||||||
|
|
||||||
class APF(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
|
class APF(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
|
||||||
pass
|
pass
|
||||||
@@ -946,18 +987,64 @@ source file/folder selection uses rsync syntax, meaning that:
|
|||||||
ap.add_argument("-a", metavar="PASSWORD", help="password")
|
ap.add_argument("-a", metavar="PASSWORD", help="password")
|
||||||
ap.add_argument("-s", action="store_true", help="file-search (disables upload)")
|
ap.add_argument("-s", action="store_true", help="file-search (disables upload)")
|
||||||
ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible")
|
ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible")
|
||||||
|
|
||||||
|
ap = app.add_argument_group("compatibility")
|
||||||
|
ap.add_argument("--cls", action="store_true", help="clear screen before start")
|
||||||
|
ap.add_argument("--ws", action="store_true", help="copyparty is running on windows; wait before deleting files after uploading")
|
||||||
|
|
||||||
|
ap = app.add_argument_group("folder sync")
|
||||||
|
ap.add_argument("--dl", action="store_true", help="delete local files after uploading")
|
||||||
|
ap.add_argument("--dr", action="store_true", help="delete remote files which don't exist locally")
|
||||||
|
ap.add_argument("--drd", action="store_true", help="delete remote files during upload instead of afterwards; reduces peak disk space usage, but will reupload instead of detecting renames")
|
||||||
|
|
||||||
ap = app.add_argument_group("performance tweaks")
|
ap = app.add_argument_group("performance tweaks")
|
||||||
ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections")
|
ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections")
|
||||||
ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
|
ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
|
||||||
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
|
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
|
||||||
ap.add_argument("--safe", action="store_true", help="use simple fallback approach")
|
ap.add_argument("--safe", action="store_true", help="use simple fallback approach")
|
||||||
ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)")
|
ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)")
|
||||||
|
|
||||||
ap = app.add_argument_group("tls")
|
ap = app.add_argument_group("tls")
|
||||||
ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify")
|
ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify")
|
||||||
ap.add_argument("-td", action="store_true", help="disable certificate check")
|
ap.add_argument("-td", action="store_true", help="disable certificate check")
|
||||||
# fmt: on
|
# fmt: on
|
||||||
|
|
||||||
Ctl(app.parse_args())
|
ar = app.parse_args()
|
||||||
|
if ar.drd:
|
||||||
|
ar.dr = True
|
||||||
|
|
||||||
|
for k in "dl dr drd".split():
|
||||||
|
errs = []
|
||||||
|
if ar.safe and getattr(ar, k):
|
||||||
|
errs.append(k)
|
||||||
|
|
||||||
|
if errs:
|
||||||
|
raise Exception("--safe is incompatible with " + str(errs))
|
||||||
|
|
||||||
|
ar.files = [
|
||||||
|
os.path.abspath(os.path.realpath(x.encode("utf-8")))
|
||||||
|
+ (x[-1:] if x[-1:] == os.sep else "").encode("utf-8")
|
||||||
|
for x in ar.files
|
||||||
|
]
|
||||||
|
|
||||||
|
ar.url = ar.url.rstrip("/") + "/"
|
||||||
|
if "://" not in ar.url:
|
||||||
|
ar.url = "http://" + ar.url
|
||||||
|
|
||||||
|
if ar.cls:
|
||||||
|
print("\x1b\x5b\x48\x1b\x5b\x32\x4a\x1b\x5b\x33\x4a", end="")
|
||||||
|
|
||||||
|
ctl = Ctl(ar)
|
||||||
|
|
||||||
|
if ar.dr and not ar.drd:
|
||||||
|
# run another pass for the deletes
|
||||||
|
if getattr(ctl, "up_br") and ar.ws:
|
||||||
|
# wait for up2k to mtime if there was uploads
|
||||||
|
time.sleep(4)
|
||||||
|
|
||||||
|
ar.drd = True
|
||||||
|
ar.z = True
|
||||||
|
Ctl(ar)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -27,7 +27,13 @@ however if your copyparty is behind a reverse-proxy, you may want to use [`share
|
|||||||
|
|
||||||
### [`explorer-nothumbs-nofoldertypes.reg`](explorer-nothumbs-nofoldertypes.reg)
|
### [`explorer-nothumbs-nofoldertypes.reg`](explorer-nothumbs-nofoldertypes.reg)
|
||||||
* disables thumbnails and folder-type detection in windows explorer
|
* disables thumbnails and folder-type detection in windows explorer
|
||||||
* makes it way faster (especially for slow/networked locations (such as copyparty-fuse))
|
* makes it way faster (especially for slow/networked locations (such as partyfuse))
|
||||||
|
|
||||||
|
### [`webdav-basicauth.reg`](webdav-basicauth.reg)
|
||||||
|
* enables webdav basic-auth over plaintext http; takes effect after a reboot OR after running `webdav-unlimit.bat`
|
||||||
|
|
||||||
|
### [`webdav-unlimit.bat`](webdav-unlimit.bat)
|
||||||
|
* removes the 47.6 MiB filesize limit when downloading from webdav
|
||||||
|
|
||||||
### [`cfssl.sh`](cfssl.sh)
|
### [`cfssl.sh`](cfssl.sh)
|
||||||
* creates CA and server certificates using cfssl
|
* creates CA and server certificates using cfssl
|
||||||
|
|||||||
15
contrib/apache/copyparty.conf
Normal file
15
contrib/apache/copyparty.conf
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# when running copyparty behind a reverse proxy,
|
||||||
|
# the following arguments are recommended:
|
||||||
|
#
|
||||||
|
# --http-only lower latency on initial connection
|
||||||
|
# -i 127.0.0.1 only accept connections from nginx
|
||||||
|
#
|
||||||
|
# if you are doing location-based proxying (such as `/stuff` below)
|
||||||
|
# you must run copyparty with --rp-loc=stuff
|
||||||
|
#
|
||||||
|
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
|
||||||
|
|
||||||
|
LoadModule proxy_module modules/mod_proxy.so
|
||||||
|
ProxyPass "/stuff" "http://127.0.0.1:3923/stuff"
|
||||||
|
# do not specify ProxyPassReverse
|
||||||
|
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
|
||||||
@@ -1,15 +1,17 @@
|
|||||||
# when running copyparty behind a reverse proxy,
|
# when running copyparty behind a reverse proxy,
|
||||||
# the following arguments are recommended:
|
# the following arguments are recommended:
|
||||||
#
|
#
|
||||||
# -nc 512 important, see next paragraph
|
|
||||||
# --http-only lower latency on initial connection
|
# --http-only lower latency on initial connection
|
||||||
# -i 127.0.0.1 only accept connections from nginx
|
# -i 127.0.0.1 only accept connections from nginx
|
||||||
#
|
#
|
||||||
# -nc must match or exceed the webserver's max number of concurrent clients;
|
# -nc must match or exceed the webserver's max number of concurrent clients;
|
||||||
|
# copyparty default is 1024 if OS permits it (see "max clients:" on startup),
|
||||||
# nginx default is 512 (worker_processes 1, worker_connections 512)
|
# nginx default is 512 (worker_processes 1, worker_connections 512)
|
||||||
#
|
#
|
||||||
# you may also consider adding -j0 for CPU-intensive configurations
|
# you may also consider adding -j0 for CPU-intensive configurations
|
||||||
# (not that i can really think of any good examples)
|
# (not that i can really think of any good examples)
|
||||||
|
#
|
||||||
|
# on fedora/rhel, remember to setsebool -P httpd_can_network_connect 1
|
||||||
|
|
||||||
upstream cpp {
|
upstream cpp {
|
||||||
server 127.0.0.1:3923;
|
server 127.0.0.1:3923;
|
||||||
|
|||||||
51
contrib/webdav-cfg.bat
Normal file
51
contrib/webdav-cfg.bat
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
@echo off
|
||||||
|
rem removes the 47.6 MiB filesize limit when downloading from webdav
|
||||||
|
rem + optionally allows/enables password-auth over plaintext http
|
||||||
|
rem + optionally helps disable wpad
|
||||||
|
|
||||||
|
setlocal enabledelayedexpansion
|
||||||
|
|
||||||
|
net session >nul 2>&1
|
||||||
|
if %errorlevel% neq 0 (
|
||||||
|
echo sorry, you must run this as administrator
|
||||||
|
pause
|
||||||
|
exit /b
|
||||||
|
)
|
||||||
|
|
||||||
|
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v FileSizeLimitInBytes /t REG_DWORD /d 0xffffffff /f
|
||||||
|
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters /v FsCtlRequestTimeoutInSec /t REG_DWORD /d 0xffffffff /f
|
||||||
|
|
||||||
|
echo(
|
||||||
|
echo OK;
|
||||||
|
echo allow webdav basic-auth over plaintext http?
|
||||||
|
echo Y: login works, but the password will be visible in wireshark etc
|
||||||
|
echo N: login will NOT work unless you use https and valid certificates
|
||||||
|
set c=.
|
||||||
|
set /p "c=(Y/N): "
|
||||||
|
echo(
|
||||||
|
if /i not "!c!"=="y" goto :g1
|
||||||
|
reg add HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\services\WebClient\Parameters /v BasicAuthLevel /t REG_DWORD /d 0x2 /f
|
||||||
|
rem default is 1 (require tls)
|
||||||
|
|
||||||
|
:g1
|
||||||
|
echo(
|
||||||
|
echo OK;
|
||||||
|
echo do you want to disable wpad?
|
||||||
|
echo can give a HUGE speed boost depending on network settings
|
||||||
|
set c=.
|
||||||
|
set /p "c=(Y/N): "
|
||||||
|
echo(
|
||||||
|
if /i not "!c!"=="y" goto :g2
|
||||||
|
echo(
|
||||||
|
echo i'm about to open the [Connections] tab in [Internet Properties] for you;
|
||||||
|
echo please click [LAN settings] and disable [Automatically detect settings]
|
||||||
|
echo(
|
||||||
|
pause
|
||||||
|
control inetcpl.cpl,,4
|
||||||
|
|
||||||
|
:g2
|
||||||
|
net stop webclient
|
||||||
|
net start webclient
|
||||||
|
echo(
|
||||||
|
echo OK; all done
|
||||||
|
pause
|
||||||
@@ -7,16 +7,19 @@ import sys
|
|||||||
import time
|
import time
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING
|
||||||
except:
|
except:
|
||||||
TYPE_CHECKING = False
|
TYPE_CHECKING = False
|
||||||
|
|
||||||
|
if True:
|
||||||
|
from typing import Any, Callable
|
||||||
|
|
||||||
PY2 = sys.version_info < (3,)
|
PY2 = sys.version_info < (3,)
|
||||||
if PY2:
|
if not PY2:
|
||||||
|
unicode: Callable[[Any], str] = str
|
||||||
|
else:
|
||||||
sys.dont_write_bytecode = True
|
sys.dont_write_bytecode = True
|
||||||
unicode = unicode # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
|
unicode = unicode # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
|
||||||
else:
|
|
||||||
unicode = str
|
|
||||||
|
|
||||||
WINDOWS: Any = (
|
WINDOWS: Any = (
|
||||||
[int(x) for x in platform.version().split(".")]
|
[int(x) for x in platform.version().split(".")]
|
||||||
@@ -40,8 +43,8 @@ except:
|
|||||||
class EnvParams(object):
|
class EnvParams(object):
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self.t0 = time.time()
|
self.t0 = time.time()
|
||||||
self.mod = None
|
self.mod = ""
|
||||||
self.cfg = None
|
self.cfg = ""
|
||||||
self.ox = getattr(sys, "oxidized", None)
|
self.ox = getattr(sys, "oxidized", None)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -9,26 +9,30 @@ __license__ = "MIT"
|
|||||||
__url__ = "https://github.com/9001/copyparty/"
|
__url__ = "https://github.com/9001/copyparty/"
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import base64
|
||||||
import filecmp
|
import filecmp
|
||||||
import locale
|
import locale
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import shutil
|
import shutil
|
||||||
|
import socket
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
import uuid
|
||||||
from textwrap import dedent
|
from textwrap import dedent
|
||||||
|
|
||||||
from .__init__ import ANYWIN, CORES, PY2, VT100, WINDOWS, E, EnvParams, unicode
|
from .__init__ import ANYWIN, CORES, PY2, VT100, WINDOWS, E, EnvParams, unicode
|
||||||
from .__version__ import CODENAME, S_BUILD_DT, S_VERSION
|
from .__version__ import CODENAME, S_BUILD_DT, S_VERSION
|
||||||
from .authsrv import re_vol
|
from .authsrv import expand_config_file, re_vol
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
from .util import (
|
from .util import (
|
||||||
IMPLICATIONS,
|
IMPLICATIONS,
|
||||||
JINJA_VER,
|
JINJA_VER,
|
||||||
PYFTPD_VER,
|
PYFTPD_VER,
|
||||||
SQLITE_VER,
|
SQLITE_VER,
|
||||||
|
UNPLICATIONS,
|
||||||
align_tab,
|
align_tab,
|
||||||
ansi_re,
|
ansi_re,
|
||||||
min_ex,
|
min_ex,
|
||||||
@@ -37,13 +41,11 @@ from .util import (
|
|||||||
wrap,
|
wrap,
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from collections.abc import Callable
|
from collections.abc import Callable
|
||||||
from types import FrameType
|
from types import FrameType
|
||||||
|
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
HAVE_SSL = True
|
HAVE_SSL = True
|
||||||
@@ -52,6 +54,7 @@ except:
|
|||||||
HAVE_SSL = False
|
HAVE_SSL = False
|
||||||
|
|
||||||
printed: list[str] = []
|
printed: list[str] = []
|
||||||
|
u = unicode
|
||||||
|
|
||||||
|
|
||||||
class RiceFormatter(argparse.HelpFormatter):
|
class RiceFormatter(argparse.HelpFormatter):
|
||||||
@@ -76,7 +79,11 @@ class RiceFormatter(argparse.HelpFormatter):
|
|||||||
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
|
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
|
||||||
if action.option_strings or action.nargs in defaulting_nargs:
|
if action.option_strings or action.nargs in defaulting_nargs:
|
||||||
ret += fmt
|
ret += fmt
|
||||||
return ret
|
|
||||||
|
if not VT100:
|
||||||
|
ret = re.sub("\033\\[[0-9;]+m", "", ret)
|
||||||
|
|
||||||
|
return ret # type: ignore
|
||||||
|
|
||||||
def _fill_text(self, text: str, width: int, indent: str) -> str:
|
def _fill_text(self, text: str, width: int, indent: str) -> str:
|
||||||
"""same as RawDescriptionHelpFormatter(HelpFormatter)"""
|
"""same as RawDescriptionHelpFormatter(HelpFormatter)"""
|
||||||
@@ -99,7 +106,7 @@ class RiceFormatter(argparse.HelpFormatter):
|
|||||||
self.__add_whitespace(i, lWSpace, x)
|
self.__add_whitespace(i, lWSpace, x)
|
||||||
for i, x in enumerate(wrap(line, width, width - 1))
|
for i, x in enumerate(wrap(line, width, width - 1))
|
||||||
]
|
]
|
||||||
textRows[idx] = lines
|
textRows[idx] = lines # type: ignore
|
||||||
|
|
||||||
return [item for sublist in textRows for item in sublist]
|
return [item for sublist in textRows for item in sublist]
|
||||||
|
|
||||||
@@ -136,7 +143,7 @@ def init_E(E: EnvParams) -> None:
|
|||||||
# __init__ runs 18 times when oxidized; do expensive stuff here
|
# __init__ runs 18 times when oxidized; do expensive stuff here
|
||||||
|
|
||||||
def get_unixdir() -> str:
|
def get_unixdir() -> str:
|
||||||
paths: list[tuple[Callable[..., str], str]] = [
|
paths: list[tuple[Callable[..., Any], str]] = [
|
||||||
(os.environ.get, "XDG_CONFIG_HOME"),
|
(os.environ.get, "XDG_CONFIG_HOME"),
|
||||||
(os.path.expanduser, "~/.config"),
|
(os.path.expanduser, "~/.config"),
|
||||||
(os.environ.get, "TMPDIR"),
|
(os.environ.get, "TMPDIR"),
|
||||||
@@ -158,7 +165,7 @@ def init_E(E: EnvParams) -> None:
|
|||||||
if not os.path.isdir(p):
|
if not os.path.isdir(p):
|
||||||
os.mkdir(p)
|
os.mkdir(p)
|
||||||
|
|
||||||
return p
|
return p # type: ignore
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -191,7 +198,8 @@ def init_E(E: EnvParams) -> None:
|
|||||||
E.mod = _unpack()
|
E.mod = _unpack()
|
||||||
|
|
||||||
if sys.platform == "win32":
|
if sys.platform == "win32":
|
||||||
E.cfg = os.path.normpath(os.environ["APPDATA"] + "/copyparty")
|
bdir = os.environ.get("APPDATA") or os.environ.get("TEMP")
|
||||||
|
E.cfg = os.path.normpath(bdir + "/copyparty")
|
||||||
elif sys.platform == "darwin":
|
elif sys.platform == "darwin":
|
||||||
E.cfg = os.path.expanduser("~/Library/Preferences/copyparty")
|
E.cfg = os.path.expanduser("~/Library/Preferences/copyparty")
|
||||||
else:
|
else:
|
||||||
@@ -205,19 +213,49 @@ def init_E(E: EnvParams) -> None:
|
|||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def get_srvname() -> str:
|
||||||
|
try:
|
||||||
|
ret: str = unicode(socket.gethostname()).split(".")[0]
|
||||||
|
except:
|
||||||
|
ret = ""
|
||||||
|
|
||||||
|
if ret not in ["", "localhost"]:
|
||||||
|
return ret
|
||||||
|
|
||||||
|
fp = os.path.join(E.cfg, "name.txt")
|
||||||
|
lprint("using hostname from {}\n".format(fp))
|
||||||
|
try:
|
||||||
|
with open(fp, "rb") as f:
|
||||||
|
ret = f.read().decode("utf-8", "replace").strip()
|
||||||
|
except:
|
||||||
|
ret = ""
|
||||||
|
while len(ret) < 7:
|
||||||
|
ret += base64.b32encode(os.urandom(4))[:7].decode("utf-8").lower()
|
||||||
|
ret = re.sub("[234567=]", "", ret)[:7]
|
||||||
|
with open(fp, "wb") as f:
|
||||||
|
f.write(ret.encode("utf-8") + b"\n")
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
def ensure_locale() -> None:
|
def ensure_locale() -> None:
|
||||||
|
safe = "en_US.UTF-8"
|
||||||
for x in [
|
for x in [
|
||||||
"en_US.UTF-8",
|
safe,
|
||||||
"English_United States.UTF8",
|
"English_United States.UTF8",
|
||||||
"English_United States.1252",
|
"English_United States.1252",
|
||||||
]:
|
]:
|
||||||
try:
|
try:
|
||||||
locale.setlocale(locale.LC_ALL, x)
|
locale.setlocale(locale.LC_ALL, x)
|
||||||
|
if x != safe:
|
||||||
lprint("Locale: {}\n".format(x))
|
lprint("Locale: {}\n".format(x))
|
||||||
break
|
return
|
||||||
except:
|
except:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
t = "setlocale {} failed,\n sorting and dates will be funky"
|
||||||
|
warn(t.format(safe))
|
||||||
|
|
||||||
|
|
||||||
def ensure_cert() -> None:
|
def ensure_cert() -> None:
|
||||||
"""
|
"""
|
||||||
@@ -313,10 +351,12 @@ def configure_ssl_ciphers(al: argparse.Namespace) -> None:
|
|||||||
|
|
||||||
|
|
||||||
def args_from_cfg(cfg_path: str) -> list[str]:
|
def args_from_cfg(cfg_path: str) -> list[str]:
|
||||||
|
lines: list[str] = []
|
||||||
|
expand_config_file(lines, cfg_path, "")
|
||||||
|
|
||||||
ret: list[str] = []
|
ret: list[str] = []
|
||||||
skip = False
|
skip = False
|
||||||
with open(cfg_path, "rb") as f:
|
for ln in lines:
|
||||||
for ln in [x.decode("utf-8").strip() for x in f]:
|
|
||||||
if not ln:
|
if not ln:
|
||||||
skip = False
|
skip = False
|
||||||
continue
|
continue
|
||||||
@@ -407,21 +447,8 @@ def showlic() -> None:
|
|||||||
print(f.read().decode("utf-8", "replace"))
|
print(f.read().decode("utf-8", "replace"))
|
||||||
|
|
||||||
|
|
||||||
def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Namespace:
|
def get_sects():
|
||||||
ap = argparse.ArgumentParser(
|
return [
|
||||||
formatter_class=formatter,
|
|
||||||
prog="copyparty",
|
|
||||||
description="http file sharing hub v{} ({})".format(S_VERSION, S_BUILD_DT),
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
fk_salt = unicode(os.path.getmtime(os.path.join(E.cfg, "cert.pem")))
|
|
||||||
except:
|
|
||||||
fk_salt = "hunter2"
|
|
||||||
|
|
||||||
hcores = min(CORES, 3) # 4% faster than 4+ on py3.9 @ r5-4500U
|
|
||||||
|
|
||||||
sects = [
|
|
||||||
[
|
[
|
||||||
"accounts",
|
"accounts",
|
||||||
"accounts and volumes",
|
"accounts and volumes",
|
||||||
@@ -438,6 +465,7 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
"m" (move): move files and folders; need "w" at destination
|
"m" (move): move files and folders; need "w" at destination
|
||||||
"d" (delete): permanently delete files and folders
|
"d" (delete): permanently delete files and folders
|
||||||
"g" (get): download files, but cannot see folder contents
|
"g" (get): download files, but cannot see folder contents
|
||||||
|
"G" (upget): "get", but can see filekeys of their own uploads
|
||||||
|
|
||||||
too many volflags to list here, see the other sections
|
too many volflags to list here, see the other sections
|
||||||
|
|
||||||
@@ -499,6 +527,8 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
\033[36mnohash=\\.iso$\033[35m skips hashing file contents if path matches *.iso
|
\033[36mnohash=\\.iso$\033[35m skips hashing file contents if path matches *.iso
|
||||||
\033[36mnoidx=\\.iso$\033[35m fully ignores the contents at paths matching *.iso
|
\033[36mnoidx=\\.iso$\033[35m fully ignores the contents at paths matching *.iso
|
||||||
\033[36mnoforget$\033[35m don't forget files when deleted from disk
|
\033[36mnoforget$\033[35m don't forget files when deleted from disk
|
||||||
|
\033[36mdbd=[acid|swal|wal|yolo]\033[35m database speed-durability tradeoff
|
||||||
|
\033[36mxlink$\033[35m cross-volume dupe detection / linking
|
||||||
\033[36mxdev\033[35m do not descend into other filesystems
|
\033[36mxdev\033[35m do not descend into other filesystems
|
||||||
\033[36mxvol\033[35m skip symlinks leaving the volume root
|
\033[36mxvol\033[35m skip symlinks leaving the volume root
|
||||||
|
|
||||||
@@ -558,79 +588,192 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
"""
|
"""
|
||||||
),
|
),
|
||||||
],
|
],
|
||||||
|
[
|
||||||
|
"dbd",
|
||||||
|
"database durability profiles",
|
||||||
|
dedent(
|
||||||
|
"""
|
||||||
|
mainly affects uploads of many small files on slow HDDs; speeds measured uploading 520 files on a WD20SPZX (SMR 2.5" 5400rpm 4kb)
|
||||||
|
|
||||||
|
\033[32macid\033[0m = extremely safe but slow; the old default. Should never lose any data no matter what
|
||||||
|
|
||||||
|
\033[32mswal\033[0m = 2.4x faster uploads yet 99.9%% as safe -- theoretical chance of losing metadata for the ~200 most recently uploaded files if there's a power-loss or your OS crashes
|
||||||
|
|
||||||
|
\033[32mwal\033[0m = another 21x faster on HDDs yet 90%% as safe; same pitfall as \033[33mswal\033[0m except more likely
|
||||||
|
|
||||||
|
\033[32myolo\033[0m = another 1.5x faster, and removes the occasional sudden upload-pause while the disk syncs, but now you're at risk of losing the entire database in a powerloss / OS-crash
|
||||||
|
|
||||||
|
profiles can be set globally (--dbd=yolo), or per-volume with volflags: -v ~/Music:music:r:c,dbd=acid
|
||||||
|
"""
|
||||||
|
),
|
||||||
|
],
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
# fmt: off
|
# fmt: off
|
||||||
u = unicode
|
|
||||||
|
|
||||||
|
def add_general(ap, nc, srvname):
|
||||||
ap2 = ap.add_argument_group('general options')
|
ap2 = ap.add_argument_group('general options')
|
||||||
ap2.add_argument("-c", metavar="PATH", type=u, action="append", help="add config file")
|
ap2.add_argument("-c", metavar="PATH", type=u, action="append", help="add config file")
|
||||||
ap2.add_argument("-nc", metavar="NUM", type=int, default=64, help="max num clients")
|
ap2.add_argument("-nc", metavar="NUM", type=int, default=nc, help="max num clients")
|
||||||
ap2.add_argument("-j", metavar="CORES", type=int, default=1, help="max num cpu cores, 0=all")
|
ap2.add_argument("-j", metavar="CORES", type=int, default=1, help="max num cpu cores, 0=all")
|
||||||
ap2.add_argument("-a", metavar="ACCT", type=u, action="append", help="add account, USER:PASS; example [ed:wark]")
|
ap2.add_argument("-a", metavar="ACCT", type=u, action="append", help="add account, \033[33mUSER\033[0m:\033[33mPASS\033[0m; example [\033[32med:wark\033[0m]")
|
||||||
ap2.add_argument("-v", metavar="VOL", type=u, action="append", help="add volume, SRC:DST:FLAG; examples [.::r], [/mnt/nas/music:/music:r:aed]")
|
ap2.add_argument("-v", metavar="VOL", type=u, action="append", help="add volume, \033[33mSRC\033[0m:\033[33mDST\033[0m:\033[33mFLAG\033[0m; examples [\033[32m.::r\033[0m], [\033[32m/mnt/nas/music:/music:r:aed\033[0m]")
|
||||||
ap2.add_argument("-ed", action="store_true", help="enable the ?dots url parameter / client option which allows clients to see dotfiles / hidden files")
|
ap2.add_argument("-ed", action="store_true", help="enable the ?dots url parameter / client option which allows clients to see dotfiles / hidden files")
|
||||||
ap2.add_argument("-emp", action="store_true", help="enable markdown plugins -- neat but dangerous, big XSS risk")
|
ap2.add_argument("-emp", action="store_true", help="enable markdown plugins -- neat but dangerous, big XSS risk")
|
||||||
ap2.add_argument("-mcr", metavar="SEC", type=int, default=60, help="md-editor mod-chk rate")
|
ap2.add_argument("-mcr", metavar="SEC", type=int, default=60, help="md-editor mod-chk rate")
|
||||||
ap2.add_argument("--urlform", metavar="MODE", type=u, default="print,get", help="how to handle url-form POSTs; see --help-urlform")
|
ap2.add_argument("--urlform", metavar="MODE", type=u, default="print,get", help="how to handle url-form POSTs; see --help-urlform")
|
||||||
ap2.add_argument("--wintitle", metavar="TXT", type=u, default="cpp @ $pub", help="window title, for example '$ip-10.1.2.' or '$ip-'")
|
ap2.add_argument("--wintitle", metavar="TXT", type=u, default="cpp @ $pub", help="window title, for example [\033[32m$ip-10.1.2.\033[0m] or [\033[32m$ip-]")
|
||||||
|
ap2.add_argument("--name", metavar="TXT", type=u, default=srvname, help="server name (displayed topleft in browser and in mDNS)")
|
||||||
ap2.add_argument("--license", action="store_true", help="show licenses and exit")
|
ap2.add_argument("--license", action="store_true", help="show licenses and exit")
|
||||||
ap2.add_argument("--version", action="store_true", help="show versions and exit")
|
ap2.add_argument("--version", action="store_true", help="show versions and exit")
|
||||||
|
|
||||||
|
|
||||||
|
def add_qr(ap, tty):
|
||||||
|
ap2 = ap.add_argument_group('qr options')
|
||||||
|
ap2.add_argument("--qr", action="store_true", help="show http:// QR-code on startup")
|
||||||
|
ap2.add_argument("--qrs", action="store_true", help="show https:// QR-code on startup")
|
||||||
|
ap2.add_argument("--qrl", metavar="PATH", type=u, default="", help="location to include in the url, for example [\033[32mpriv/?pw=hunter2\033[0m]")
|
||||||
|
ap2.add_argument("--qri", metavar="PREFIX", type=u, default="", help="select IP which starts with PREFIX; [\033[32m.\033[0m] to force default IP when mDNS URL would have been used instead")
|
||||||
|
ap2.add_argument("--qr-fg", metavar="COLOR", type=int, default=0 if tty else 16, help="foreground; try [\033[32m0\033[0m] if the qr-code is unreadable")
|
||||||
|
ap2.add_argument("--qr-bg", metavar="COLOR", type=int, default=229, help="background (white=255)")
|
||||||
|
ap2.add_argument("--qrp", metavar="CELLS", type=int, default=4, help="padding (spec says 4 or more, but 1 is usually fine)")
|
||||||
|
ap2.add_argument("--qrz", metavar="N", type=int, default=0, help="[\033[32m1\033[0m]=1x, [\033[32m2\033[0m]=2x, [\033[32m0\033[0m]=auto (try [\033[32m2\033[0m] on broken fonts)")
|
||||||
|
|
||||||
|
|
||||||
|
def add_upload(ap):
|
||||||
ap2 = ap.add_argument_group('upload options')
|
ap2 = ap.add_argument_group('upload options')
|
||||||
ap2.add_argument("--dotpart", action="store_true", help="dotfile incomplete uploads, hiding them from clients unless -ed")
|
ap2.add_argument("--dotpart", action="store_true", help="dotfile incomplete uploads, hiding them from clients unless -ed")
|
||||||
ap2.add_argument("--plain-ip", action="store_true", help="when avoiding filename collisions by appending the uploader's ip to the filename: append the plaintext ip instead of salting and hashing the ip")
|
ap2.add_argument("--plain-ip", action="store_true", help="when avoiding filename collisions by appending the uploader's ip to the filename: append the plaintext ip instead of salting and hashing the ip")
|
||||||
ap2.add_argument("--unpost", metavar="SEC", type=int, default=3600*12, help="grace period where uploads can be deleted by the uploader, even without delete permissions; 0=disabled")
|
ap2.add_argument("--unpost", metavar="SEC", type=int, default=3600*12, help="grace period where uploads can be deleted by the uploader, even without delete permissions; 0=disabled")
|
||||||
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600")
|
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600")
|
||||||
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload")
|
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (very slow on windows)")
|
||||||
ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even if copyparty thinks you're better off without -- probably useful on nfs and cow filesystems (zfs, btrfs)")
|
ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even when it might be dangerous (multiprocessing, filesystems lacking sparse-files support, ...)")
|
||||||
ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem)")
|
ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem)")
|
||||||
ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made")
|
ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made")
|
||||||
ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead")
|
ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead")
|
||||||
ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads")
|
ap2.add_argument("--no-dupe", action="store_true", help="reject duplicate files during upload; only matches within the same volume (volflag=nodupe)")
|
||||||
|
ap2.add_argument("--no-snap", action="store_true", help="disable snapshots -- forget unfinished uploads on shutdown; don't create .hist/up2k.snap files -- abandoned/interrupted uploads must be cleaned up manually")
|
||||||
|
ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads (volflag=magic)")
|
||||||
ap2.add_argument("--df", metavar="GiB", type=float, default=0, help="ensure GiB free disk space by rejecting upload requests")
|
ap2.add_argument("--df", metavar="GiB", type=float, default=0, help="ensure GiB free disk space by rejecting upload requests")
|
||||||
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
|
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
|
||||||
ap2.add_argument("--turbo", metavar="LVL", type=int, default=0, help="configure turbo-mode in up2k client; 0 = off and warn if enabled, 1 = off, 2 = on, 3 = on and disable datecheck")
|
ap2.add_argument("--turbo", metavar="LVL", type=int, default=0, help="configure turbo-mode in up2k client; [\033[32m0\033[0m] = off and warn if enabled, [\033[32m1\033[0m] = off, [\033[32m2\033[0m] = on, [\033[32m3\033[0m] = on and disable datecheck")
|
||||||
ap2.add_argument("--u2sort", metavar="TXT", type=u, default="s", help="upload order; s=smallest-first, n=alphabetical, fs=force-s, fn=force-n -- alphabetical is a bit slower on fiber/LAN but makes it easier to eyeball if everything went fine")
|
ap2.add_argument("--u2sort", metavar="TXT", type=u, default="s", help="upload order; [\033[32ms\033[0m]=smallest-first, [\033[32mn\033[0m]=alphabetical, [\033[32mfs\033[0m]=force-s, [\033[32mfn\033[0m]=force-n -- alphabetical is a bit slower on fiber/LAN but makes it easier to eyeball if everything went fine")
|
||||||
ap2.add_argument("--write-uplog", action="store_true", help="write POST reports to textfiles in working-directory")
|
ap2.add_argument("--write-uplog", action="store_true", help="write POST reports to textfiles in working-directory")
|
||||||
|
|
||||||
|
|
||||||
|
def add_network(ap):
|
||||||
ap2 = ap.add_argument_group('network options')
|
ap2 = ap.add_argument_group('network options')
|
||||||
ap2.add_argument("-i", metavar="IP", type=u, default="0.0.0.0", help="ip to bind (comma-sep.)")
|
ap2.add_argument("-i", metavar="IP", type=u, default="::", help="ip to bind (comma-sep.), default: all IPv4 and IPv6")
|
||||||
ap2.add_argument("-p", metavar="PORT", type=u, default="3923", help="ports to bind (comma/range)")
|
ap2.add_argument("-p", metavar="PORT", type=u, default="3923", help="ports to bind (comma/range)")
|
||||||
ap2.add_argument("--rproxy", metavar="DEPTH", type=int, default=1, help="which ip to keep; 0 = tcp, 1 = origin (first x-fwd), 2 = cloudflare, 3 = nginx, -1 = closest proxy")
|
ap2.add_argument("--ll", action="store_true", help="include link-local IPv4/IPv6 even if the NIC has routable IPs (breaks some mdns clients)")
|
||||||
|
ap2.add_argument("--rproxy", metavar="DEPTH", type=int, default=1, help="which ip to keep; [\033[32m0\033[0m]=tcp, [\033[32m1\033[0m]=origin (first x-fwd), [\033[32m2\033[0m]=cloudflare, [\033[32m3\033[0m]=nginx, [\033[32m-1\033[0m]=closest proxy")
|
||||||
|
ap2.add_argument("--rp-loc", metavar="PATH", type=u, default="", help="if reverse-proxying on a location instead of a dedicated domain/subdomain, provide the base location here (eg. /foo/bar)")
|
||||||
|
if ANYWIN:
|
||||||
|
ap2.add_argument("--reuseaddr", action="store_true", help="set reuseaddr on listening sockets on windows; allows rapid restart of copyparty at the expense of being able to accidentally start multiple instances")
|
||||||
ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes")
|
ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes")
|
||||||
ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds")
|
ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds")
|
||||||
ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds")
|
ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds")
|
||||||
|
|
||||||
|
|
||||||
|
def add_tls(ap):
|
||||||
ap2 = ap.add_argument_group('SSL/TLS options')
|
ap2 = ap.add_argument_group('SSL/TLS options')
|
||||||
ap2.add_argument("--http-only", action="store_true", help="disable ssl/tls -- force plaintext")
|
ap2.add_argument("--http-only", action="store_true", help="disable ssl/tls -- force plaintext")
|
||||||
ap2.add_argument("--https-only", action="store_true", help="disable plaintext -- force tls")
|
ap2.add_argument("--https-only", action="store_true", help="disable plaintext -- force tls")
|
||||||
ap2.add_argument("--ssl-ver", metavar="LIST", type=u, help="set allowed ssl/tls versions; [help] shows available versions; default is what your python version considers safe")
|
ap2.add_argument("--ssl-ver", metavar="LIST", type=u, help="set allowed ssl/tls versions; [\033[32mhelp\033[0m] shows available versions; default is what your python version considers safe")
|
||||||
ap2.add_argument("--ciphers", metavar="LIST", type=u, help="set allowed ssl/tls ciphers; [help] shows available ciphers")
|
ap2.add_argument("--ciphers", metavar="LIST", type=u, help="set allowed ssl/tls ciphers; [\033[32mhelp\033[0m] shows available ciphers")
|
||||||
ap2.add_argument("--ssl-dbg", action="store_true", help="dump some tls info")
|
ap2.add_argument("--ssl-dbg", action="store_true", help="dump some tls info")
|
||||||
ap2.add_argument("--ssl-log", metavar="PATH", type=u, help="log master secrets for later decryption in wireshark")
|
ap2.add_argument("--ssl-log", metavar="PATH", type=u, help="log master secrets for later decryption in wireshark")
|
||||||
|
|
||||||
ap2 = ap.add_argument_group('FTP options')
|
|
||||||
ap2.add_argument("--ftp", metavar="PORT", type=int, help="enable FTP server on PORT, for example 3921")
|
|
||||||
ap2.add_argument("--ftps", metavar="PORT", type=int, help="enable FTPS server on PORT, for example 3990")
|
|
||||||
ap2.add_argument("--ftp-dbg", action="store_true", help="enable debug logging")
|
|
||||||
ap2.add_argument("--ftp-nat", metavar="ADDR", type=u, help="the NAT address to use for passive connections")
|
|
||||||
ap2.add_argument("--ftp-pr", metavar="P-P", type=u, help="the range of TCP ports to use for passive connections, for example 12000-13000")
|
|
||||||
|
|
||||||
|
def add_zeroconf(ap):
|
||||||
|
ap2 = ap.add_argument_group("Zeroconf options")
|
||||||
|
ap2.add_argument("-z", action="store_true", help="enable all zeroconf backends (mdns, ssdp)")
|
||||||
|
ap2.add_argument("--z-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes\n └─example: \033[32meth0, wlo1, virhost0, 192.168.123.0/24, fd00:fda::/96\033[0m")
|
||||||
|
ap2.add_argument("--z-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
|
||||||
|
ap2.add_argument("-zv", action="store_true", help="verbose all zeroconf backends")
|
||||||
|
ap2.add_argument("--mc-hop", metavar="SEC", type=int, default=0, help="rejoin multicast groups every SEC seconds (workaround for some switches/routers which cause mDNS to suddenly stop working after some time); try [\033[32m300\033[0m] or [\033[32m180\033[0m]")
|
||||||
|
|
||||||
|
|
||||||
|
def add_zc_mdns(ap):
|
||||||
|
ap2 = ap.add_argument_group("Zeroconf-mDNS options:")
|
||||||
|
ap2.add_argument("--zm", action="store_true", help="announce the enabled protocols over mDNS (multicast DNS-SD) -- compatible with KDE, gnome, macOS, ...")
|
||||||
|
ap2.add_argument("--zm-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
|
||||||
|
ap2.add_argument("--zm-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
|
||||||
|
ap2.add_argument("--zm4", action="store_true", help="IPv4 only -- try this if some clients can't connect")
|
||||||
|
ap2.add_argument("--zm6", action="store_true", help="IPv6 only")
|
||||||
|
ap2.add_argument("--zmv", action="store_true", help="verbose mdns")
|
||||||
|
ap2.add_argument("--zmvv", action="store_true", help="verboser mdns")
|
||||||
|
ap2.add_argument("--zms", metavar="dhf", type=u, default="", help="list of services to announce -- d=webdav h=http f=ftp s=smb -- lowercase=plaintext uppercase=TLS -- default: all enabled services except http/https (\033[32mDdfs\033[0m if \033[33m--ftp\033[0m and \033[33m--smb\033[0m is set)")
|
||||||
|
ap2.add_argument("--zm-ld", metavar="PATH", type=u, default="", help="link a specific folder for webdav shares")
|
||||||
|
ap2.add_argument("--zm-lh", metavar="PATH", type=u, default="", help="link a specific folder for http shares")
|
||||||
|
ap2.add_argument("--zm-lf", metavar="PATH", type=u, default="", help="link a specific folder for ftp shares")
|
||||||
|
ap2.add_argument("--zm-ls", metavar="PATH", type=u, default="", help="link a specific folder for smb shares")
|
||||||
|
ap2.add_argument("--zm-mnic", action="store_true", help="merge NICs which share subnets; assume that same subnet means same network")
|
||||||
|
ap2.add_argument("--zm-msub", action="store_true", help="merge subnets on each NIC -- always enabled for ipv6 -- reduces network load, but gnome-gvfs clients may stop working")
|
||||||
|
ap2.add_argument("--zm-noneg", action="store_true", help="disable NSEC replies -- try this if some clients don't see copyparty")
|
||||||
|
|
||||||
|
|
||||||
|
def add_zc_ssdp(ap):
|
||||||
|
ap2 = ap.add_argument_group("Zeroconf-SSDP options:")
|
||||||
|
ap2.add_argument("--zs", action="store_true", help="announce the enabled protocols over SSDP -- compatible with Windows")
|
||||||
|
ap2.add_argument("--zs-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
|
||||||
|
ap2.add_argument("--zs-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
|
||||||
|
ap2.add_argument("--zsv", action="store_true", help="verbose SSDP")
|
||||||
|
ap2.add_argument("--zsl", metavar="PATH", type=u, default="/?hc", help="location to include in the url (or a complete external URL), for example [\033[32mpriv/?pw=hunter2\033[0m] (goes directly to /priv/ with password hunter2) or [\033[32m?hc=priv&pw=hunter2\033[0m] (shows mounting options for /priv/ with password)")
|
||||||
|
ap2.add_argument("--zsid", metavar="UUID", type=u, default=uuid.uuid4().urn[4:], help="USN (device identifier) to announce")
|
||||||
|
|
||||||
|
|
||||||
|
def add_ftp(ap):
|
||||||
|
ap2 = ap.add_argument_group('FTP options')
|
||||||
|
ap2.add_argument("--ftp", metavar="PORT", type=int, help="enable FTP server on PORT, for example \033[32m3921")
|
||||||
|
ap2.add_argument("--ftps", metavar="PORT", type=int, help="enable FTPS server on PORT, for example \033[32m3990")
|
||||||
|
ap2.add_argument("--ftpv", action="store_true", help="verbose")
|
||||||
|
ap2.add_argument("--ftp-wt", metavar="SEC", type=int, default=7, help="grace period for resuming interrupted uploads (any client can write to any file last-modified more recently than SEC seconds ago)")
|
||||||
|
ap2.add_argument("--ftp-nat", metavar="ADDR", type=u, help="the NAT address to use for passive connections")
|
||||||
|
ap2.add_argument("--ftp-pr", metavar="P-P", type=u, help="the range of TCP ports to use for passive connections, for example \033[32m12000-13000")
|
||||||
|
|
||||||
|
|
||||||
|
def add_webdav(ap):
|
||||||
|
ap2 = ap.add_argument_group('WebDAV options')
|
||||||
|
ap2.add_argument("--daw", action="store_true", help="enable full write support. \033[1;31mWARNING:\033[0m This has side-effects -- PUT-operations will now \033[1;31mOVERWRITE\033[0m existing files, rather than inventing new filenames to avoid loss of data. You might want to instead set this as a volflag where needed. By not setting this flag, uploaded files can get written to a filename which the client does not expect (which might be okay, depending on client)")
|
||||||
|
ap2.add_argument("--dav-inf", action="store_true", help="allow depth:infinite requests (recursive file listing); extremely server-heavy but required for spec compliance -- luckily few clients rely on this")
|
||||||
|
ap2.add_argument("--dav-mac", action="store_true", help="disable apple-garbage filter -- allow macos to create junk files (._* and .DS_Store, .Spotlight-*, .fseventsd, .Trashes, .AppleDouble, __MACOS)")
|
||||||
|
|
||||||
|
|
||||||
|
def add_smb(ap):
|
||||||
|
ap2 = ap.add_argument_group('SMB/CIFS options')
|
||||||
|
ap2.add_argument("--smb", action="store_true", help="enable smb (read-only) -- this requires running copyparty as root on linux and macos unless --smb-port is set above 1024 and your OS does port-forwarding from 445 to that.\n\033[1;31mWARNING:\033[0m this protocol is dangerous! Never expose to the internet. Account permissions are coalesced; if one account has write-access to a volume, then all accounts do.")
|
||||||
|
ap2.add_argument("--smbw", action="store_true", help="enable write support (please dont)")
|
||||||
|
ap2.add_argument("--smb1", action="store_true", help="disable SMBv2, only enable SMBv1 (CIFS)")
|
||||||
|
ap2.add_argument("--smb-port", metavar="PORT", type=int, default=445, help="port to listen on -- if you change this value, you must NAT from TCP:445 to this port using iptables or similar")
|
||||||
|
ap2.add_argument("--smb-nwa-1", action="store_true", help="disable impacket#1433 workaround (truncate directory listings to 64kB)")
|
||||||
|
ap2.add_argument("--smb-nwa-2", action="store_true", help="disable impacket workaround for filecopy globs")
|
||||||
|
ap2.add_argument("--smbv", action="store_true", help="verbose")
|
||||||
|
ap2.add_argument("--smbvv", action="store_true", help="verboser")
|
||||||
|
ap2.add_argument("--smbvvv", action="store_true", help="verbosest")
|
||||||
|
|
||||||
|
|
||||||
|
def add_optouts(ap):
|
||||||
ap2 = ap.add_argument_group('opt-outs')
|
ap2 = ap.add_argument_group('opt-outs')
|
||||||
ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)")
|
ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)")
|
||||||
ap2.add_argument("--keep-qem", action="store_true", help="do not disable quick-edit-mode on windows (it is disabled to avoid accidental text selection which will deadlock copyparty)")
|
ap2.add_argument("--keep-qem", action="store_true", help="do not disable quick-edit-mode on windows (it is disabled to avoid accidental text selection which will deadlock copyparty)")
|
||||||
|
ap2.add_argument("--no-dav", action="store_true", help="disable webdav support")
|
||||||
ap2.add_argument("--no-del", action="store_true", help="disable delete operations")
|
ap2.add_argument("--no-del", action="store_true", help="disable delete operations")
|
||||||
ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations")
|
ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations")
|
||||||
ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI")
|
ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI")
|
||||||
ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI")
|
ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI")
|
||||||
ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar")
|
ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar")
|
||||||
ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (lifetime volflag)")
|
ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (as specified by the 'lifetime' volflag)")
|
||||||
|
|
||||||
|
|
||||||
|
def add_safety(ap, fk_salt):
|
||||||
ap2 = ap.add_argument_group('safety options')
|
ap2 = ap.add_argument_group('safety options')
|
||||||
ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js")
|
ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js")
|
||||||
ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --no-dot-mv --no-dot-ren --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih")
|
ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, webdav, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --no-dot-mv --no-dot-ren --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih")
|
||||||
ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r")
|
ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss --no-dav -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r")
|
||||||
ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments USER,VOL,FLAGS; example [**,*,ln,p,r]")
|
ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments \033[33mUSER\033[0m,\033[33mVOL\033[0m,\033[33mFLAGS\033[0m; example [\033[32m**,*,ln,p,r\033[0m]")
|
||||||
ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter")
|
ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter")
|
||||||
ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter")
|
ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter")
|
||||||
ap2.add_argument("--no-dot-mv", action="store_true", help="disallow moving dotfiles; makes it impossible to move folders containing dotfiles")
|
ap2.add_argument("--no-dot-mv", action="store_true", help="disallow moving dotfiles; makes it impossible to move folders containing dotfiles")
|
||||||
@@ -639,34 +782,44 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
ap2.add_argument("--no-readme", action="store_true", help="disable rendering readme.md into directory listings")
|
ap2.add_argument("--no-readme", action="store_true", help="disable rendering readme.md into directory listings")
|
||||||
ap2.add_argument("--vague-403", action="store_true", help="send 404 instead of 403 (security through ambiguity, very enterprise)")
|
ap2.add_argument("--vague-403", action="store_true", help="send 404 instead of 403 (security through ambiguity, very enterprise)")
|
||||||
ap2.add_argument("--force-js", action="store_true", help="don't send folder listings as HTML, force clients to use the embedded json instead -- slight protection against misbehaving search engines which ignore --no-robots")
|
ap2.add_argument("--force-js", action="store_true", help="don't send folder listings as HTML, force clients to use the embedded json instead -- slight protection against misbehaving search engines which ignore --no-robots")
|
||||||
ap2.add_argument("--no-robots", action="store_true", help="adds http and html headers asking search engines to not index anything")
|
ap2.add_argument("--no-robots", action="store_true", help="adds http and html headers asking search engines to not index anything (volflag=norobots)")
|
||||||
ap2.add_argument("--logout", metavar="H", type=float, default="8086", help="logout clients after H hours of inactivity (0.0028=10sec, 0.1=6min, 24=day, 168=week, 720=month, 8760=year)")
|
ap2.add_argument("--logout", metavar="H", type=float, default="8086", help="logout clients after H hours of inactivity; [\033[32m0.0028\033[0m]=10sec, [\033[32m0.1\033[0m]=6min, [\033[32m24\033[0m]=day, [\033[32m168\033[0m]=week, [\033[32m720\033[0m]=month, [\033[32m8760\033[0m]=year)")
|
||||||
ap2.add_argument("--ban-pw", metavar="N,W,B", type=u, default="9,60,1440", help="more than N wrong passwords in W minutes = ban for B minutes (disable with \"no\")")
|
ap2.add_argument("--ban-pw", metavar="N,W,B", type=u, default="9,60,1440", help="more than \033[33mN\033[0m wrong passwords in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes; disable with [\033[32mno\033[0m]")
|
||||||
ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than N 404's in W minutes = ban for B minutes (disabled by default since turbo-up2k counts as 404s)")
|
ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than \033[33mN\033[0m 404's in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes (disabled by default since turbo-up2k counts as 404s)")
|
||||||
|
ap2.add_argument("--aclose", metavar="MIN", type=int, default=10, help="if a client maxes out the server connection limit, downgrade it from connection:keep-alive to connection:close for MIN minutes (and also kill its active connections) -- disable with 0")
|
||||||
|
ap2.add_argument("--loris", metavar="B", type=int, default=60, help="if a client maxes out the server connection limit without sending headers, ban it for B minutes; disable with [\033[32m0\033[0m]")
|
||||||
|
|
||||||
|
|
||||||
|
def add_shutdown(ap):
|
||||||
ap2 = ap.add_argument_group('shutdown options')
|
ap2 = ap.add_argument_group('shutdown options')
|
||||||
ap2.add_argument("--ign-ebind", action="store_true", help="continue running even if it's impossible to listen on some of the requested endpoints")
|
ap2.add_argument("--ign-ebind", action="store_true", help="continue running even if it's impossible to listen on some of the requested endpoints")
|
||||||
ap2.add_argument("--ign-ebind-all", action="store_true", help="continue running even if it's impossible to receive connections at all")
|
ap2.add_argument("--ign-ebind-all", action="store_true", help="continue running even if it's impossible to receive connections at all")
|
||||||
ap2.add_argument("--exit", metavar="WHEN", type=u, default="", help="shutdown after WHEN has finished; for example 'idx' will do volume indexing + metadata analysis")
|
ap2.add_argument("--exit", metavar="WHEN", type=u, default="", help="shutdown after WHEN has finished; for example [\033[32midx\033[0m] will do volume indexing + metadata analysis")
|
||||||
|
|
||||||
|
|
||||||
|
def add_logging(ap):
|
||||||
ap2 = ap.add_argument_group('logging options')
|
ap2 = ap.add_argument_group('logging options')
|
||||||
ap2.add_argument("-q", action="store_true", help="quiet")
|
ap2.add_argument("-q", action="store_true", help="quiet")
|
||||||
ap2.add_argument("-lo", metavar="PATH", type=u, help="logfile, example: cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz")
|
ap2.add_argument("-lo", metavar="PATH", type=u, help="logfile, example: \033[32mcpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz")
|
||||||
ap2.add_argument("--no-voldump", action="store_true", help="do not list volumes and permissions on startup")
|
ap2.add_argument("--no-voldump", action="store_true", help="do not list volumes and permissions on startup")
|
||||||
ap2.add_argument("--log-conn", action="store_true", help="debug: print tcp-server msgs")
|
ap2.add_argument("--log-conn", action="store_true", help="debug: print tcp-server msgs")
|
||||||
ap2.add_argument("--log-htp", action="store_true", help="debug: print http-server threadpool scaling")
|
ap2.add_argument("--log-htp", action="store_true", help="debug: print http-server threadpool scaling")
|
||||||
ap2.add_argument("--ihead", metavar="HEADER", type=u, action='append', help="dump incoming header")
|
ap2.add_argument("--ihead", metavar="HEADER", type=u, action='append', help="dump incoming header")
|
||||||
ap2.add_argument("--lf-url", metavar="RE", type=u, default=r"^/\.cpr/|\?th=[wj]$", help="dont log URLs matching")
|
ap2.add_argument("--lf-url", metavar="RE", type=u, default=r"^/\.cpr/|\?th=[wj]$|/\.(_|ql_|DS_Store$|localized$)", help="dont log URLs matching")
|
||||||
|
|
||||||
|
|
||||||
|
def add_admin(ap):
|
||||||
ap2 = ap.add_argument_group('admin panel options')
|
ap2 = ap.add_argument_group('admin panel options')
|
||||||
ap2.add_argument("--no-reload", action="store_true", help="disable ?reload=cfg (reload users/volumes/volflags from config file)")
|
ap2.add_argument("--no-reload", action="store_true", help="disable ?reload=cfg (reload users/volumes/volflags from config file)")
|
||||||
ap2.add_argument("--no-rescan", action="store_true", help="disable ?scan (volume reindexing)")
|
ap2.add_argument("--no-rescan", action="store_true", help="disable ?scan (volume reindexing)")
|
||||||
ap2.add_argument("--no-stack", action="store_true", help="disable ?stack (list all stacks)")
|
ap2.add_argument("--no-stack", action="store_true", help="disable ?stack (list all stacks)")
|
||||||
|
|
||||||
|
|
||||||
|
def add_thumbnail(ap):
|
||||||
ap2 = ap.add_argument_group('thumbnail options')
|
ap2 = ap.add_argument_group('thumbnail options')
|
||||||
ap2.add_argument("--no-thumb", action="store_true", help="disable all thumbnails")
|
ap2.add_argument("--no-thumb", action="store_true", help="disable all thumbnails (volflag=dthumb)")
|
||||||
ap2.add_argument("--no-athumb", action="store_true", help="disable audio thumbnails (spectrograms)")
|
ap2.add_argument("--no-vthumb", action="store_true", help="disable video thumbnails (volflag=dvthumb)")
|
||||||
ap2.add_argument("--no-vthumb", action="store_true", help="disable video thumbnails")
|
ap2.add_argument("--no-athumb", action="store_true", help="disable audio thumbnails (spectrograms) (volflag=dathumb)")
|
||||||
ap2.add_argument("--th-size", metavar="WxH", default="320x256", help="thumbnail res")
|
ap2.add_argument("--th-size", metavar="WxH", default="320x256", help="thumbnail res")
|
||||||
ap2.add_argument("--th-mt", metavar="CORES", type=int, default=CORES, help="num cpu cores to use for generating thumbnails")
|
ap2.add_argument("--th-mt", metavar="CORES", type=int, default=CORES, help="num cpu cores to use for generating thumbnails")
|
||||||
ap2.add_argument("--th-convt", metavar="SEC", type=int, default=60, help="conversion timeout in seconds")
|
ap2.add_argument("--th-convt", metavar="SEC", type=int, default=60, help="conversion timeout in seconds")
|
||||||
@@ -689,10 +842,14 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
ap2.add_argument("--th-r-ffv", metavar="T,T", type=u, default="av1,asf,avi,flv,m4v,mkv,mjpeg,mjpg,mpg,mpeg,mpg2,mpeg2,h264,avc,mts,h265,hevc,mov,3gp,mp4,ts,mpegts,nut,ogv,ogm,rm,vob,webm,wmv", help="video formats to decode using ffmpeg")
|
ap2.add_argument("--th-r-ffv", metavar="T,T", type=u, default="av1,asf,avi,flv,m4v,mkv,mjpeg,mjpg,mpg,mpeg,mpg2,mpeg2,h264,avc,mts,h265,hevc,mov,3gp,mp4,ts,mpegts,nut,ogv,ogm,rm,vob,webm,wmv", help="video formats to decode using ffmpeg")
|
||||||
ap2.add_argument("--th-r-ffa", metavar="T,T", type=u, default="aac,m4a,ogg,opus,flac,alac,mp3,mp2,ac3,dts,wma,ra,wav,aif,aiff,au,alaw,ulaw,mulaw,amr,gsm,ape,tak,tta,wv,mpc", help="audio formats to decode using ffmpeg")
|
ap2.add_argument("--th-r-ffa", metavar="T,T", type=u, default="aac,m4a,ogg,opus,flac,alac,mp3,mp2,ac3,dts,wma,ra,wav,aif,aiff,au,alaw,ulaw,mulaw,amr,gsm,ape,tak,tta,wv,mpc", help="audio formats to decode using ffmpeg")
|
||||||
|
|
||||||
|
|
||||||
|
def add_transcoding(ap):
|
||||||
ap2 = ap.add_argument_group('transcoding options')
|
ap2 = ap.add_argument_group('transcoding options')
|
||||||
ap2.add_argument("--no-acode", action="store_true", help="disable audio transcoding")
|
ap2.add_argument("--no-acode", action="store_true", help="disable audio transcoding")
|
||||||
ap2.add_argument("--ac-maxage", metavar="SEC", type=int, default=86400, help="delete cached transcode output after SEC seconds")
|
ap2.add_argument("--ac-maxage", metavar="SEC", type=int, default=86400, help="delete cached transcode output after SEC seconds")
|
||||||
|
|
||||||
|
|
||||||
|
def add_db_general(ap, hcores):
|
||||||
ap2 = ap.add_argument_group('general db options')
|
ap2 = ap.add_argument_group('general db options')
|
||||||
ap2.add_argument("-e2d", action="store_true", help="enable up2k database, making files searchable + enables upload deduplocation")
|
ap2.add_argument("-e2d", action="store_true", help="enable up2k database, making files searchable + enables upload deduplocation")
|
||||||
ap2.add_argument("-e2ds", action="store_true", help="scan writable folders for new files on startup; sets -e2d")
|
ap2.add_argument("-e2ds", action="store_true", help="scan writable folders for new files on startup; sets -e2d")
|
||||||
@@ -700,19 +857,24 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
ap2.add_argument("-e2v", action="store_true", help="verify file integrity; rehash all files and compare with db")
|
ap2.add_argument("-e2v", action="store_true", help="verify file integrity; rehash all files and compare with db")
|
||||||
ap2.add_argument("-e2vu", action="store_true", help="on hash mismatch: update the database with the new hash")
|
ap2.add_argument("-e2vu", action="store_true", help="on hash mismatch: update the database with the new hash")
|
||||||
ap2.add_argument("-e2vp", action="store_true", help="on hash mismatch: panic and quit copyparty")
|
ap2.add_argument("-e2vp", action="store_true", help="on hash mismatch: panic and quit copyparty")
|
||||||
ap2.add_argument("--hist", metavar="PATH", type=u, help="where to store volume data (db, thumbs)")
|
ap2.add_argument("--hist", metavar="PATH", type=u, help="where to store volume data (db, thumbs) (volflag=hist)")
|
||||||
ap2.add_argument("--no-hash", metavar="PTN", type=u, help="regex: disable hashing of matching paths during e2ds folder scans")
|
ap2.add_argument("--no-hash", metavar="PTN", type=u, help="regex: disable hashing of matching paths during e2ds folder scans (volflag=nohash)")
|
||||||
ap2.add_argument("--no-idx", metavar="PTN", type=u, help="regex: disable indexing of matching paths during e2ds folder scans")
|
ap2.add_argument("--no-idx", metavar="PTN", type=u, help="regex: disable indexing of matching paths during e2ds folder scans (volflag=noidx)")
|
||||||
ap2.add_argument("--no-dhash", action="store_true", help="disable rescan acceleration; do full database integrity check -- makes the db ~5%% smaller and bootup/rescans 3~10x slower")
|
ap2.add_argument("--no-dhash", action="store_true", help="disable rescan acceleration; do full database integrity check -- makes the db ~5%% smaller and bootup/rescans 3~10x slower")
|
||||||
ap2.add_argument("--no-forget", action="store_true", help="never forget indexed files, even when deleted from disk -- makes it impossible to ever upload the same file twice")
|
ap2.add_argument("--re-dhash", action="store_true", help="rebuild the cache if it gets out of sync (for example crash on startup during metadata scanning)")
|
||||||
ap2.add_argument("--xdev", action="store_true", help="do not descend into other filesystems (symlink or bind-mount to another HDD, ...)")
|
ap2.add_argument("--no-forget", action="store_true", help="never forget indexed files, even when deleted from disk -- makes it impossible to ever upload the same file twice (volflag=noforget)")
|
||||||
ap2.add_argument("--xvol", action="store_true", help="skip symlinks leaving the volume root")
|
ap2.add_argument("--dbd", metavar="PROFILE", default="wal", help="database durability profile; sets the tradeoff between robustness and speed, see --help-dbd (volflag=dbd)")
|
||||||
|
ap2.add_argument("--xlink", action="store_true", help="on upload: check all volumes for dupes, not just the target volume (volflag=xlink)")
|
||||||
|
ap2.add_argument("--xdev", action="store_true", help="do not descend into other filesystems (symlink or bind-mount to another HDD, ...) (volflag=xdev)")
|
||||||
|
ap2.add_argument("--xvol", action="store_true", help="skip symlinks leaving the volume root (volflag=xvol)")
|
||||||
ap2.add_argument("--hash-mt", metavar="CORES", type=int, default=hcores, help="num cpu cores to use for file hashing; set 0 or 1 for single-core hashing")
|
ap2.add_argument("--hash-mt", metavar="CORES", type=int, default=hcores, help="num cpu cores to use for file hashing; set 0 or 1 for single-core hashing")
|
||||||
ap2.add_argument("--re-maxage", metavar="SEC", type=int, default=0, help="disk rescan volume interval, 0=off, can be set per-volume with the 'scan' volflag")
|
ap2.add_argument("--re-maxage", metavar="SEC", type=int, default=0, help="disk rescan volume interval, 0=off (volflag=scan)")
|
||||||
ap2.add_argument("--db-act", metavar="SEC", type=float, default=10, help="defer any scheduled volume reindexing until SEC seconds after last db write (uploads, renames, ...)")
|
ap2.add_argument("--db-act", metavar="SEC", type=float, default=10, help="defer any scheduled volume reindexing until SEC seconds after last db write (uploads, renames, ...)")
|
||||||
ap2.add_argument("--srch-time", metavar="SEC", type=int, default=45, help="search deadline -- terminate searches running for more than SEC seconds")
|
ap2.add_argument("--srch-time", metavar="SEC", type=int, default=45, help="search deadline -- terminate searches running for more than SEC seconds")
|
||||||
ap2.add_argument("--srch-hits", metavar="N", type=int, default=7999, help="max search results to allow clients to fetch; 125 results will be shown initially")
|
ap2.add_argument("--srch-hits", metavar="N", type=int, default=7999, help="max search results to allow clients to fetch; 125 results will be shown initially")
|
||||||
|
|
||||||
|
|
||||||
|
def add_db_metadata(ap):
|
||||||
ap2 = ap.add_argument_group('metadata db options')
|
ap2 = ap.add_argument_group('metadata db options')
|
||||||
ap2.add_argument("-e2t", action="store_true", help="enable metadata indexing; makes it possible to search for artist/title/codec/resolution/...")
|
ap2.add_argument("-e2t", action="store_true", help="enable metadata indexing; makes it possible to search for artist/title/codec/resolution/...")
|
||||||
ap2.add_argument("-e2ts", action="store_true", help="scan existing files on startup; sets -e2t")
|
ap2.add_argument("-e2ts", action="store_true", help="scan existing files on startup; sets -e2t")
|
||||||
@@ -722,7 +884,7 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
ap2.add_argument("--mtag-to", metavar="SEC", type=int, default=60, help="timeout for ffprobe tag-scan")
|
ap2.add_argument("--mtag-to", metavar="SEC", type=int, default=60, help="timeout for ffprobe tag-scan")
|
||||||
ap2.add_argument("--mtag-mt", metavar="CORES", type=int, default=CORES, help="num cpu cores to use for tag scanning")
|
ap2.add_argument("--mtag-mt", metavar="CORES", type=int, default=CORES, help="num cpu cores to use for tag scanning")
|
||||||
ap2.add_argument("--mtag-v", action="store_true", help="verbose tag scanning; print errors from mtp subprocesses and such")
|
ap2.add_argument("--mtag-v", action="store_true", help="verbose tag scanning; print errors from mtp subprocesses and such")
|
||||||
ap2.add_argument("--mtag-vv", action="store_true", help="debug mtp settings")
|
ap2.add_argument("--mtag-vv", action="store_true", help="debug mtp settings and mutagen/ffprobe parsers")
|
||||||
ap2.add_argument("-mtm", metavar="M=t,t,t", type=u, action="append", help="add/replace metadata mapping")
|
ap2.add_argument("-mtm", metavar="M=t,t,t", type=u, action="append", help="add/replace metadata mapping")
|
||||||
ap2.add_argument("-mte", metavar="M,M,M", type=u, help="tags to index/display (comma-sep.)",
|
ap2.add_argument("-mte", metavar="M,M,M", type=u, help="tags to index/display (comma-sep.)",
|
||||||
default="circle,album,.tn,artist,title,.bpm,key,.dur,.q,.vq,.aq,vc,ac,fmt,res,.fps,ahash,vhash")
|
default="circle,album,.tn,artist,title,.bpm,key,.dur,.q,.vq,.aq,vc,ac,fmt,res,.fps,ahash,vhash")
|
||||||
@@ -730,11 +892,14 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
default=".vq,.aq,vc,ac,fmt,res,.fps")
|
default=".vq,.aq,vc,ac,fmt,res,.fps")
|
||||||
ap2.add_argument("-mtp", metavar="M=[f,]BIN", type=u, action="append", help="read tag M using program BIN to parse the file")
|
ap2.add_argument("-mtp", metavar="M=[f,]BIN", type=u, action="append", help="read tag M using program BIN to parse the file")
|
||||||
|
|
||||||
|
|
||||||
|
def add_ui(ap, retry):
|
||||||
ap2 = ap.add_argument_group('ui options')
|
ap2 = ap.add_argument_group('ui options')
|
||||||
ap2.add_argument("--lang", metavar="LANG", type=u, default="eng", help="language")
|
ap2.add_argument("--lang", metavar="LANG", type=u, default="eng", help="language")
|
||||||
ap2.add_argument("--theme", metavar="NUM", type=int, default=0, help="default theme to use")
|
ap2.add_argument("--theme", metavar="NUM", type=int, default=0, help="default theme to use")
|
||||||
ap2.add_argument("--themes", metavar="NUM", type=int, default=8, help="number of themes installed")
|
ap2.add_argument("--themes", metavar="NUM", type=int, default=8, help="number of themes installed")
|
||||||
ap2.add_argument("--favico", metavar="TXT", type=u, default="c 000 none" if retry else "🎉 000 none", help="favicon text [ foreground [ background ] ], set blank to disable")
|
ap2.add_argument("--favico", metavar="TXT", type=u, default="c 000 none" if retry else "🎉 000 none", help="\033[33mfavicon-text\033[0m [ \033[33mforeground\033[0m [ \033[33mbackground\033[0m ] ], set blank to disable")
|
||||||
|
ap2.add_argument("--mpmc", metavar="URL", type=u, default="", help="change the mediaplayer-toggle mouse cursor; URL to a folder with {2..5}.png inside (or disable with [\033[32m.\033[0m])")
|
||||||
ap2.add_argument("--js-browser", metavar="L", type=u, help="URL to additional JS to include")
|
ap2.add_argument("--js-browser", metavar="L", type=u, help="URL to additional JS to include")
|
||||||
ap2.add_argument("--css-browser", metavar="L", type=u, help="URL to additional CSS to include")
|
ap2.add_argument("--css-browser", metavar="L", type=u, help="URL to additional CSS to include")
|
||||||
ap2.add_argument("--html-head", metavar="TXT", type=u, default="", help="text to append to the <head> of all HTML pages")
|
ap2.add_argument("--html-head", metavar="TXT", type=u, default="", help="text to append to the <head> of all HTML pages")
|
||||||
@@ -742,23 +907,86 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
|
|||||||
ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)")
|
ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)")
|
||||||
ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents")
|
ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents")
|
||||||
|
|
||||||
|
|
||||||
|
def add_debug(ap):
|
||||||
ap2 = ap.add_argument_group('debug options')
|
ap2 = ap.add_argument_group('debug options')
|
||||||
ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile; instead using a traditional file read loop")
|
ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile; instead using a traditional file read loop")
|
||||||
ap2.add_argument("--no-scandir", action="store_true", help="disable scandir; instead using listdir + stat on each file")
|
ap2.add_argument("--no-scandir", action="store_true", help="disable scandir; instead using listdir + stat on each file")
|
||||||
ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing before starting the httpd")
|
ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing before starting the httpd")
|
||||||
ap2.add_argument("--no-htp", action="store_true", help="disable httpserver threadpool, create threads as-needed instead")
|
ap2.add_argument("--no-htp", action="store_true", help="disable httpserver threadpool, create threads as-needed instead")
|
||||||
ap2.add_argument("--stackmon", metavar="P,S", type=u, help="write stacktrace to Path every S second, for example --stackmon=./st/%%Y-%%m/%%d/%%H%%M.xz,60")
|
ap2.add_argument("--rclone-mdns", action="store_true", help="use mdns-domain instead of server-ip on /?hc")
|
||||||
|
ap2.add_argument("--stackmon", metavar="P,S", type=u, help="write stacktrace to Path every S second, for example --stackmon=\033[32m./st/%%Y-%%m/%%d/%%H%%M.xz,60")
|
||||||
ap2.add_argument("--log-thrs", metavar="SEC", type=float, help="list active threads every SEC")
|
ap2.add_argument("--log-thrs", metavar="SEC", type=float, help="list active threads every SEC")
|
||||||
ap2.add_argument("--log-fk", metavar="REGEX", type=u, default="", help="log filekey params for files where path matches REGEX; '.' (a single dot) = all files")
|
ap2.add_argument("--log-fk", metavar="REGEX", type=u, default="", help="log filekey params for files where path matches REGEX; [\033[32m.\033[0m] (a single dot) = all files")
|
||||||
ap2.add_argument("--bak-flips", action="store_true", help="[up2k] if a client uploads a bitflipped/corrupted chunk, store a copy according to --bf-nc and --bf-dir")
|
ap2.add_argument("--bak-flips", action="store_true", help="[up2k] if a client uploads a bitflipped/corrupted chunk, store a copy according to --bf-nc and --bf-dir")
|
||||||
ap2.add_argument("--bf-nc", metavar="NUM", type=int, default=200, help="bak-flips: stop if there's more than NUM files at --kf-dir already; default: 6.3 GiB max (200*32M)")
|
ap2.add_argument("--bf-nc", metavar="NUM", type=int, default=200, help="bak-flips: stop if there's more than NUM files at --kf-dir already; default: 6.3 GiB max (200*32M)")
|
||||||
ap2.add_argument("--bf-dir", metavar="PATH", type=u, default="bf", help="bak-flips: store corrupted chunks at PATH; default: folder named 'bf' wherever copyparty was started")
|
ap2.add_argument("--bf-dir", metavar="PATH", type=u, default="bf", help="bak-flips: store corrupted chunks at PATH; default: folder named 'bf' wherever copyparty was started")
|
||||||
|
|
||||||
|
|
||||||
# fmt: on
|
# fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
def run_argparse(
|
||||||
|
argv: list[str], formatter: Any, retry: bool, nc: int
|
||||||
|
) -> argparse.Namespace:
|
||||||
|
ap = argparse.ArgumentParser(
|
||||||
|
formatter_class=formatter,
|
||||||
|
prog="copyparty",
|
||||||
|
description="http file sharing hub v{} ({})".format(S_VERSION, S_BUILD_DT),
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
fk_salt = unicode(os.path.getmtime(os.path.join(E.cfg, "cert.pem")))
|
||||||
|
except:
|
||||||
|
fk_salt = "hunter2"
|
||||||
|
|
||||||
|
hcores = min(CORES, 4) # optimal on py3.11 @ r5-4500U
|
||||||
|
|
||||||
|
tty = os.environ.get("TERM", "").lower() == "linux"
|
||||||
|
|
||||||
|
srvname = get_srvname()
|
||||||
|
|
||||||
|
add_general(ap, nc, srvname)
|
||||||
|
add_network(ap)
|
||||||
|
add_tls(ap)
|
||||||
|
add_qr(ap, tty)
|
||||||
|
add_zeroconf(ap)
|
||||||
|
add_zc_mdns(ap)
|
||||||
|
add_zc_ssdp(ap)
|
||||||
|
add_upload(ap)
|
||||||
|
add_db_general(ap, hcores)
|
||||||
|
add_db_metadata(ap)
|
||||||
|
add_thumbnail(ap)
|
||||||
|
add_transcoding(ap)
|
||||||
|
add_ftp(ap)
|
||||||
|
add_webdav(ap)
|
||||||
|
add_smb(ap)
|
||||||
|
add_safety(ap, fk_salt)
|
||||||
|
add_optouts(ap)
|
||||||
|
add_shutdown(ap)
|
||||||
|
add_ui(ap, retry)
|
||||||
|
add_admin(ap)
|
||||||
|
add_logging(ap)
|
||||||
|
add_debug(ap)
|
||||||
|
|
||||||
ap2 = ap.add_argument_group("help sections")
|
ap2 = ap.add_argument_group("help sections")
|
||||||
|
sects = get_sects()
|
||||||
for k, h, _ in sects:
|
for k, h, _ in sects:
|
||||||
ap2.add_argument("--help-" + k, action="store_true", help=h)
|
ap2.add_argument("--help-" + k, action="store_true", help=h)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if not retry:
|
||||||
|
raise Exception()
|
||||||
|
|
||||||
|
for x in ap._actions:
|
||||||
|
if not x.help:
|
||||||
|
continue
|
||||||
|
|
||||||
|
a = ["ascii", "replace"]
|
||||||
|
x.help = x.help.encode(*a).decode(*a) + "\033[0m"
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
ret = ap.parse_args(args=argv[1:])
|
ret = ap.parse_args(args=argv[1:])
|
||||||
for k, h, t in sects:
|
for k, h, t in sects:
|
||||||
k2 = "help_" + k.replace("-", "_")
|
k2 = "help_" + k.replace("-", "_")
|
||||||
@@ -784,7 +1012,7 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
S_VERSION,
|
S_VERSION,
|
||||||
CODENAME,
|
CODENAME,
|
||||||
S_BUILD_DT,
|
S_BUILD_DT,
|
||||||
py_desc().replace("[", "\033[1;30m["),
|
py_desc().replace("[", "\033[90m["),
|
||||||
SQLITE_VER,
|
SQLITE_VER,
|
||||||
JINJA_VER,
|
JINJA_VER,
|
||||||
PYFTPD_VER,
|
PYFTPD_VER,
|
||||||
@@ -803,7 +1031,13 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
ensure_cert()
|
ensure_cert()
|
||||||
|
|
||||||
for k, v in zip(argv[1:], argv[2:]):
|
for k, v in zip(argv[1:], argv[2:]):
|
||||||
if k == "-c":
|
if k == "-c" and os.path.isfile(v):
|
||||||
|
supp = args_from_cfg(v)
|
||||||
|
argv.extend(supp)
|
||||||
|
|
||||||
|
for k in argv[1:]:
|
||||||
|
v = k[2:]
|
||||||
|
if k.startswith("-c") and v and os.path.isfile(v):
|
||||||
supp = args_from_cfg(v)
|
supp = args_from_cfg(v)
|
||||||
argv.extend(supp)
|
argv.extend(supp)
|
||||||
|
|
||||||
@@ -819,24 +1053,45 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
argv[idx] = nk
|
argv[idx] = nk
|
||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
|
|
||||||
|
da = len(argv) == 1
|
||||||
try:
|
try:
|
||||||
if len(argv) == 1 and (ANYWIN or not os.geteuid()):
|
if da:
|
||||||
|
argv.extend(["--qr"])
|
||||||
|
if ANYWIN or not os.geteuid():
|
||||||
argv.extend(["-p80,443,3923", "--ign-ebind"])
|
argv.extend(["-p80,443,3923", "--ign-ebind"])
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
if da:
|
||||||
|
t = "no arguments provided; will use {}\n"
|
||||||
|
lprint(t.format(" ".join(argv[1:])))
|
||||||
|
|
||||||
|
nc = 1024
|
||||||
|
try:
|
||||||
|
import resource
|
||||||
|
|
||||||
|
_, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||||
|
if hard > 0: # -1 == infinite
|
||||||
|
nc = min(nc, hard // 4)
|
||||||
|
except:
|
||||||
|
nc = 512
|
||||||
|
|
||||||
retry = False
|
retry = False
|
||||||
for fmtr in [RiceFormatter, RiceFormatter, Dodge11874, BasicDodge11874]:
|
for fmtr in [RiceFormatter, RiceFormatter, Dodge11874, BasicDodge11874]:
|
||||||
try:
|
try:
|
||||||
al = run_argparse(argv, fmtr, retry)
|
al = run_argparse(argv, fmtr, retry, nc)
|
||||||
|
break
|
||||||
except SystemExit:
|
except SystemExit:
|
||||||
raise
|
raise
|
||||||
except:
|
except:
|
||||||
retry = True
|
retry = True
|
||||||
lprint("\n[ {} ]:\n{}\n".format(fmtr, min_ex()))
|
lprint("\n[ {} ]:\n{}\n".format(fmtr, min_ex()))
|
||||||
|
|
||||||
assert al
|
try:
|
||||||
|
assert al # type: ignore
|
||||||
al.E = E # __init__ is not shared when oxidized
|
al.E = E # __init__ is not shared when oxidized
|
||||||
|
except:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
if WINDOWS and not al.keep_qem:
|
if WINDOWS and not al.keep_qem:
|
||||||
try:
|
try:
|
||||||
@@ -863,7 +1118,7 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
if re.match("c[^,]", opt):
|
if re.match("c[^,]", opt):
|
||||||
mod = True
|
mod = True
|
||||||
na.append("c," + opt[1:])
|
na.append("c," + opt[1:])
|
||||||
elif re.sub("^[rwmdg]*", "", opt) and "," not in opt:
|
elif re.sub("^[rwmdgG]*", "", opt) and "," not in opt:
|
||||||
mod = True
|
mod = True
|
||||||
perm = opt[0]
|
perm = opt[0]
|
||||||
if perm == "a":
|
if perm == "a":
|
||||||
@@ -888,6 +1143,11 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
if getattr(al, k1):
|
if getattr(al, k1):
|
||||||
setattr(al, k2, True)
|
setattr(al, k2, True)
|
||||||
|
|
||||||
|
# propagate unplications
|
||||||
|
for k1, k2 in UNPLICATIONS:
|
||||||
|
if getattr(al, k1):
|
||||||
|
setattr(al, k2, False)
|
||||||
|
|
||||||
al.i = al.i.split(",")
|
al.i = al.i.split(",")
|
||||||
try:
|
try:
|
||||||
if "-" in al.p:
|
if "-" in al.p:
|
||||||
@@ -904,6 +1164,12 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
zs = "argument {} cannot be '{}'; try one of these: {}"
|
zs = "argument {} cannot be '{}'; try one of these: {}"
|
||||||
raise Exception(zs.format(arg, val, okays))
|
raise Exception(zs.format(arg, val, okays))
|
||||||
|
|
||||||
|
if not al.qrs and [k for k in argv if k.startswith("--qr")]:
|
||||||
|
al.qr = True
|
||||||
|
|
||||||
|
if al.ihead:
|
||||||
|
al.ihead = [x.lower() for x in al.ihead]
|
||||||
|
|
||||||
if HAVE_SSL:
|
if HAVE_SSL:
|
||||||
if al.ssl_ver:
|
if al.ssl_ver:
|
||||||
configure_ssl_ver(al)
|
configure_ssl_ver(al)
|
||||||
@@ -919,6 +1185,10 @@ def main(argv: Optional[list[str]] = None) -> None:
|
|||||||
+ " (if you crash with codec errors then that is why)"
|
+ " (if you crash with codec errors then that is why)"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if PY2 and al.smb:
|
||||||
|
print("error: python2 cannot --smb")
|
||||||
|
return
|
||||||
|
|
||||||
if sys.version_info < (3, 6):
|
if sys.version_info < (3, 6):
|
||||||
al.no_scandir = True
|
al.no_scandir = True
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
VERSION = (1, 4, 3)
|
VERSION = (1, 5, 2)
|
||||||
CODENAME = "mostly reliable"
|
CODENAME = "babel"
|
||||||
BUILD_DT = (2022, 9, 26)
|
BUILD_DT = (2022, 12, 12)
|
||||||
|
|
||||||
S_VERSION = ".".join(map(str, VERSION))
|
S_VERSION = ".".join(map(str, VERSION))
|
||||||
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)
|
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ from .util import (
|
|||||||
IMPLICATIONS,
|
IMPLICATIONS,
|
||||||
META_NOBOTS,
|
META_NOBOTS,
|
||||||
SQLITE_VER,
|
SQLITE_VER,
|
||||||
|
UNPLICATIONS,
|
||||||
Pebkac,
|
Pebkac,
|
||||||
absreal,
|
absreal,
|
||||||
fsenc,
|
fsenc,
|
||||||
@@ -30,15 +31,12 @@ from .util import (
|
|||||||
unhumanize,
|
unhumanize,
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from collections.abc import Iterable
|
from collections.abc import Iterable
|
||||||
|
|
||||||
import typing
|
|
||||||
from typing import Any, Generator, Optional, Union
|
from typing import Any, Generator, Optional, Union
|
||||||
|
|
||||||
from .util import RootLogger
|
from .util import RootLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
pass
|
pass
|
||||||
@@ -58,18 +56,20 @@ class AXS(object):
|
|||||||
umove: Optional[Union[list[str], set[str]]] = None,
|
umove: Optional[Union[list[str], set[str]]] = None,
|
||||||
udel: Optional[Union[list[str], set[str]]] = None,
|
udel: Optional[Union[list[str], set[str]]] = None,
|
||||||
uget: Optional[Union[list[str], set[str]]] = None,
|
uget: Optional[Union[list[str], set[str]]] = None,
|
||||||
|
upget: Optional[Union[list[str], set[str]]] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
self.uread: set[str] = set(uread or [])
|
self.uread: set[str] = set(uread or [])
|
||||||
self.uwrite: set[str] = set(uwrite or [])
|
self.uwrite: set[str] = set(uwrite or [])
|
||||||
self.umove: set[str] = set(umove or [])
|
self.umove: set[str] = set(umove or [])
|
||||||
self.udel: set[str] = set(udel or [])
|
self.udel: set[str] = set(udel or [])
|
||||||
self.uget: set[str] = set(uget or [])
|
self.uget: set[str] = set(uget or [])
|
||||||
|
self.upget: set[str] = set(upget or [])
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
def __repr__(self) -> str:
|
||||||
return "AXS({})".format(
|
return "AXS({})".format(
|
||||||
", ".join(
|
", ".join(
|
||||||
"{}={!r}".format(k, self.__dict__[k])
|
"{}={!r}".format(k, self.__dict__[k])
|
||||||
for k in "uread uwrite umove udel uget".split()
|
for k in "uread uwrite umove udel uget upget".split()
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -293,6 +293,7 @@ class VFS(object):
|
|||||||
self.amove: dict[str, list[str]] = {}
|
self.amove: dict[str, list[str]] = {}
|
||||||
self.adel: dict[str, list[str]] = {}
|
self.adel: dict[str, list[str]] = {}
|
||||||
self.aget: dict[str, list[str]] = {}
|
self.aget: dict[str, list[str]] = {}
|
||||||
|
self.apget: dict[str, list[str]] = {}
|
||||||
|
|
||||||
if realpath:
|
if realpath:
|
||||||
self.histpath = os.path.join(realpath, ".hist") # db / thumbcache
|
self.histpath = os.path.join(realpath, ".hist") # db / thumbcache
|
||||||
@@ -369,7 +370,6 @@ class VFS(object):
|
|||||||
|
|
||||||
def _find(self, vpath: str) -> tuple["VFS", str]:
|
def _find(self, vpath: str) -> tuple["VFS", str]:
|
||||||
"""return [vfs,remainder]"""
|
"""return [vfs,remainder]"""
|
||||||
vpath = undot(vpath)
|
|
||||||
if vpath == "":
|
if vpath == "":
|
||||||
return self, ""
|
return self, ""
|
||||||
|
|
||||||
@@ -380,13 +380,15 @@ class VFS(object):
|
|||||||
rem = ""
|
rem = ""
|
||||||
|
|
||||||
if name in self.nodes:
|
if name in self.nodes:
|
||||||
return self.nodes[name]._find(rem)
|
return self.nodes[name]._find(undot(rem))
|
||||||
|
|
||||||
return self, vpath
|
return self, vpath
|
||||||
|
|
||||||
def can_access(self, vpath: str, uname: str) -> tuple[bool, bool, bool, bool, bool]:
|
def can_access(
|
||||||
"""can Read,Write,Move,Delete,Get"""
|
self, vpath: str, uname: str
|
||||||
vn, _ = self._find(vpath)
|
) -> tuple[bool, bool, bool, bool, bool, bool]:
|
||||||
|
"""can Read,Write,Move,Delete,Get,Upget"""
|
||||||
|
vn, _ = self._find(undot(vpath))
|
||||||
c = vn.axs
|
c = vn.axs
|
||||||
return (
|
return (
|
||||||
uname in c.uread or "*" in c.uread,
|
uname in c.uread or "*" in c.uread,
|
||||||
@@ -394,6 +396,7 @@ class VFS(object):
|
|||||||
uname in c.umove or "*" in c.umove,
|
uname in c.umove or "*" in c.umove,
|
||||||
uname in c.udel or "*" in c.udel,
|
uname in c.udel or "*" in c.udel,
|
||||||
uname in c.uget or "*" in c.uget,
|
uname in c.uget or "*" in c.uget,
|
||||||
|
uname in c.upget or "*" in c.upget,
|
||||||
)
|
)
|
||||||
|
|
||||||
def get(
|
def get(
|
||||||
@@ -405,6 +408,7 @@ class VFS(object):
|
|||||||
will_move: bool = False,
|
will_move: bool = False,
|
||||||
will_del: bool = False,
|
will_del: bool = False,
|
||||||
will_get: bool = False,
|
will_get: bool = False,
|
||||||
|
err: int = 403,
|
||||||
) -> tuple["VFS", str]:
|
) -> tuple["VFS", str]:
|
||||||
"""returns [vfsnode,fs_remainder] if user has the requested permissions"""
|
"""returns [vfsnode,fs_remainder] if user has the requested permissions"""
|
||||||
if ANYWIN:
|
if ANYWIN:
|
||||||
@@ -414,7 +418,7 @@ class VFS(object):
|
|||||||
self.log("vfs", "invalid relpath [{}]".format(vpath))
|
self.log("vfs", "invalid relpath [{}]".format(vpath))
|
||||||
raise Pebkac(404)
|
raise Pebkac(404)
|
||||||
|
|
||||||
vn, rem = self._find(vpath)
|
vn, rem = self._find(undot(vpath))
|
||||||
c: AXS = vn.axs
|
c: AXS = vn.axs
|
||||||
|
|
||||||
for req, d, msg in [
|
for req, d, msg in [
|
||||||
@@ -426,7 +430,7 @@ class VFS(object):
|
|||||||
]:
|
]:
|
||||||
if req and (uname not in d and "*" not in d) and uname != LEELOO_DALLAS:
|
if req and (uname not in d and "*" not in d) and uname != LEELOO_DALLAS:
|
||||||
t = "you don't have {}-access for this location"
|
t = "you don't have {}-access for this location"
|
||||||
raise Pebkac(403, t.format(msg))
|
raise Pebkac(err, t.format(msg))
|
||||||
|
|
||||||
return vn, rem
|
return vn, rem
|
||||||
|
|
||||||
@@ -441,11 +445,20 @@ class VFS(object):
|
|||||||
|
|
||||||
def canonical(self, rem: str, resolve: bool = True) -> str:
|
def canonical(self, rem: str, resolve: bool = True) -> str:
|
||||||
"""returns the canonical path (fully-resolved absolute fs path)"""
|
"""returns the canonical path (fully-resolved absolute fs path)"""
|
||||||
rp = self.realpath
|
ap = self.realpath
|
||||||
if rem:
|
if rem:
|
||||||
rp += "/" + rem
|
ap += "/" + rem
|
||||||
|
|
||||||
return absreal(rp) if resolve else rp
|
return absreal(ap) if resolve else ap
|
||||||
|
|
||||||
|
def dcanonical(self, rem: str) -> str:
|
||||||
|
"""resolves until the final component (filename)"""
|
||||||
|
ap = self.realpath
|
||||||
|
if rem:
|
||||||
|
ap += "/" + rem
|
||||||
|
|
||||||
|
ad, fn = os.path.split(ap)
|
||||||
|
return os.path.join(absreal(ad), fn)
|
||||||
|
|
||||||
def ls(
|
def ls(
|
||||||
self,
|
self,
|
||||||
@@ -562,14 +575,21 @@ class VFS(object):
|
|||||||
yield x
|
yield x
|
||||||
|
|
||||||
def zipgen(
|
def zipgen(
|
||||||
self, vrem: str, flt: set[str], uname: str, dots: bool, scandir: bool
|
self,
|
||||||
|
vrem: str,
|
||||||
|
flt: set[str],
|
||||||
|
uname: str,
|
||||||
|
dots: bool,
|
||||||
|
dirs: bool,
|
||||||
|
scandir: bool,
|
||||||
|
wrap: bool = True,
|
||||||
) -> Generator[dict[str, Any], None, None]:
|
) -> Generator[dict[str, Any], None, None]:
|
||||||
|
|
||||||
# if multiselect: add all items to archive root
|
# if multiselect: add all items to archive root
|
||||||
# if single folder: the folder itself is the top-level item
|
# if single folder: the folder itself is the top-level item
|
||||||
folder = "" if flt else (vrem.split("/")[-1] or "top")
|
folder = "" if flt or not wrap else (vrem.split("/")[-1] or "top")
|
||||||
|
|
||||||
g = self.walk(folder, vrem, [], uname, [[True]], dots, scandir, False)
|
g = self.walk(folder, vrem, [], uname, [[True, False]], dots, scandir, False)
|
||||||
for _, _, vpath, apath, files, rd, vd in g:
|
for _, _, vpath, apath, files, rd, vd in g:
|
||||||
if flt:
|
if flt:
|
||||||
files = [x for x in files if x[0] in flt]
|
files = [x for x in files if x[0] in flt]
|
||||||
@@ -603,6 +623,21 @@ class VFS(object):
|
|||||||
for f in [{"vp": v, "ap": a, "st": n[1]} for v, a, n in ret]:
|
for f in [{"vp": v, "ap": a, "st": n[1]} for v, a, n in ret]:
|
||||||
yield f
|
yield f
|
||||||
|
|
||||||
|
if not dirs:
|
||||||
|
continue
|
||||||
|
|
||||||
|
ts = int(time.time())
|
||||||
|
st = os.stat_result((16877, -1, -1, 1, 1000, 1000, 8, ts, ts, ts))
|
||||||
|
dnames = [n[0] for n in rd]
|
||||||
|
dstats = [n[1] for n in rd]
|
||||||
|
dnames += list(vd.keys())
|
||||||
|
dstats += [st] * len(vd)
|
||||||
|
vpaths = [vpath + "/" + n for n in dnames] if vpath else dnames
|
||||||
|
apaths = [os.path.join(apath, n) for n in dnames]
|
||||||
|
ret2 = list(zip(vpaths, apaths, dstats))
|
||||||
|
for d in [{"vp": v, "ap": a, "st": n} for v, a, n in ret2]:
|
||||||
|
yield d
|
||||||
|
|
||||||
|
|
||||||
if WINDOWS:
|
if WINDOWS:
|
||||||
re_vol = re.compile(r"^([a-zA-Z]:[\\/][^:]*|[^:]*):([^:]*):(.*)$")
|
re_vol = re.compile(r"^([a-zA-Z]:[\\/][^:]*|[^:]*):([^:]*):(.*)$")
|
||||||
@@ -668,7 +703,8 @@ class AuthSrv(object):
|
|||||||
|
|
||||||
def _parse_config_file(
|
def _parse_config_file(
|
||||||
self,
|
self,
|
||||||
fd: typing.BinaryIO,
|
fp: str,
|
||||||
|
cfg_lines: list[str],
|
||||||
acct: dict[str, str],
|
acct: dict[str, str],
|
||||||
daxs: dict[str, AXS],
|
daxs: dict[str, AXS],
|
||||||
mflags: dict[str, dict[str, Any]],
|
mflags: dict[str, dict[str, Any]],
|
||||||
@@ -678,7 +714,8 @@ class AuthSrv(object):
|
|||||||
vol_src = None
|
vol_src = None
|
||||||
vol_dst = None
|
vol_dst = None
|
||||||
self.line_ctr = 0
|
self.line_ctr = 0
|
||||||
for ln in [x.decode("utf-8").strip() for x in fd]:
|
expand_config_file(cfg_lines, fp, "")
|
||||||
|
for ln in cfg_lines:
|
||||||
self.line_ctr += 1
|
self.line_ctr += 1
|
||||||
if not ln and vol_src is not None:
|
if not ln and vol_src is not None:
|
||||||
vol_src = None
|
vol_src = None
|
||||||
@@ -707,6 +744,9 @@ class AuthSrv(object):
|
|||||||
if not vol_dst.startswith("/"):
|
if not vol_dst.startswith("/"):
|
||||||
raise Exception('invalid mountpoint "{}"'.format(vol_dst))
|
raise Exception('invalid mountpoint "{}"'.format(vol_dst))
|
||||||
|
|
||||||
|
if vol_src.startswith("~"):
|
||||||
|
vol_src = os.path.expanduser(vol_src)
|
||||||
|
|
||||||
# cfg files override arguments and previous files
|
# cfg files override arguments and previous files
|
||||||
vol_src = absreal(vol_src)
|
vol_src = absreal(vol_src)
|
||||||
vol_dst = vol_dst.strip("/")
|
vol_dst = vol_dst.strip("/")
|
||||||
@@ -723,12 +763,13 @@ class AuthSrv(object):
|
|||||||
t = "WARNING (config-file): permission flag 'a' is deprecated; please use 'rw' instead"
|
t = "WARNING (config-file): permission flag 'a' is deprecated; please use 'rw' instead"
|
||||||
self.log(t, 1)
|
self.log(t, 1)
|
||||||
|
|
||||||
|
assert vol_dst is not None
|
||||||
self._read_vol_str(lvl, uname, daxs[vol_dst], mflags[vol_dst])
|
self._read_vol_str(lvl, uname, daxs[vol_dst], mflags[vol_dst])
|
||||||
|
|
||||||
def _read_vol_str(
|
def _read_vol_str(
|
||||||
self, lvl: str, uname: str, axs: AXS, flags: dict[str, Any]
|
self, lvl: str, uname: str, axs: AXS, flags: dict[str, Any]
|
||||||
) -> None:
|
) -> None:
|
||||||
if lvl.strip("crwmdg"):
|
if lvl.strip("crwmdgG"):
|
||||||
raise Exception("invalid volflag: {},{}".format(lvl, uname))
|
raise Exception("invalid volflag: {},{}".format(lvl, uname))
|
||||||
|
|
||||||
if lvl == "c":
|
if lvl == "c":
|
||||||
@@ -758,7 +799,9 @@ class AuthSrv(object):
|
|||||||
("m", axs.umove),
|
("m", axs.umove),
|
||||||
("d", axs.udel),
|
("d", axs.udel),
|
||||||
("g", axs.uget),
|
("g", axs.uget),
|
||||||
]:
|
("G", axs.uget),
|
||||||
|
("G", axs.upget),
|
||||||
|
]: # b bb bbb
|
||||||
if ch in lvl:
|
if ch in lvl:
|
||||||
al.add(un)
|
al.add(un)
|
||||||
|
|
||||||
@@ -808,7 +851,7 @@ class AuthSrv(object):
|
|||||||
|
|
||||||
if self.args.v:
|
if self.args.v:
|
||||||
# list of src:dst:permset:permset:...
|
# list of src:dst:permset:permset:...
|
||||||
# permset is <rwmdg>[,username][,username] or <c>,<flag>[=args]
|
# permset is <rwmdgG>[,username][,username] or <c>,<flag>[=args]
|
||||||
for v_str in self.args.v:
|
for v_str in self.args.v:
|
||||||
m = re_vol.match(v_str)
|
m = re_vol.match(v_str)
|
||||||
if not m:
|
if not m:
|
||||||
@@ -829,12 +872,15 @@ class AuthSrv(object):
|
|||||||
|
|
||||||
if self.args.c:
|
if self.args.c:
|
||||||
for cfg_fn in self.args.c:
|
for cfg_fn in self.args.c:
|
||||||
with open(cfg_fn, "rb") as f:
|
lns: list[str] = []
|
||||||
try:
|
try:
|
||||||
self._parse_config_file(f, acct, daxs, mflags, mount)
|
self._parse_config_file(cfg_fn, lns, acct, daxs, mflags, mount)
|
||||||
except:
|
except:
|
||||||
t = "\n\033[1;31m\nerror in config file {} on line {}:\n\033[0m"
|
lns = lns[: self.line_ctr]
|
||||||
self.log(t.format(cfg_fn, self.line_ctr), 1)
|
slns = ["{:4}: {}".format(n, s) for n, s in enumerate(lns, 1)]
|
||||||
|
t = "\033[1;31m\nerror @ line {}, included from {}\033[0m"
|
||||||
|
t = t.format(self.line_ctr, cfg_fn)
|
||||||
|
self.log("\n{0}\n{1}{0}".format(t, "\n".join(slns)))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# case-insensitive; normalize
|
# case-insensitive; normalize
|
||||||
@@ -870,10 +916,11 @@ class AuthSrv(object):
|
|||||||
zv.flags = mflags[dst]
|
zv.flags = mflags[dst]
|
||||||
zv.dbv = None
|
zv.dbv = None
|
||||||
|
|
||||||
|
assert vfs
|
||||||
vfs.all_vols = {}
|
vfs.all_vols = {}
|
||||||
vfs.get_all_vols(vfs.all_vols)
|
vfs.get_all_vols(vfs.all_vols)
|
||||||
|
|
||||||
for perm in "read write move del get".split():
|
for perm in "read write move del get pget".split():
|
||||||
axs_key = "u" + perm
|
axs_key = "u" + perm
|
||||||
unames = ["*"] + list(acct.keys())
|
unames = ["*"] + list(acct.keys())
|
||||||
umap: dict[str, list[str]] = {x: [] for x in unames}
|
umap: dict[str, list[str]] = {x: [] for x in unames}
|
||||||
@@ -888,7 +935,7 @@ class AuthSrv(object):
|
|||||||
all_users = {}
|
all_users = {}
|
||||||
missing_users = {}
|
missing_users = {}
|
||||||
for axs in daxs.values():
|
for axs in daxs.values():
|
||||||
for d in [axs.uread, axs.uwrite, axs.umove, axs.udel, axs.uget]:
|
for d in [axs.uread, axs.uwrite, axs.umove, axs.udel, axs.uget, axs.upget]:
|
||||||
for usr in d:
|
for usr in d:
|
||||||
all_users[usr] = 1
|
all_users[usr] = 1
|
||||||
if usr != "*" and usr not in acct:
|
if usr != "*" and usr not in acct:
|
||||||
@@ -1071,7 +1118,12 @@ class AuthSrv(object):
|
|||||||
if getattr(self.args, k):
|
if getattr(self.args, k):
|
||||||
vol.flags[k] = True
|
vol.flags[k] = True
|
||||||
|
|
||||||
for ga, vf in [["no_forget", "noforget"], ["magic", "magic"]]:
|
for ga, vf in (
|
||||||
|
("no_forget", "noforget"),
|
||||||
|
("no_dupe", "nodupe"),
|
||||||
|
("magic", "magic"),
|
||||||
|
("xlink", "xlink"),
|
||||||
|
):
|
||||||
if getattr(self.args, ga):
|
if getattr(self.args, ga):
|
||||||
vol.flags[vf] = True
|
vol.flags[vf] = True
|
||||||
|
|
||||||
@@ -1079,6 +1131,16 @@ class AuthSrv(object):
|
|||||||
if k1 in vol.flags:
|
if k1 in vol.flags:
|
||||||
vol.flags[k2] = True
|
vol.flags[k2] = True
|
||||||
|
|
||||||
|
for k1, k2 in UNPLICATIONS:
|
||||||
|
if k1 in vol.flags:
|
||||||
|
vol.flags[k2] = False
|
||||||
|
|
||||||
|
dbds = "acid|swal|wal|yolo"
|
||||||
|
vol.flags["dbd"] = dbd = vol.flags.get("dbd") or self.args.dbd
|
||||||
|
if dbd not in dbds.split("|"):
|
||||||
|
t = "invalid dbd [{}]; must be one of [{}]"
|
||||||
|
raise Exception(t.format(dbd, dbds))
|
||||||
|
|
||||||
# default tag cfgs if unset
|
# default tag cfgs if unset
|
||||||
if "mte" not in vol.flags:
|
if "mte" not in vol.flags:
|
||||||
vol.flags["mte"] = self.args.mte
|
vol.flags["mte"] = self.args.mte
|
||||||
@@ -1175,6 +1237,18 @@ class AuthSrv(object):
|
|||||||
self.log(t.format(mtp), 1)
|
self.log(t.format(mtp), 1)
|
||||||
errors = True
|
errors = True
|
||||||
|
|
||||||
|
have_daw = False
|
||||||
|
for vol in vfs.all_vols.values():
|
||||||
|
daw = vol.flags.get("daw") or self.args.daw
|
||||||
|
if daw:
|
||||||
|
vol.flags["daw"] = True
|
||||||
|
have_daw = True
|
||||||
|
|
||||||
|
if have_daw and self.args.no_dav:
|
||||||
|
t = 'volume "/{}" has volflag "daw" (webdav write-access), but --no-dav is set'
|
||||||
|
self.log(t, 1)
|
||||||
|
errors = True
|
||||||
|
|
||||||
if errors:
|
if errors:
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
@@ -1193,6 +1267,7 @@ class AuthSrv(object):
|
|||||||
[" move", "umove"],
|
[" move", "umove"],
|
||||||
["delete", "udel"],
|
["delete", "udel"],
|
||||||
[" get", "uget"],
|
[" get", "uget"],
|
||||||
|
[" upget", "upget"],
|
||||||
]:
|
]:
|
||||||
u = list(sorted(getattr(zv.axs, attr)))
|
u = list(sorted(getattr(zv.axs, attr)))
|
||||||
u = ", ".join("\033[35meverybody\033[0m" if x == "*" else x for x in u)
|
u = ", ".join("\033[35meverybody\033[0m" if x == "*" else x for x in u)
|
||||||
@@ -1288,10 +1363,11 @@ class AuthSrv(object):
|
|||||||
raise Exception("volume not found: " + zs)
|
raise Exception("volume not found: " + zs)
|
||||||
|
|
||||||
self.log(str({"users": users, "vols": vols, "flags": flags}))
|
self.log(str({"users": users, "vols": vols, "flags": flags}))
|
||||||
t = "/{}: read({}) write({}) move({}) del({}) get({})"
|
t = "/{}: read({}) write({}) move({}) del({}) get({}) upget({})"
|
||||||
for k, zv in self.vfs.all_vols.items():
|
for k, zv in self.vfs.all_vols.items():
|
||||||
vc = zv.axs
|
vc = zv.axs
|
||||||
self.log(t.format(k, vc.uread, vc.uwrite, vc.umove, vc.udel, vc.uget))
|
vs = [k, vc.uread, vc.uwrite, vc.umove, vc.udel, vc.uget, vc.upget]
|
||||||
|
self.log(t.format(*vs))
|
||||||
|
|
||||||
flag_v = "v" in flags
|
flag_v = "v" in flags
|
||||||
flag_ln = "ln" in flags
|
flag_ln = "ln" in flags
|
||||||
@@ -1316,7 +1392,7 @@ class AuthSrv(object):
|
|||||||
"",
|
"",
|
||||||
[],
|
[],
|
||||||
u,
|
u,
|
||||||
[[True]],
|
[[True, False]],
|
||||||
True,
|
True,
|
||||||
not self.args.no_scandir,
|
not self.args.no_scandir,
|
||||||
False,
|
False,
|
||||||
@@ -1360,3 +1436,33 @@ class AuthSrv(object):
|
|||||||
|
|
||||||
if not flag_r:
|
if not flag_r:
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
|
def expand_config_file(ret: list[str], fp: str, ipath: str) -> None:
|
||||||
|
"""expand all % file includes"""
|
||||||
|
fp = absreal(fp)
|
||||||
|
ipath += " -> " + fp
|
||||||
|
ret.append("#\033[36m opening cfg file{}\033[0m".format(ipath))
|
||||||
|
if len(ipath.split(" -> ")) > 64:
|
||||||
|
raise Exception("hit max depth of 64 includes")
|
||||||
|
|
||||||
|
if os.path.isdir(fp):
|
||||||
|
for fn in sorted(os.listdir(fp)):
|
||||||
|
fp2 = os.path.join(fp, fn)
|
||||||
|
if not os.path.isfile(fp2):
|
||||||
|
continue # dont recurse
|
||||||
|
|
||||||
|
expand_config_file(ret, fp2, ipath)
|
||||||
|
return
|
||||||
|
|
||||||
|
with open(fp, "rb") as f:
|
||||||
|
for ln in [x.decode("utf-8").strip() for x in f]:
|
||||||
|
if ln.startswith("% "):
|
||||||
|
fp2 = ln[1:].strip()
|
||||||
|
fp2 = os.path.join(os.path.dirname(fp), fp2)
|
||||||
|
expand_config_file(ret, fp2, ipath)
|
||||||
|
continue
|
||||||
|
|
||||||
|
ret.append(ln)
|
||||||
|
|
||||||
|
ret.append("#\033[36m closed{}\033[0m".format(ipath))
|
||||||
|
|||||||
@@ -4,14 +4,13 @@ from __future__ import print_function, unicode_literals
|
|||||||
import os
|
import os
|
||||||
|
|
||||||
from ..util import SYMTIME, fsdec, fsenc
|
from ..util import SYMTIME, fsdec, fsenc
|
||||||
from . import path
|
from . import path as path
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Optional
|
from typing import Any, Optional
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
_ = (path,)
|
_ = (path,)
|
||||||
|
__all__ = ["path"]
|
||||||
|
|
||||||
# grep -hRiE '(^|[^a-zA-Z_\.-])os\.' . | gsed -r 's/ /\n/g;s/\(/(\n/g' | grep -hRiE '(^|[^a-zA-Z_\.-])os\.' | sort | uniq -c
|
# grep -hRiE '(^|[^a-zA-Z_\.-])os\.' . | gsed -r 's/ /\n/g;s/\(/(\n/g' | grep -hRiE '(^|[^a-zA-Z_\.-])os\.' | sort | uniq -c
|
||||||
# printf 'os\.(%s)' "$(grep ^def bos/__init__.py | gsed -r 's/^def //;s/\(.*//' | tr '\n' '|' | gsed -r 's/.$//')"
|
# printf 'os\.(%s)' "$(grep ^def bos/__init__.py | gsed -r 's/^def //;s/\(.*//' | tr '\n' '|' | gsed -r 's/.$//')"
|
||||||
@@ -25,19 +24,25 @@ def listdir(p: str = ".") -> list[str]:
|
|||||||
return [fsdec(x) for x in os.listdir(fsenc(p))]
|
return [fsdec(x) for x in os.listdir(fsenc(p))]
|
||||||
|
|
||||||
|
|
||||||
def makedirs(name: str, mode: int = 0o755, exist_ok: bool = True) -> None:
|
def makedirs(name: str, mode: int = 0o755, exist_ok: bool = True) -> bool:
|
||||||
bname = fsenc(name)
|
bname = fsenc(name)
|
||||||
try:
|
try:
|
||||||
os.makedirs(bname, mode)
|
os.makedirs(bname, mode)
|
||||||
|
return True
|
||||||
except:
|
except:
|
||||||
if not exist_ok or not os.path.isdir(bname):
|
if not exist_ok or not os.path.isdir(bname):
|
||||||
raise
|
raise
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
def mkdir(p: str, mode: int = 0o755) -> None:
|
def mkdir(p: str, mode: int = 0o755) -> None:
|
||||||
return os.mkdir(fsenc(p), mode)
|
return os.mkdir(fsenc(p), mode)
|
||||||
|
|
||||||
|
|
||||||
|
def open(p: str, *a, **ka) -> int:
|
||||||
|
return os.open(fsenc(p), *a, **ka)
|
||||||
|
|
||||||
|
|
||||||
def rename(src: str, dst: str) -> None:
|
def rename(src: str, dst: str) -> None:
|
||||||
return os.rename(fsenc(src), fsenc(dst))
|
return os.rename(fsenc(src), fsenc(dst))
|
||||||
|
|
||||||
|
|||||||
@@ -3,21 +3,20 @@ from __future__ import print_function, unicode_literals
|
|||||||
|
|
||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
|
import traceback
|
||||||
|
|
||||||
import queue
|
import queue
|
||||||
|
|
||||||
from .__init__ import CORES, TYPE_CHECKING
|
from .__init__ import CORES, TYPE_CHECKING
|
||||||
from .broker_mpw import MpWorker
|
from .broker_mpw import MpWorker
|
||||||
from .broker_util import try_exec
|
from .broker_util import try_exec
|
||||||
from .util import mp
|
from .util import Daemon, mp
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any
|
from typing import Any
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class MProcess(mp.Process):
|
class MProcess(mp.Process):
|
||||||
@@ -51,13 +50,7 @@ class BrokerMp(object):
|
|||||||
q_yield: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(64)
|
q_yield: queue.Queue[tuple[int, str, list[Any]]] = mp.Queue(64)
|
||||||
|
|
||||||
proc = MProcess(q_pend, q_yield, MpWorker, (q_pend, q_yield, self.args, n))
|
proc = MProcess(q_pend, q_yield, MpWorker, (q_pend, q_yield, self.args, n))
|
||||||
|
Daemon(self.collector, "mp-sink-{}".format(n), (proc,))
|
||||||
thr = threading.Thread(
|
|
||||||
target=self.collector, args=(proc,), name="mp-sink-{}".format(n)
|
|
||||||
)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
self.procs.append(proc)
|
self.procs.append(proc)
|
||||||
proc.start()
|
proc.start()
|
||||||
|
|
||||||
@@ -101,12 +94,15 @@ class BrokerMp(object):
|
|||||||
|
|
||||||
else:
|
else:
|
||||||
# new ipc invoking managed service in hub
|
# new ipc invoking managed service in hub
|
||||||
|
try:
|
||||||
obj = self.hub
|
obj = self.hub
|
||||||
for node in dest.split("."):
|
for node in dest.split("."):
|
||||||
obj = getattr(obj, node)
|
obj = getattr(obj, node)
|
||||||
|
|
||||||
# TODO will deadlock if dest performs another ipc
|
# TODO will deadlock if dest performs another ipc
|
||||||
rv = try_exec(retq_id, obj, *args)
|
rv = try_exec(retq_id, obj, *args)
|
||||||
|
except:
|
||||||
|
rv = ["exception", "stack", traceback.format_exc()]
|
||||||
|
|
||||||
if retq_id:
|
if retq_id:
|
||||||
proc.q_pend.put((retq_id, "retq", rv))
|
proc.q_pend.put((retq_id, "retq", rv))
|
||||||
@@ -121,6 +117,10 @@ class BrokerMp(object):
|
|||||||
for p in self.procs:
|
for p in self.procs:
|
||||||
p.q_pend.put((0, dest, [args[0], len(self.procs)]))
|
p.q_pend.put((0, dest, [args[0], len(self.procs)]))
|
||||||
|
|
||||||
|
elif dest == "set_netdevs":
|
||||||
|
for p in self.procs:
|
||||||
|
p.q_pend.put((0, dest, list(args)))
|
||||||
|
|
||||||
elif dest == "cb_httpsrv_up":
|
elif dest == "cb_httpsrv_up":
|
||||||
self.hub.cb_httpsrv_up()
|
self.hub.cb_httpsrv_up()
|
||||||
|
|
||||||
|
|||||||
@@ -9,17 +9,16 @@ import threading
|
|||||||
|
|
||||||
import queue
|
import queue
|
||||||
|
|
||||||
|
from .__init__ import ANYWIN
|
||||||
from .authsrv import AuthSrv
|
from .authsrv import AuthSrv
|
||||||
from .broker_util import BrokerCli, ExceptionalQueue
|
from .broker_util import BrokerCli, ExceptionalQueue
|
||||||
from .httpsrv import HttpSrv
|
from .httpsrv import HttpSrv
|
||||||
from .util import FAKE_MP, HMaccas
|
from .util import FAKE_MP, Daemon, HMaccas
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from types import FrameType
|
from types import FrameType
|
||||||
|
|
||||||
from typing import Any, Optional, Union
|
from typing import Any, Optional, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class MpWorker(BrokerCli):
|
class MpWorker(BrokerCli):
|
||||||
@@ -48,7 +47,11 @@ class MpWorker(BrokerCli):
|
|||||||
# we inherited signal_handler from parent,
|
# we inherited signal_handler from parent,
|
||||||
# replace it with something harmless
|
# replace it with something harmless
|
||||||
if not FAKE_MP:
|
if not FAKE_MP:
|
||||||
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGUSR1]:
|
sigs = [signal.SIGINT, signal.SIGTERM]
|
||||||
|
if not ANYWIN:
|
||||||
|
sigs.append(signal.SIGUSR1)
|
||||||
|
|
||||||
|
for sig in sigs:
|
||||||
signal.signal(sig, self.signal_handler)
|
signal.signal(sig, self.signal_handler)
|
||||||
|
|
||||||
# starting to look like a good idea
|
# starting to look like a good idea
|
||||||
@@ -60,10 +63,7 @@ class MpWorker(BrokerCli):
|
|||||||
|
|
||||||
# on winxp and some other platforms,
|
# on winxp and some other platforms,
|
||||||
# use thr.join() to block all signals
|
# use thr.join() to block all signals
|
||||||
thr = threading.Thread(target=self.main, name="mpw-main")
|
Daemon(self.main, "mpw-main").join()
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
thr.join()
|
|
||||||
|
|
||||||
def signal_handler(self, sig: Optional[int], frame: Optional[FrameType]) -> None:
|
def signal_handler(self, sig: Optional[int], frame: Optional[FrameType]) -> None:
|
||||||
# print('k')
|
# print('k')
|
||||||
@@ -97,6 +97,9 @@ class MpWorker(BrokerCli):
|
|||||||
elif dest == "listen":
|
elif dest == "listen":
|
||||||
self.httpsrv.listen(args[0], args[1])
|
self.httpsrv.listen(args[0], args[1])
|
||||||
|
|
||||||
|
elif dest == "set_netdevs":
|
||||||
|
self.httpsrv.set_netdevs(args[0])
|
||||||
|
|
||||||
elif dest == "retq":
|
elif dest == "retq":
|
||||||
# response from previous ipc call
|
# response from previous ipc call
|
||||||
with self.retpend_mutex:
|
with self.retpend_mutex:
|
||||||
|
|||||||
@@ -12,10 +12,8 @@ from .util import HMaccas
|
|||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any
|
from typing import Any
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class BrokerThr(BrokerCli):
|
class BrokerThr(BrokerCli):
|
||||||
@@ -63,6 +61,10 @@ class BrokerThr(BrokerCli):
|
|||||||
self.httpsrv.listen(args[0], 1)
|
self.httpsrv.listen(args[0], 1)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if dest == "set_netdevs":
|
||||||
|
self.httpsrv.set_netdevs(args[0])
|
||||||
|
return
|
||||||
|
|
||||||
# new ipc invoking managed service in hub
|
# new ipc invoking managed service in hub
|
||||||
obj = self.hub
|
obj = self.hub
|
||||||
for node in dest.split("."):
|
for node in dest.split("."):
|
||||||
|
|||||||
@@ -10,12 +10,10 @@ from .__init__ import TYPE_CHECKING
|
|||||||
from .authsrv import AuthSrv
|
from .authsrv import AuthSrv
|
||||||
from .util import HMaccas, Pebkac
|
from .util import HMaccas, Pebkac
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Optional, Union
|
from typing import Any, Optional, Union
|
||||||
|
|
||||||
from .util import RootLogger
|
from .util import RootLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .httpsrv import HttpSrv
|
from .httpsrv import HttpSrv
|
||||||
@@ -41,12 +39,14 @@ class BrokerCli(object):
|
|||||||
for example resolving httpconn.* in httpcli -- see lines tagged #mypy404
|
for example resolving httpconn.* in httpcli -- see lines tagged #mypy404
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
log: "RootLogger"
|
||||||
|
args: argparse.Namespace
|
||||||
|
asrv: AuthSrv
|
||||||
|
httpsrv: "HttpSrv"
|
||||||
|
iphash: HMaccas
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self.log: "RootLogger" = None
|
pass
|
||||||
self.args: argparse.Namespace = None
|
|
||||||
self.asrv: AuthSrv = None
|
|
||||||
self.httpsrv: "HttpSrv" = None
|
|
||||||
self.iphash: HMaccas = None
|
|
||||||
|
|
||||||
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
|
def ask(self, dest: str, *args: Any) -> ExceptionalQueue:
|
||||||
return ExceptionalQueue(1)
|
return ExceptionalQueue(1)
|
||||||
|
|||||||
72
copyparty/dxml.py
Normal file
72
copyparty/dxml.py
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import importlib
|
||||||
|
import sys
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
from .__init__ import PY2
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
|
||||||
|
def get_ET() -> ET.XMLParser:
|
||||||
|
pn = "xml.etree.ElementTree"
|
||||||
|
cn = "_elementtree"
|
||||||
|
|
||||||
|
cmod = sys.modules.pop(cn, None)
|
||||||
|
if not cmod:
|
||||||
|
return ET.XMLParser # type: ignore
|
||||||
|
|
||||||
|
pmod = sys.modules.pop(pn)
|
||||||
|
sys.modules[cn] = None # type: ignore
|
||||||
|
|
||||||
|
ret = importlib.import_module(pn)
|
||||||
|
for name, mod in ((pn, pmod), (cn, cmod)):
|
||||||
|
if mod:
|
||||||
|
sys.modules[name] = mod
|
||||||
|
else:
|
||||||
|
sys.modules.pop(name, None)
|
||||||
|
|
||||||
|
sys.modules["xml.etree"].ElementTree = pmod # type: ignore
|
||||||
|
ret.ParseError = ET.ParseError # type: ignore
|
||||||
|
return ret.XMLParser # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
XMLParser: ET.XMLParser = get_ET()
|
||||||
|
|
||||||
|
|
||||||
|
class DXMLParser(XMLParser): # type: ignore
|
||||||
|
def __init__(self) -> None:
|
||||||
|
tb = ET.TreeBuilder()
|
||||||
|
super(DXMLParser, self).__init__(target=tb)
|
||||||
|
|
||||||
|
p = self._parser if PY2 else self.parser
|
||||||
|
p.StartDoctypeDeclHandler = self.nope
|
||||||
|
p.EntityDeclHandler = self.nope
|
||||||
|
p.UnparsedEntityDeclHandler = self.nope
|
||||||
|
p.ExternalEntityRefHandler = self.nope
|
||||||
|
|
||||||
|
def nope(self, *a: Any, **ka: Any) -> None:
|
||||||
|
raise BadXML("{}, {}".format(a, ka))
|
||||||
|
|
||||||
|
|
||||||
|
class BadXML(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def parse_xml(txt: str) -> ET.Element:
|
||||||
|
parser = DXMLParser()
|
||||||
|
parser.feed(txt)
|
||||||
|
return parser.close() # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
def mktnod(name: str, text: str) -> ET.Element:
|
||||||
|
el = ET.Element(name)
|
||||||
|
el.text = text
|
||||||
|
return el
|
||||||
|
|
||||||
|
|
||||||
|
def mkenod(name: str, sub_el: Optional[ET.Element] = None) -> ET.Element:
|
||||||
|
el = ET.Element(name)
|
||||||
|
if sub_el is not None:
|
||||||
|
el.append(sub_el)
|
||||||
|
return el
|
||||||
@@ -10,12 +10,10 @@ from .authsrv import AXS, VFS
|
|||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .util import chkcmd, min_ex
|
from .util import chkcmd, min_ex
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
|
|
||||||
from .util import RootLogger
|
from .util import RootLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class Fstab(object):
|
class Fstab(object):
|
||||||
@@ -28,7 +26,7 @@ class Fstab(object):
|
|||||||
self.age = 0.0
|
self.age = 0.0
|
||||||
|
|
||||||
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
||||||
self.log_func("fstab", msg + "\033[K", c)
|
self.log_func("fstab", msg, c)
|
||||||
|
|
||||||
def get(self, path: str) -> str:
|
def get(self, path: str) -> str:
|
||||||
if len(self.cache) > 9000:
|
if len(self.cache) > 9000:
|
||||||
|
|||||||
@@ -6,18 +6,16 @@ import logging
|
|||||||
import os
|
import os
|
||||||
import stat
|
import stat
|
||||||
import sys
|
import sys
|
||||||
import threading
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from pyftpdlib.authorizers import AuthenticationFailed, DummyAuthorizer
|
from pyftpdlib.authorizers import AuthenticationFailed, DummyAuthorizer
|
||||||
from pyftpdlib.filesystems import AbstractedFS, FilesystemError
|
from pyftpdlib.filesystems import AbstractedFS, FilesystemError
|
||||||
from pyftpdlib.handlers import FTPHandler
|
from pyftpdlib.handlers import FTPHandler
|
||||||
from pyftpdlib.log import config_logging
|
|
||||||
from pyftpdlib.servers import FTPServer
|
from pyftpdlib.servers import FTPServer
|
||||||
|
|
||||||
from .__init__ import PY2, TYPE_CHECKING, E
|
from .__init__ import PY2, TYPE_CHECKING, E
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .util import Pebkac, exclude_dotfiles, fsenc
|
from .util import Daemon, Pebkac, exclude_dotfiles, fsenc, ipnorm
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from pyftpdlib.ioloop import IOLoop
|
from pyftpdlib.ioloop import IOLoop
|
||||||
@@ -31,11 +29,9 @@ except ImportError:
|
|||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
import typing
|
import typing
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class FtpAuth(DummyAuthorizer):
|
class FtpAuth(DummyAuthorizer):
|
||||||
@@ -46,21 +42,40 @@ class FtpAuth(DummyAuthorizer):
|
|||||||
def validate_authentication(
|
def validate_authentication(
|
||||||
self, username: str, password: str, handler: Any
|
self, username: str, password: str, handler: Any
|
||||||
) -> None:
|
) -> None:
|
||||||
|
handler.username = "{}:{}".format(username, password)
|
||||||
|
|
||||||
|
ip = handler.addr[0]
|
||||||
|
if ip.startswith("::ffff:"):
|
||||||
|
ip = ip[7:]
|
||||||
|
|
||||||
|
ip = ipnorm(ip)
|
||||||
|
bans = self.hub.bans
|
||||||
|
if ip in bans:
|
||||||
|
rt = bans[ip] - time.time()
|
||||||
|
if rt < 0:
|
||||||
|
logging.info("client unbanned")
|
||||||
|
del bans[ip]
|
||||||
|
else:
|
||||||
|
raise AuthenticationFailed("banned")
|
||||||
|
|
||||||
asrv = self.hub.asrv
|
asrv = self.hub.asrv
|
||||||
if username == "anonymous":
|
if username == "anonymous":
|
||||||
password = ""
|
|
||||||
|
|
||||||
uname = "*"
|
uname = "*"
|
||||||
if password:
|
else:
|
||||||
uname = asrv.iacct.get(password, "")
|
uname = asrv.iacct.get(password, "") or asrv.iacct.get(username, "") or "*"
|
||||||
|
|
||||||
|
if not uname or not (asrv.vfs.aread.get(uname) or asrv.vfs.awrite.get(uname)):
|
||||||
|
g = self.hub.gpwd
|
||||||
|
if g.lim:
|
||||||
|
bonk, ip = g.bonk(ip, handler.username)
|
||||||
|
if bonk:
|
||||||
|
logging.warning("client banned: invalid passwords")
|
||||||
|
bans[ip] = bonk
|
||||||
|
|
||||||
|
raise AuthenticationFailed("Authentication failed.")
|
||||||
|
|
||||||
handler.username = uname
|
handler.username = uname
|
||||||
|
|
||||||
if (password and not uname) or not (
|
|
||||||
asrv.vfs.aread.get(uname) or asrv.vfs.awrite.get(uname)
|
|
||||||
):
|
|
||||||
raise AuthenticationFailed("Authentication failed.")
|
|
||||||
|
|
||||||
def get_home_dir(self, username: str) -> str:
|
def get_home_dir(self, username: str) -> str:
|
||||||
return "/"
|
return "/"
|
||||||
|
|
||||||
@@ -94,6 +109,9 @@ class FtpFs(AbstractedFS):
|
|||||||
self.cwd = "/" # pyftpdlib convention of leading slash
|
self.cwd = "/" # pyftpdlib convention of leading slash
|
||||||
self.root = "/var/lib/empty"
|
self.root = "/var/lib/empty"
|
||||||
|
|
||||||
|
self.can_read = self.can_write = self.can_move = False
|
||||||
|
self.can_delete = self.can_get = self.can_upget = False
|
||||||
|
|
||||||
self.listdirinfo = self.listdir
|
self.listdirinfo = self.listdir
|
||||||
self.chdir(".")
|
self.chdir(".")
|
||||||
|
|
||||||
@@ -145,16 +163,36 @@ class FtpFs(AbstractedFS):
|
|||||||
w = "w" in mode or "a" in mode or "+" in mode
|
w = "w" in mode or "a" in mode or "+" in mode
|
||||||
|
|
||||||
ap = self.rv2a(filename, r, w)
|
ap = self.rv2a(filename, r, w)
|
||||||
if w and bos.path.exists(ap):
|
if w:
|
||||||
|
try:
|
||||||
|
st = bos.stat(ap)
|
||||||
|
td = time.time() - st.st_mtime
|
||||||
|
except:
|
||||||
|
td = 0
|
||||||
|
|
||||||
|
if td < -1 or td > self.args.ftp_wt:
|
||||||
raise FilesystemError("cannot open existing file for writing")
|
raise FilesystemError("cannot open existing file for writing")
|
||||||
|
|
||||||
self.validpath(ap)
|
self.validpath(ap)
|
||||||
return open(fsenc(ap), mode)
|
return open(fsenc(ap), mode)
|
||||||
|
|
||||||
def chdir(self, path: str) -> None:
|
def chdir(self, path: str) -> None:
|
||||||
self.cwd = join(self.cwd, path)
|
nwd = join(self.cwd, path)
|
||||||
x = self.hub.asrv.vfs.can_access(self.cwd.lstrip("/"), self.h.username)
|
vfs, rem = self.hub.asrv.vfs.get(nwd, self.uname, False, False)
|
||||||
self.can_read, self.can_write, self.can_move, self.can_delete, self.can_get = x
|
ap = vfs.canonical(rem)
|
||||||
|
if not bos.path.isdir(ap):
|
||||||
|
# returning 550 is library-default and suitable
|
||||||
|
raise FilesystemError("Failed to change directory")
|
||||||
|
|
||||||
|
self.cwd = nwd
|
||||||
|
(
|
||||||
|
self.can_read,
|
||||||
|
self.can_write,
|
||||||
|
self.can_move,
|
||||||
|
self.can_delete,
|
||||||
|
self.can_get,
|
||||||
|
self.can_upget,
|
||||||
|
) = self.hub.asrv.vfs.can_access(self.cwd.lstrip("/"), self.h.username)
|
||||||
|
|
||||||
def mkdir(self, path: str) -> None:
|
def mkdir(self, path: str) -> None:
|
||||||
ap = self.rv2a(path, w=True)
|
ap = self.rv2a(path, w=True)
|
||||||
@@ -166,7 +204,10 @@ class FtpFs(AbstractedFS):
|
|||||||
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, True, False)
|
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, True, False)
|
||||||
|
|
||||||
fsroot, vfs_ls1, vfs_virt = vfs.ls(
|
fsroot, vfs_ls1, vfs_virt = vfs.ls(
|
||||||
rem, self.uname, not self.args.no_scandir, [[True], [False, True]]
|
rem,
|
||||||
|
self.uname,
|
||||||
|
not self.args.no_scandir,
|
||||||
|
[[True, False], [False, True]],
|
||||||
)
|
)
|
||||||
vfs_ls = [x[0] for x in vfs_ls1]
|
vfs_ls = [x[0] for x in vfs_ls1]
|
||||||
vfs_ls.extend(vfs_virt.keys())
|
vfs_ls.extend(vfs_virt.keys())
|
||||||
@@ -195,7 +236,7 @@ class FtpFs(AbstractedFS):
|
|||||||
|
|
||||||
vp = join(self.cwd, path).lstrip("/")
|
vp = join(self.cwd, path).lstrip("/")
|
||||||
try:
|
try:
|
||||||
self.hub.up2k.handle_rm(self.uname, self.h.remote_ip, [vp])
|
self.hub.up2k.handle_rm(self.uname, self.h.remote_ip, [vp], [])
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
raise FilesystemError(str(ex))
|
raise FilesystemError(str(ex))
|
||||||
|
|
||||||
@@ -235,11 +276,14 @@ class FtpFs(AbstractedFS):
|
|||||||
|
|
||||||
def lstat(self, path: str) -> os.stat_result:
|
def lstat(self, path: str) -> os.stat_result:
|
||||||
ap = self.rv2a(path)
|
ap = self.rv2a(path)
|
||||||
return bos.lstat(ap)
|
return bos.stat(ap)
|
||||||
|
|
||||||
def isfile(self, path: str) -> bool:
|
def isfile(self, path: str) -> bool:
|
||||||
|
try:
|
||||||
st = self.stat(path)
|
st = self.stat(path)
|
||||||
return stat.S_ISREG(st.st_mode)
|
return stat.S_ISREG(st.st_mode)
|
||||||
|
except:
|
||||||
|
return False # expected for mojibake in ftp_SIZE()
|
||||||
|
|
||||||
def islink(self, path: str) -> bool:
|
def islink(self, path: str) -> bool:
|
||||||
ap = self.rv2a(path)
|
ap = self.rv2a(path)
|
||||||
@@ -276,8 +320,8 @@ class FtpFs(AbstractedFS):
|
|||||||
|
|
||||||
class FtpHandler(FTPHandler):
|
class FtpHandler(FTPHandler):
|
||||||
abstracted_fs = FtpFs
|
abstracted_fs = FtpFs
|
||||||
hub: "SvcHub" = None
|
hub: "SvcHub"
|
||||||
args: argparse.Namespace = None
|
args: argparse.Namespace
|
||||||
|
|
||||||
def __init__(self, conn: Any, server: Any, ioloop: Any = None) -> None:
|
def __init__(self, conn: Any, server: Any, ioloop: Any = None) -> None:
|
||||||
self.hub: "SvcHub" = FtpHandler.hub
|
self.hub: "SvcHub" = FtpHandler.hub
|
||||||
@@ -291,6 +335,9 @@ class FtpHandler(FTPHandler):
|
|||||||
# abspath->vpath mapping to resolve log_transfer paths
|
# abspath->vpath mapping to resolve log_transfer paths
|
||||||
self.vfs_map: dict[str, str] = {}
|
self.vfs_map: dict[str, str] = {}
|
||||||
|
|
||||||
|
# reduce non-debug logging
|
||||||
|
self.log_cmds_list = [x for x in self.log_cmds_list if x not in ("CWD", "XCWD")]
|
||||||
|
|
||||||
def ftp_STOR(self, file: str, mode: str = "w") -> Any:
|
def ftp_STOR(self, file: str, mode: str = "w") -> Any:
|
||||||
# Optional[str]
|
# Optional[str]
|
||||||
vp = join(self.fs.cwd, file).lstrip("/")
|
vp = join(self.fs.cwd, file).lstrip("/")
|
||||||
@@ -385,17 +432,15 @@ class Ftpd(object):
|
|||||||
if self.args.ftp_nat:
|
if self.args.ftp_nat:
|
||||||
h2.masquerade_address = self.args.ftp_nat
|
h2.masquerade_address = self.args.ftp_nat
|
||||||
|
|
||||||
if self.args.ftp_dbg:
|
lgr = logging.getLogger("pyftpdlib")
|
||||||
config_logging(level=logging.DEBUG)
|
lgr.setLevel(logging.DEBUG if self.args.ftpv else logging.INFO)
|
||||||
|
|
||||||
ioloop = IOLoop()
|
ioloop = IOLoop()
|
||||||
for ip in self.args.i:
|
for ip in self.args.i:
|
||||||
for h, lp in hs:
|
for h, lp in hs:
|
||||||
FTPServer((ip, int(lp)), h, ioloop)
|
FTPServer((ip, int(lp)), h, ioloop)
|
||||||
|
|
||||||
thr = threading.Thread(target=ioloop.loop, name="ftp")
|
Daemon(ioloop.loop, "ftp")
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
|
|
||||||
def join(p1: str, p2: str) -> str:
|
def join(p1: str, p2: str) -> str:
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -25,15 +25,16 @@ from .th_srv import HAVE_PIL, HAVE_VIPS
|
|||||||
from .u2idx import U2idx
|
from .u2idx import U2idx
|
||||||
from .util import HMaccas, shut_socket
|
from .util import HMaccas, shut_socket
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Optional, Pattern, Union
|
from typing import Optional, Pattern, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .httpsrv import HttpSrv
|
from .httpsrv import HttpSrv
|
||||||
|
|
||||||
|
|
||||||
|
PTN_HTTP = re.compile(br"[A-Z]{3}[A-Z ]")
|
||||||
|
|
||||||
|
|
||||||
class HttpConn(object):
|
class HttpConn(object):
|
||||||
"""
|
"""
|
||||||
spawned by HttpSrv to handle an incoming client connection,
|
spawned by HttpSrv to handle an incoming client connection,
|
||||||
@@ -45,6 +46,7 @@ class HttpConn(object):
|
|||||||
) -> None:
|
) -> None:
|
||||||
self.s = sck
|
self.s = sck
|
||||||
self.sr: Optional[Util._Unrecv] = None
|
self.sr: Optional[Util._Unrecv] = None
|
||||||
|
self.cli: Optional[HttpCli] = None
|
||||||
self.addr = addr
|
self.addr = addr
|
||||||
self.hsrv = hsrv
|
self.hsrv = hsrv
|
||||||
|
|
||||||
@@ -55,6 +57,8 @@ class HttpConn(object):
|
|||||||
self.cert_path = hsrv.cert_path
|
self.cert_path = hsrv.cert_path
|
||||||
self.u2fh: Util.FHC = hsrv.u2fh # mypy404
|
self.u2fh: Util.FHC = hsrv.u2fh # mypy404
|
||||||
self.iphash: HMaccas = hsrv.broker.iphash
|
self.iphash: HMaccas = hsrv.broker.iphash
|
||||||
|
self.bans: dict[str, int] = hsrv.bans
|
||||||
|
self.aclose: dict[str, int] = hsrv.aclose
|
||||||
|
|
||||||
enth = (HAVE_PIL or HAVE_VIPS or HAVE_FFMPEG) and not self.args.no_thumb
|
enth = (HAVE_PIL or HAVE_VIPS or HAVE_FFMPEG) and not self.args.no_thumb
|
||||||
self.thumbcli: Optional[ThumbCli] = ThumbCli(hsrv) if enth else None # mypy404
|
self.thumbcli: Optional[ThumbCli] = ThumbCli(hsrv) if enth else None # mypy404
|
||||||
@@ -62,7 +66,7 @@ class HttpConn(object):
|
|||||||
|
|
||||||
self.t0: float = time.time() # mypy404
|
self.t0: float = time.time() # mypy404
|
||||||
self.stopping = False
|
self.stopping = False
|
||||||
self.nreq: int = 0 # mypy404
|
self.nreq: int = -1 # mypy404
|
||||||
self.nbyte: int = 0 # mypy404
|
self.nbyte: int = 0 # mypy404
|
||||||
self.u2idx: Optional[U2idx] = None
|
self.u2idx: Optional[U2idx] = None
|
||||||
self.log_func: "Util.RootLogger" = hsrv.log # mypy404
|
self.log_func: "Util.RootLogger" = hsrv.log # mypy404
|
||||||
@@ -134,9 +138,11 @@ class HttpConn(object):
|
|||||||
self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8"))
|
self.s.send(b"HTTP/1.1 400 Bad Request\r\n\r\n" + err.encode("utf-8"))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return method not in [None, b"GET ", b"HEAD", b"POST", b"PUT ", b"OPTI"]
|
return not method or not bool(PTN_HTTP.match(method))
|
||||||
|
|
||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
|
self.s.settimeout(10)
|
||||||
|
|
||||||
self.sr = None
|
self.sr = None
|
||||||
if self.args.https_only:
|
if self.args.https_only:
|
||||||
is_https = True
|
is_https = True
|
||||||
@@ -205,6 +211,6 @@ class HttpConn(object):
|
|||||||
|
|
||||||
while not self.stopping:
|
while not self.stopping:
|
||||||
self.nreq += 1
|
self.nreq += 1
|
||||||
cli = HttpCli(self)
|
self.cli = HttpCli(self)
|
||||||
if not cli.run():
|
if not self.cli.run():
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -28,13 +28,18 @@ except ImportError:
|
|||||||
)
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
from .__init__ import MACOS, TYPE_CHECKING, EnvParams
|
from .__init__ import ANYWIN, MACOS, TYPE_CHECKING, EnvParams
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .httpconn import HttpConn
|
from .httpconn import HttpConn
|
||||||
from .util import (
|
from .util import (
|
||||||
|
E_SCK,
|
||||||
FHC,
|
FHC,
|
||||||
|
Daemon,
|
||||||
Garda,
|
Garda,
|
||||||
Magician,
|
Magician,
|
||||||
|
Netdev,
|
||||||
|
NetMap,
|
||||||
|
ipnorm,
|
||||||
min_ex,
|
min_ex,
|
||||||
shut_socket,
|
shut_socket,
|
||||||
spack,
|
spack,
|
||||||
@@ -44,11 +49,10 @@ from .util import (
|
|||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .broker_util import BrokerCli
|
from .broker_util import BrokerCli
|
||||||
|
from .ssdp import SSDPr
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class HttpSrv(object):
|
class HttpSrv(object):
|
||||||
@@ -70,10 +74,15 @@ class HttpSrv(object):
|
|||||||
|
|
||||||
nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
|
nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
|
||||||
self.magician = Magician()
|
self.magician = Magician()
|
||||||
self.bans: dict[str, int] = {}
|
self.nm = NetMap([], {})
|
||||||
|
self.ssdp: Optional["SSDPr"] = None
|
||||||
self.gpwd = Garda(self.args.ban_pw)
|
self.gpwd = Garda(self.args.ban_pw)
|
||||||
self.g404 = Garda(self.args.ban_404)
|
self.g404 = Garda(self.args.ban_404)
|
||||||
|
self.bans: dict[str, int] = {}
|
||||||
|
self.aclose: dict[str, int] = {}
|
||||||
|
|
||||||
|
self.ip = ""
|
||||||
|
self.port = 0
|
||||||
self.name = "hsrv" + nsuf
|
self.name = "hsrv" + nsuf
|
||||||
self.mutex = threading.Lock()
|
self.mutex = threading.Lock()
|
||||||
self.stopping = False
|
self.stopping = False
|
||||||
@@ -96,13 +105,16 @@ class HttpSrv(object):
|
|||||||
|
|
||||||
env = jinja2.Environment()
|
env = jinja2.Environment()
|
||||||
env.loader = jinja2.FileSystemLoader(os.path.join(self.E.mod, "web"))
|
env.loader = jinja2.FileSystemLoader(os.path.join(self.E.mod, "web"))
|
||||||
self.j2 = {
|
jn = ["splash", "svcs", "browser", "browser2", "msg", "md", "mde", "cf"]
|
||||||
x: env.get_template(x + ".html")
|
self.j2 = {x: env.get_template(x + ".html") for x in jn}
|
||||||
for x in ["splash", "browser", "browser2", "msg", "md", "mde", "cf"]
|
|
||||||
}
|
|
||||||
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
|
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
|
||||||
self.prism = os.path.exists(zs)
|
self.prism = os.path.exists(zs)
|
||||||
|
|
||||||
|
if self.args.zs:
|
||||||
|
from .ssdp import SSDPr
|
||||||
|
|
||||||
|
self.ssdp = SSDPr(broker)
|
||||||
|
|
||||||
cert_path = os.path.join(self.E.cfg, "cert.pem")
|
cert_path = os.path.join(self.E.cfg, "cert.pem")
|
||||||
if bos.path.exists(cert_path):
|
if bos.path.exists(cert_path):
|
||||||
self.cert_path = cert_path
|
self.cert_path = cert_path
|
||||||
@@ -120,9 +132,7 @@ class HttpSrv(object):
|
|||||||
start_log_thrs(self.log, self.args.log_thrs, nid)
|
start_log_thrs(self.log, self.args.log_thrs, nid)
|
||||||
|
|
||||||
self.th_cfg: dict[str, Any] = {}
|
self.th_cfg: dict[str, Any] = {}
|
||||||
t = threading.Thread(target=self.post_init, name="hsrv-init2")
|
Daemon(self.post_init, "hsrv-init2")
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
def post_init(self) -> None:
|
def post_init(self) -> None:
|
||||||
try:
|
try:
|
||||||
@@ -131,18 +141,16 @@ class HttpSrv(object):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
|
||||||
|
self.nm = NetMap([self.ip], netdevs)
|
||||||
|
|
||||||
def start_threads(self, n: int) -> None:
|
def start_threads(self, n: int) -> None:
|
||||||
self.tp_nthr += n
|
self.tp_nthr += n
|
||||||
if self.args.log_htp:
|
if self.args.log_htp:
|
||||||
self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)
|
self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)
|
||||||
|
|
||||||
for _ in range(n):
|
for _ in range(n):
|
||||||
thr = threading.Thread(
|
Daemon(self.thr_poolw, self.name + "-poolw")
|
||||||
target=self.thr_poolw,
|
|
||||||
name=self.name + "-poolw",
|
|
||||||
)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
def stop_threads(self, n: int) -> None:
|
def stop_threads(self, n: int) -> None:
|
||||||
self.tp_nthr -= n
|
self.tp_nthr -= n
|
||||||
@@ -170,26 +178,27 @@ class HttpSrv(object):
|
|||||||
def listen(self, sck: socket.socket, nlisteners: int) -> None:
|
def listen(self, sck: socket.socket, nlisteners: int) -> None:
|
||||||
if self.args.j != 1:
|
if self.args.j != 1:
|
||||||
# lost in the pickle; redefine
|
# lost in the pickle; redefine
|
||||||
|
if not ANYWIN or self.args.reuseaddr:
|
||||||
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
|
||||||
sck.settimeout(None) # < does not inherit, ^ does
|
|
||||||
|
|
||||||
ip, port = sck.getsockname()
|
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
||||||
|
sck.settimeout(None) # < does not inherit, ^ opts above do
|
||||||
|
|
||||||
|
self.ip, self.port = sck.getsockname()[:2]
|
||||||
self.srvs.append(sck)
|
self.srvs.append(sck)
|
||||||
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
|
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
|
||||||
t = threading.Thread(
|
Daemon(
|
||||||
target=self.thr_listen,
|
self.thr_listen,
|
||||||
args=(sck,),
|
"httpsrv-n{}-listen-{}-{}".format(self.nid or "0", self.ip, self.port),
|
||||||
name="httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
|
(sck,),
|
||||||
)
|
)
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
def thr_listen(self, srv_sck: socket.socket) -> None:
|
def thr_listen(self, srv_sck: socket.socket) -> None:
|
||||||
"""listens on a shared tcp server"""
|
"""listens on a shared tcp server"""
|
||||||
ip, port = srv_sck.getsockname()
|
ip, port = srv_sck.getsockname()[:2]
|
||||||
fno = srv_sck.fileno()
|
fno = srv_sck.fileno()
|
||||||
msg = "subscribed @ {}:{} f{} p{}".format(ip, port, fno, os.getpid())
|
hip = "[{}]".format(ip) if ":" in ip else ip
|
||||||
|
msg = "subscribed @ {}:{} f{} p{}".format(hip, port, fno, os.getpid())
|
||||||
self.log(self.name, msg)
|
self.log(self.name, msg)
|
||||||
|
|
||||||
def fun() -> None:
|
def fun() -> None:
|
||||||
@@ -199,19 +208,80 @@ class HttpSrv(object):
|
|||||||
|
|
||||||
while not self.stopping:
|
while not self.stopping:
|
||||||
if self.args.log_conn:
|
if self.args.log_conn:
|
||||||
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="1;30")
|
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="90")
|
||||||
|
|
||||||
if self.ncli >= self.nclimax:
|
spins = 0
|
||||||
self.log(self.name, "at connection limit; waiting", 3)
|
|
||||||
while self.ncli >= self.nclimax:
|
while self.ncli >= self.nclimax:
|
||||||
|
if not spins:
|
||||||
|
self.log(self.name, "at connection limit; waiting", 3)
|
||||||
|
|
||||||
|
spins += 1
|
||||||
time.sleep(0.1)
|
time.sleep(0.1)
|
||||||
|
if spins != 50 or not self.args.aclose:
|
||||||
|
continue
|
||||||
|
|
||||||
|
ipfreq: dict[str, int] = {}
|
||||||
|
with self.mutex:
|
||||||
|
for c in self.clients:
|
||||||
|
ip = ipnorm(c.ip)
|
||||||
|
try:
|
||||||
|
ipfreq[ip] += 1
|
||||||
|
except:
|
||||||
|
ipfreq[ip] = 1
|
||||||
|
|
||||||
|
ip, n = sorted(ipfreq.items(), key=lambda x: x[1], reverse=True)[0]
|
||||||
|
if n < self.nclimax / 2:
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.aclose[ip] = int(time.time() + self.args.aclose * 60)
|
||||||
|
nclose = 0
|
||||||
|
nloris = 0
|
||||||
|
nconn = 0
|
||||||
|
with self.mutex:
|
||||||
|
for c in self.clients:
|
||||||
|
cip = ipnorm(c.ip)
|
||||||
|
if ip != cip:
|
||||||
|
continue
|
||||||
|
|
||||||
|
nconn += 1
|
||||||
|
try:
|
||||||
|
if (
|
||||||
|
c.nreq >= 1
|
||||||
|
or not c.cli
|
||||||
|
or c.cli.in_hdr_recv
|
||||||
|
or c.cli.keepalive
|
||||||
|
):
|
||||||
|
Daemon(c.shutdown)
|
||||||
|
nclose += 1
|
||||||
|
if c.nreq <= 0 and (not c.cli or c.cli.in_hdr_recv):
|
||||||
|
nloris += 1
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
t = "{} downgraded to connection:close for {} min; dropped {}/{} connections"
|
||||||
|
self.log(self.name, t.format(ip, self.args.aclose, nclose, nconn), 1)
|
||||||
|
|
||||||
|
if nloris < nconn / 2:
|
||||||
|
continue
|
||||||
|
|
||||||
|
t = "slowloris (idle-conn): {} banned for {} min"
|
||||||
|
self.log(self.name, t.format(ip, self.args.loris, nclose), 1)
|
||||||
|
self.bans[ip] = int(time.time() + self.args.loris * 60)
|
||||||
|
|
||||||
if self.args.log_conn:
|
if self.args.log_conn:
|
||||||
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="1;30")
|
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="90")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
sck, addr = srv_sck.accept()
|
sck, saddr = srv_sck.accept()
|
||||||
|
cip, cport = saddr[:2]
|
||||||
|
if cip.startswith("::ffff:"):
|
||||||
|
cip = cip[7:]
|
||||||
|
|
||||||
|
addr = (cip, cport)
|
||||||
except (OSError, socket.error) as ex:
|
except (OSError, socket.error) as ex:
|
||||||
|
if self.stopping:
|
||||||
|
break
|
||||||
|
|
||||||
self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
|
self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
|
||||||
time.sleep(0.02)
|
time.sleep(0.02)
|
||||||
continue
|
continue
|
||||||
@@ -220,7 +290,7 @@ class HttpSrv(object):
|
|||||||
t = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
|
t = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
|
||||||
"-" * 3, ip, port % 8, port
|
"-" * 3, ip, port % 8, port
|
||||||
)
|
)
|
||||||
self.log("%s %s" % addr, t, c="1;30")
|
self.log("%s %s" % addr, t, c="90")
|
||||||
|
|
||||||
self.accept(sck, addr)
|
self.accept(sck, addr)
|
||||||
|
|
||||||
@@ -241,10 +311,7 @@ class HttpSrv(object):
|
|||||||
if self.nid:
|
if self.nid:
|
||||||
name += "-{}".format(self.nid)
|
name += "-{}".format(self.nid)
|
||||||
|
|
||||||
thr = threading.Thread(target=self.periodic, name=name)
|
self.t_periodic = Daemon(self.periodic, name)
|
||||||
self.t_periodic = thr
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
if self.tp_q:
|
if self.tp_q:
|
||||||
self.tp_time = self.tp_time or now
|
self.tp_time = self.tp_time or now
|
||||||
@@ -259,13 +326,11 @@ class HttpSrv(object):
|
|||||||
t = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
|
t = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
|
||||||
self.log(self.name, t, 1)
|
self.log(self.name, t, 1)
|
||||||
|
|
||||||
thr = threading.Thread(
|
Daemon(
|
||||||
target=self.thr_client,
|
self.thr_client,
|
||||||
args=(sck, addr),
|
"httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
|
||||||
name="httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
|
(sck, addr),
|
||||||
)
|
)
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
def thr_poolw(self) -> None:
|
def thr_poolw(self) -> None:
|
||||||
assert self.tp_q
|
assert self.tp_q
|
||||||
@@ -324,15 +389,16 @@ class HttpSrv(object):
|
|||||||
with self.mutex:
|
with self.mutex:
|
||||||
self.clients.add(cli)
|
self.clients.add(cli)
|
||||||
|
|
||||||
|
# print("{}\n".format(len(self.clients)), end="")
|
||||||
fno = sck.fileno()
|
fno = sck.fileno()
|
||||||
try:
|
try:
|
||||||
if self.args.log_conn:
|
if self.args.log_conn:
|
||||||
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="1;30")
|
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="90")
|
||||||
|
|
||||||
cli.run()
|
cli.run()
|
||||||
|
|
||||||
except (OSError, socket.error) as ex:
|
except (OSError, socket.error) as ex:
|
||||||
if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
|
if ex.errno not in E_SCK:
|
||||||
self.log(
|
self.log(
|
||||||
"%s %s" % addr,
|
"%s %s" % addr,
|
||||||
"run({}): {}".format(fno, ex),
|
"run({}): {}".format(fno, ex),
|
||||||
@@ -342,7 +408,7 @@ class HttpSrv(object):
|
|||||||
finally:
|
finally:
|
||||||
sck = cli.s
|
sck = cli.s
|
||||||
if self.args.log_conn:
|
if self.args.log_conn:
|
||||||
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="1;30")
|
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="90")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
fno = sck.fileno()
|
fno = sck.fileno()
|
||||||
@@ -352,15 +418,9 @@ class HttpSrv(object):
|
|||||||
self.log(
|
self.log(
|
||||||
"%s %s" % addr,
|
"%s %s" % addr,
|
||||||
"shut({}): {}".format(fno, ex),
|
"shut({}): {}".format(fno, ex),
|
||||||
c="1;30",
|
c="90",
|
||||||
)
|
)
|
||||||
if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
|
if ex.errno not in E_SCK:
|
||||||
# 10038 No longer considered a socket
|
|
||||||
# 10054 Foribly closed by remote
|
|
||||||
# 107 Transport endpoint not connected
|
|
||||||
# 57 Socket is not connected
|
|
||||||
# 49 Can't assign requested address (wifi down)
|
|
||||||
# 9 Bad file descriptor
|
|
||||||
raise
|
raise
|
||||||
finally:
|
finally:
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
|
|||||||
512
copyparty/mdns.py
Normal file
512
copyparty/mdns.py
Normal file
@@ -0,0 +1,512 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import random
|
||||||
|
import select
|
||||||
|
import socket
|
||||||
|
import time
|
||||||
|
|
||||||
|
from ipaddress import IPv4Network, IPv6Network
|
||||||
|
|
||||||
|
from .__init__ import TYPE_CHECKING
|
||||||
|
from .__init__ import unicode as U
|
||||||
|
from .multicast import MC_Sck, MCast
|
||||||
|
from .stolen.dnslib import CLASS as DC
|
||||||
|
from .stolen.dnslib import (
|
||||||
|
NSEC,
|
||||||
|
PTR,
|
||||||
|
QTYPE,
|
||||||
|
RR,
|
||||||
|
SRV,
|
||||||
|
TXT,
|
||||||
|
A,
|
||||||
|
AAAA,
|
||||||
|
DNSHeader,
|
||||||
|
DNSQuestion,
|
||||||
|
DNSRecord,
|
||||||
|
)
|
||||||
|
from .util import CachedSet, Daemon, Netdev, min_ex
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .svchub import SvcHub
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Any, Optional, Union
|
||||||
|
|
||||||
|
|
||||||
|
MDNS4 = "224.0.0.251"
|
||||||
|
MDNS6 = "ff02::fb"
|
||||||
|
|
||||||
|
|
||||||
|
class MDNS_Sck(MC_Sck):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
sck: socket.socket,
|
||||||
|
nd: Netdev,
|
||||||
|
grp: str,
|
||||||
|
ip: str,
|
||||||
|
net: Union[IPv4Network, IPv6Network],
|
||||||
|
):
|
||||||
|
super(MDNS_Sck, self).__init__(sck, nd, grp, ip, net)
|
||||||
|
|
||||||
|
self.bp_probe = b""
|
||||||
|
self.bp_ip = b""
|
||||||
|
self.bp_svc = b""
|
||||||
|
self.bp_bye = b""
|
||||||
|
|
||||||
|
self.last_tx = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
class MDNS(MCast):
|
||||||
|
def __init__(self, hub: "SvcHub") -> None:
|
||||||
|
al = hub.args
|
||||||
|
grp4 = "" if al.zm6 else MDNS4
|
||||||
|
grp6 = "" if al.zm4 else MDNS6
|
||||||
|
super(MDNS, self).__init__(
|
||||||
|
hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv
|
||||||
|
)
|
||||||
|
self.srv: dict[socket.socket, MDNS_Sck] = {}
|
||||||
|
|
||||||
|
self.ttl = 300
|
||||||
|
|
||||||
|
zs = self.args.name + ".local."
|
||||||
|
zs = zs.encode("ascii", "replace").decode("ascii", "replace")
|
||||||
|
self.hn = "-".join(x for x in zs.split("?") if x) or (
|
||||||
|
"vault-{}".format(random.randint(1, 255))
|
||||||
|
)
|
||||||
|
self.lhn = self.hn.lower()
|
||||||
|
|
||||||
|
# requester ip -> (response deadline, srv, body):
|
||||||
|
self.q: dict[str, tuple[float, MDNS_Sck, bytes]] = {}
|
||||||
|
self.rx4 = CachedSet(0.42) # 3 probes @ 250..500..750 => 500ms span
|
||||||
|
self.rx6 = CachedSet(0.42)
|
||||||
|
self.svcs, self.sfqdns = self.build_svcs()
|
||||||
|
self.lsvcs = {k.lower(): v for k, v in self.svcs.items()}
|
||||||
|
self.lsfqdns = set([x.lower() for x in self.sfqdns])
|
||||||
|
|
||||||
|
self.probing = 0.0
|
||||||
|
self.unsolicited: list[float] = [] # scheduled announces on all nics
|
||||||
|
self.defend: dict[MDNS_Sck, float] = {} # server -> deadline
|
||||||
|
|
||||||
|
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
||||||
|
self.log_func("mDNS", msg, c)
|
||||||
|
|
||||||
|
def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]:
|
||||||
|
zms = self.args.zms
|
||||||
|
http = {"port": 80 if 80 in self.args.p else self.args.p[0]}
|
||||||
|
https = {"port": 443 if 443 in self.args.p else self.args.p[0]}
|
||||||
|
webdav = http.copy()
|
||||||
|
webdavs = https.copy()
|
||||||
|
webdav["u"] = webdavs["u"] = "u" # KDE requires username
|
||||||
|
ftp = {"port": (self.args.ftp if "f" in zms else self.args.ftps)}
|
||||||
|
smb = {"port": self.args.smb_port}
|
||||||
|
|
||||||
|
# some gvfs require path
|
||||||
|
zs = self.args.zm_ld or "/"
|
||||||
|
if zs:
|
||||||
|
webdav["path"] = zs
|
||||||
|
webdavs["path"] = zs
|
||||||
|
|
||||||
|
if self.args.zm_lh:
|
||||||
|
http["path"] = self.args.zm_lh
|
||||||
|
https["path"] = self.args.zm_lh
|
||||||
|
|
||||||
|
if self.args.zm_lf:
|
||||||
|
ftp["path"] = self.args.zm_lf
|
||||||
|
|
||||||
|
if self.args.zm_ls:
|
||||||
|
smb["path"] = self.args.zm_ls
|
||||||
|
|
||||||
|
svcs: dict[str, dict[str, Any]] = {}
|
||||||
|
|
||||||
|
if "d" in zms:
|
||||||
|
svcs["_webdav._tcp.local."] = webdav
|
||||||
|
|
||||||
|
if "D" in zms:
|
||||||
|
svcs["_webdavs._tcp.local."] = webdavs
|
||||||
|
|
||||||
|
if "h" in zms:
|
||||||
|
svcs["_http._tcp.local."] = http
|
||||||
|
|
||||||
|
if "H" in zms:
|
||||||
|
svcs["_https._tcp.local."] = https
|
||||||
|
|
||||||
|
if "f" in zms.lower():
|
||||||
|
svcs["_ftp._tcp.local."] = ftp
|
||||||
|
|
||||||
|
if "s" in zms.lower():
|
||||||
|
svcs["_smb._tcp.local."] = smb
|
||||||
|
|
||||||
|
sfqdns: set[str] = set()
|
||||||
|
for k, v in svcs.items():
|
||||||
|
name = "{}-c-{}".format(self.args.name, k.split(".")[0][1:])
|
||||||
|
v["name"] = name
|
||||||
|
sfqdns.add("{}.{}".format(name, k))
|
||||||
|
|
||||||
|
return svcs, sfqdns
|
||||||
|
|
||||||
|
def build_replies(self) -> None:
|
||||||
|
for srv in self.srv.values():
|
||||||
|
probe = DNSRecord(DNSHeader(0, 0), q=DNSQuestion(self.hn, QTYPE.ANY))
|
||||||
|
areply = DNSRecord(DNSHeader(0, 0x8400))
|
||||||
|
sreply = DNSRecord(DNSHeader(0, 0x8400))
|
||||||
|
bye = DNSRecord(DNSHeader(0, 0x8400))
|
||||||
|
|
||||||
|
have4 = have6 = False
|
||||||
|
for s2 in self.srv.values():
|
||||||
|
if srv.idx != s2.idx:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if s2.v6:
|
||||||
|
have6 = True
|
||||||
|
else:
|
||||||
|
have4 = True
|
||||||
|
|
||||||
|
for ip in srv.ips:
|
||||||
|
if ":" in ip:
|
||||||
|
qt = QTYPE.AAAA
|
||||||
|
ar = {"rclass": DC.F_IN, "rdata": AAAA(ip)}
|
||||||
|
else:
|
||||||
|
qt = QTYPE.A
|
||||||
|
ar = {"rclass": DC.F_IN, "rdata": A(ip)}
|
||||||
|
|
||||||
|
r0 = RR(self.hn, qt, ttl=0, **ar)
|
||||||
|
r120 = RR(self.hn, qt, ttl=120, **ar)
|
||||||
|
# rfc-10:
|
||||||
|
# SHOULD rr ttl 120sec for A/AAAA/SRV
|
||||||
|
# (and recommend 75min for all others)
|
||||||
|
|
||||||
|
probe.add_auth(r120)
|
||||||
|
areply.add_answer(r120)
|
||||||
|
sreply.add_answer(r120)
|
||||||
|
bye.add_answer(r0)
|
||||||
|
|
||||||
|
for sclass, props in self.svcs.items():
|
||||||
|
sname = props["name"]
|
||||||
|
sport = props["port"]
|
||||||
|
sfqdn = sname + "." + sclass
|
||||||
|
|
||||||
|
k = "_services._dns-sd._udp.local."
|
||||||
|
r = RR(k, QTYPE.PTR, DC.IN, 4500, PTR(sclass))
|
||||||
|
sreply.add_answer(r)
|
||||||
|
|
||||||
|
r = RR(sclass, QTYPE.PTR, DC.IN, 4500, PTR(sfqdn))
|
||||||
|
sreply.add_answer(r)
|
||||||
|
|
||||||
|
r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 120, SRV(0, 0, sport, self.hn))
|
||||||
|
sreply.add_answer(r)
|
||||||
|
areply.add_answer(r)
|
||||||
|
|
||||||
|
r = RR(sfqdn, QTYPE.SRV, DC.F_IN, 0, SRV(0, 0, sport, self.hn))
|
||||||
|
bye.add_answer(r)
|
||||||
|
|
||||||
|
txts = []
|
||||||
|
for k in ("u", "path"):
|
||||||
|
if k not in props:
|
||||||
|
continue
|
||||||
|
|
||||||
|
zb = "{}={}".format(k, props[k]).encode("utf-8")
|
||||||
|
if len(zb) > 255:
|
||||||
|
t = "value too long for mdns: [{}]"
|
||||||
|
raise Exception(t.format(props[k]))
|
||||||
|
|
||||||
|
txts.append(zb)
|
||||||
|
|
||||||
|
# gvfs really wants txt even if they're empty
|
||||||
|
r = RR(sfqdn, QTYPE.TXT, DC.F_IN, 4500, TXT(txts))
|
||||||
|
sreply.add_answer(r)
|
||||||
|
|
||||||
|
if not (have4 and have6) and not self.args.zm_noneg:
|
||||||
|
ns = NSEC(self.hn, ["AAAA" if have6 else "A"])
|
||||||
|
r = RR(self.hn, QTYPE.NSEC, DC.F_IN, 120, ns)
|
||||||
|
areply.add_ar(r)
|
||||||
|
if len(sreply.pack()) < 1400:
|
||||||
|
sreply.add_ar(r)
|
||||||
|
|
||||||
|
srv.bp_probe = probe.pack()
|
||||||
|
srv.bp_ip = areply.pack()
|
||||||
|
srv.bp_svc = sreply.pack()
|
||||||
|
srv.bp_bye = bye.pack()
|
||||||
|
|
||||||
|
# since all replies are small enough to fit in one packet,
|
||||||
|
# always send full replies rather than just a/aaaa records
|
||||||
|
srv.bp_ip = srv.bp_svc
|
||||||
|
|
||||||
|
def send_probes(self) -> None:
|
||||||
|
slp = random.random() * 0.25
|
||||||
|
for _ in range(3):
|
||||||
|
time.sleep(slp)
|
||||||
|
slp = 0.25
|
||||||
|
if not self.running:
|
||||||
|
break
|
||||||
|
|
||||||
|
if self.args.zmv:
|
||||||
|
self.log("sending hostname probe...")
|
||||||
|
|
||||||
|
# ipv4: need to probe each ip (each server)
|
||||||
|
# ipv6: only need to probe each set of looped nics
|
||||||
|
probed6: set[str] = set()
|
||||||
|
for srv in self.srv.values():
|
||||||
|
if srv.ip in probed6:
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
srv.sck.sendto(srv.bp_probe, (srv.grp, 5353))
|
||||||
|
if srv.v6:
|
||||||
|
for ip in srv.ips:
|
||||||
|
probed6.add(ip)
|
||||||
|
except Exception as ex:
|
||||||
|
self.log("sendto failed: {} ({})".format(srv.ip, ex), "90")
|
||||||
|
|
||||||
|
def run(self) -> None:
|
||||||
|
try:
|
||||||
|
bound = self.create_servers()
|
||||||
|
except:
|
||||||
|
t = "no server IP matches the mdns config\n{}"
|
||||||
|
self.log(t.format(min_ex()), 1)
|
||||||
|
bound = []
|
||||||
|
|
||||||
|
if not bound:
|
||||||
|
self.log("failed to announce copyparty services on the network", 3)
|
||||||
|
return
|
||||||
|
|
||||||
|
self.build_replies()
|
||||||
|
Daemon(self.send_probes)
|
||||||
|
zf = time.time() + 2
|
||||||
|
self.probing = zf # cant unicast so give everyone an extra sec
|
||||||
|
self.unsolicited = [zf, zf + 1, zf + 3, zf + 7] # rfc-8.3
|
||||||
|
last_hop = time.time()
|
||||||
|
ihop = self.args.mc_hop
|
||||||
|
while self.running:
|
||||||
|
timeout = (
|
||||||
|
0.02 + random.random() * 0.07
|
||||||
|
if self.probing or self.q or self.defend or self.unsolicited
|
||||||
|
else (last_hop + ihop if ihop else 180)
|
||||||
|
)
|
||||||
|
rdy = select.select(self.srv, [], [], timeout)
|
||||||
|
rx: list[socket.socket] = rdy[0] # type: ignore
|
||||||
|
self.rx4.cln()
|
||||||
|
self.rx6.cln()
|
||||||
|
for sck in rx:
|
||||||
|
buf, addr = sck.recvfrom(4096)
|
||||||
|
try:
|
||||||
|
self.eat(buf, addr, sck)
|
||||||
|
except:
|
||||||
|
if not self.running:
|
||||||
|
return
|
||||||
|
|
||||||
|
t = "{} {} \033[33m|{}| {}\n{}".format(
|
||||||
|
self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
|
||||||
|
)
|
||||||
|
self.log(t, 6)
|
||||||
|
|
||||||
|
if not self.probing:
|
||||||
|
self.process()
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.probing < time.time():
|
||||||
|
t = "probe ok; announcing [{}]"
|
||||||
|
self.log(t.format(self.hn[:-1]), 2)
|
||||||
|
self.probing = 0
|
||||||
|
|
||||||
|
def stop(self, panic=False) -> None:
|
||||||
|
self.running = False
|
||||||
|
if not panic:
|
||||||
|
for srv in self.srv.values():
|
||||||
|
try:
|
||||||
|
srv.sck.sendto(srv.bp_bye, (srv.grp, 5353))
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.srv = {}
|
||||||
|
|
||||||
|
def eat(self, buf: bytes, addr: tuple[str, int], sck: socket.socket) -> None:
|
||||||
|
cip = addr[0]
|
||||||
|
v6 = ":" in cip
|
||||||
|
if (cip.startswith("169.254") and not self.ll_ok) or (
|
||||||
|
v6 and not cip.startswith("fe80")
|
||||||
|
):
|
||||||
|
return
|
||||||
|
|
||||||
|
cache = self.rx6 if v6 else self.rx4
|
||||||
|
if buf in cache.c:
|
||||||
|
return
|
||||||
|
|
||||||
|
srv: Optional[MDNS_Sck] = self.srv[sck] if v6 else self.map_client(cip) # type: ignore
|
||||||
|
if not srv:
|
||||||
|
return
|
||||||
|
|
||||||
|
cache.add(buf)
|
||||||
|
now = time.time()
|
||||||
|
|
||||||
|
if self.args.zmv and cip != srv.ip and cip not in srv.ips:
|
||||||
|
t = "{} [{}] \033[36m{} \033[0m|{}|"
|
||||||
|
self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")
|
||||||
|
|
||||||
|
p = DNSRecord.parse(buf)
|
||||||
|
if self.args.zmvv:
|
||||||
|
self.log(str(p))
|
||||||
|
|
||||||
|
# check for incoming probes for our hostname
|
||||||
|
cips = [U(x.rdata) for x in p.auth if U(x.rname).lower() == self.lhn]
|
||||||
|
if cips and self.sips.isdisjoint(cips):
|
||||||
|
if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
|
||||||
|
# avahi broadcasting 127.0.0.1-only packets
|
||||||
|
return
|
||||||
|
|
||||||
|
self.log("someone trying to steal our hostname: {}".format(cips), 3)
|
||||||
|
# immediately unicast
|
||||||
|
if not self.probing:
|
||||||
|
srv.sck.sendto(srv.bp_ip, (cip, 5353))
|
||||||
|
|
||||||
|
# and schedule multicast
|
||||||
|
self.defend[srv] = self.defend.get(srv, now + 0.1)
|
||||||
|
return
|
||||||
|
|
||||||
|
# check for someone rejecting our probe / hijacking our hostname
|
||||||
|
cips = [
|
||||||
|
U(x.rdata)
|
||||||
|
for x in p.rr
|
||||||
|
if U(x.rname).lower() == self.lhn and x.rclass == DC.F_IN
|
||||||
|
]
|
||||||
|
if cips and self.sips.isdisjoint(cips):
|
||||||
|
if not [x for x in cips if x not in ("::1", "127.0.0.1")]:
|
||||||
|
# avahi broadcasting 127.0.0.1-only packets
|
||||||
|
return
|
||||||
|
|
||||||
|
t = "mdns zeroconf: "
|
||||||
|
if self.probing:
|
||||||
|
t += "Cannot start; hostname '{}' is occupied"
|
||||||
|
else:
|
||||||
|
t += "Emergency stop; hostname '{}' got stolen"
|
||||||
|
|
||||||
|
t += " on {}! Use --name to set another hostname.\n\nName taken by {}\n\nYour IPs: {}\n"
|
||||||
|
self.log(t.format(self.args.name, srv.name, cips, list(self.sips)), 1)
|
||||||
|
self.stop(True)
|
||||||
|
return
|
||||||
|
|
||||||
|
# then rfc-6.7; dns pretending to be mdns (android...)
|
||||||
|
if p.header.id or addr[1] != 5353:
|
||||||
|
rsp: Optional[DNSRecord] = None
|
||||||
|
for r in p.questions:
|
||||||
|
try:
|
||||||
|
lhn = U(r.qname).lower()
|
||||||
|
except:
|
||||||
|
self.log("invalid question: {}".format(r))
|
||||||
|
continue
|
||||||
|
|
||||||
|
if lhn != self.lhn:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if p.header.id and r.qtype in (QTYPE.A, QTYPE.AAAA):
|
||||||
|
rsp = rsp or DNSRecord(DNSHeader(p.header.id, 0x8400))
|
||||||
|
rsp.add_question(r)
|
||||||
|
for ip in srv.ips:
|
||||||
|
qt = r.qtype
|
||||||
|
v6 = ":" in ip
|
||||||
|
if v6 == (qt == QTYPE.AAAA):
|
||||||
|
rd = AAAA(ip) if v6 else A(ip)
|
||||||
|
rr = RR(self.hn, qt, DC.IN, 10, rd)
|
||||||
|
rsp.add_answer(rr)
|
||||||
|
if rsp:
|
||||||
|
srv.sck.sendto(rsp.pack(), addr[:2])
|
||||||
|
# but don't return in case it's a differently broken client
|
||||||
|
|
||||||
|
# then a/aaaa records
|
||||||
|
for r in p.questions:
|
||||||
|
try:
|
||||||
|
lhn = U(r.qname).lower()
|
||||||
|
except:
|
||||||
|
self.log("invalid question: {}".format(r))
|
||||||
|
continue
|
||||||
|
|
||||||
|
if lhn != self.lhn:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# gvfs keeps repeating itself
|
||||||
|
found = False
|
||||||
|
unicast = False
|
||||||
|
for rr in p.rr:
|
||||||
|
try:
|
||||||
|
rname = U(rr.rname).lower()
|
||||||
|
except:
|
||||||
|
self.log("invalid rr: {}".format(rr))
|
||||||
|
continue
|
||||||
|
|
||||||
|
if rname == self.lhn:
|
||||||
|
if rr.ttl > 60:
|
||||||
|
found = True
|
||||||
|
if rr.rclass == DC.F_IN:
|
||||||
|
unicast = True
|
||||||
|
|
||||||
|
if unicast:
|
||||||
|
# spec-compliant mDNS-over-unicast
|
||||||
|
srv.sck.sendto(srv.bp_ip, (cip, 5353))
|
||||||
|
elif addr[1] != 5353:
|
||||||
|
# just in case some clients use (and want us to use) invalid ports
|
||||||
|
srv.sck.sendto(srv.bp_ip, addr[:2])
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
self.q[cip] = (0, srv, srv.bp_ip)
|
||||||
|
return
|
||||||
|
|
||||||
|
deadline = now + (0.5 if p.header.tc else 0.02) # rfc-7.2
|
||||||
|
|
||||||
|
# and service queries
|
||||||
|
for r in p.questions:
|
||||||
|
if not r or not r.qname:
|
||||||
|
continue
|
||||||
|
|
||||||
|
qname = U(r.qname).lower()
|
||||||
|
if qname in self.lsvcs or qname == "_services._dns-sd._udp.local.":
|
||||||
|
self.q[cip] = (deadline, srv, srv.bp_svc)
|
||||||
|
break
|
||||||
|
# heed rfc-7.1 if there was an announce in the past 12sec
|
||||||
|
# (workaround gvfs race-condition where it occasionally
|
||||||
|
# doesn't read/decode the full response...)
|
||||||
|
if now < srv.last_tx + 12:
|
||||||
|
for rr in p.rr:
|
||||||
|
if not rr.rdata:
|
||||||
|
continue
|
||||||
|
|
||||||
|
rdata = U(rr.rdata).lower()
|
||||||
|
if rdata in self.lsfqdns:
|
||||||
|
if rr.ttl > 2250:
|
||||||
|
self.q.pop(cip, None)
|
||||||
|
break
|
||||||
|
|
||||||
|
def process(self) -> None:
|
||||||
|
tx = set()
|
||||||
|
now = time.time()
|
||||||
|
cooldown = 0.9 # rfc-6: 1
|
||||||
|
if self.unsolicited and self.unsolicited[0] < now:
|
||||||
|
self.unsolicited.pop(0)
|
||||||
|
cooldown = 0.1
|
||||||
|
for srv in self.srv.values():
|
||||||
|
tx.add(srv)
|
||||||
|
|
||||||
|
for srv, deadline in list(self.defend.items()):
|
||||||
|
if now < deadline:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self._tx(srv, srv.bp_ip, 0.02): # rfc-6: 0.25
|
||||||
|
self.defend.pop(srv)
|
||||||
|
|
||||||
|
for cip, (deadline, srv, msg) in list(self.q.items()):
|
||||||
|
if now < deadline:
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.q.pop(cip)
|
||||||
|
self._tx(srv, msg, cooldown)
|
||||||
|
|
||||||
|
for srv in tx:
|
||||||
|
self._tx(srv, srv.bp_svc, cooldown)
|
||||||
|
|
||||||
|
def _tx(self, srv: MDNS_Sck, msg: bytes, cooldown: float) -> bool:
|
||||||
|
now = time.time()
|
||||||
|
if now < srv.last_tx + cooldown:
|
||||||
|
return False
|
||||||
|
|
||||||
|
srv.sck.sendto(msg, (srv.grp, 5353))
|
||||||
|
srv.last_tx = now
|
||||||
|
return True
|
||||||
@@ -12,25 +12,23 @@ from .__init__ import PY2, WINDOWS, E, unicode
|
|||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .util import REKOBO_LKEY, fsenc, min_ex, retchk, runcmd, uncyg
|
from .util import REKOBO_LKEY, fsenc, min_ex, retchk, runcmd, uncyg
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Union
|
from typing import Any, Union
|
||||||
|
|
||||||
from .util import RootLogger
|
from .util import RootLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def have_ff(cmd: str) -> bool:
|
def have_ff(scmd: str) -> bool:
|
||||||
if PY2:
|
if PY2:
|
||||||
print("# checking {}".format(cmd))
|
print("# checking {}".format(scmd))
|
||||||
cmd = (cmd + " -version").encode("ascii").split(b" ")
|
acmd = (scmd + " -version").encode("ascii").split(b" ")
|
||||||
try:
|
try:
|
||||||
sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate()
|
sp.Popen(acmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate()
|
||||||
return True
|
return True
|
||||||
except:
|
except:
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
return bool(shutil.which(cmd))
|
return bool(shutil.which(scmd))
|
||||||
|
|
||||||
|
|
||||||
HAVE_FFMPEG = have_ff("ffmpeg")
|
HAVE_FFMPEG = have_ff("ffmpeg")
|
||||||
@@ -269,7 +267,7 @@ class MTag(object):
|
|||||||
if self.backend == "mutagen":
|
if self.backend == "mutagen":
|
||||||
self.get = self.get_mutagen
|
self.get = self.get_mutagen
|
||||||
try:
|
try:
|
||||||
import mutagen # noqa: F401 # pylint: disable=unused-import,import-outside-toplevel
|
from mutagen import version # noqa: F401
|
||||||
except:
|
except:
|
||||||
self.log("could not load Mutagen, trying FFprobe instead", c=3)
|
self.log("could not load Mutagen, trying FFprobe instead", c=3)
|
||||||
self.backend = "ffprobe"
|
self.backend = "ffprobe"
|
||||||
@@ -381,20 +379,26 @@ class MTag(object):
|
|||||||
parser_output[alias] = (priority, tv[0])
|
parser_output[alias] = (priority, tv[0])
|
||||||
|
|
||||||
# take first value (lowest priority / most preferred)
|
# take first value (lowest priority / most preferred)
|
||||||
ret = {sk: unicode(tv[1]).strip() for sk, tv in parser_output.items()}
|
ret: dict[str, Union[str, float]] = {
|
||||||
|
sk: unicode(tv[1]).strip() for sk, tv in parser_output.items()
|
||||||
|
}
|
||||||
|
|
||||||
# track 3/7 => track 3
|
# track 3/7 => track 3
|
||||||
for sk, tv in ret.items():
|
for sk, zv in ret.items():
|
||||||
if sk[0] == ".":
|
if sk[0] == ".":
|
||||||
sv = str(tv).split("/")[0].strip().lstrip("0")
|
sv = str(zv).split("/")[0].strip().lstrip("0")
|
||||||
ret[sk] = sv or 0
|
ret[sk] = sv or 0
|
||||||
|
|
||||||
# normalize key notation to rkeobo
|
# normalize key notation to rkeobo
|
||||||
okey = ret.get("key")
|
okey = ret.get("key")
|
||||||
if okey:
|
if okey:
|
||||||
key = okey.replace(" ", "").replace("maj", "").replace("min", "m")
|
key = str(okey).replace(" ", "").replace("maj", "").replace("min", "m")
|
||||||
ret["key"] = REKOBO_LKEY.get(key.lower(), okey)
|
ret["key"] = REKOBO_LKEY.get(key.lower(), okey)
|
||||||
|
|
||||||
|
if self.args.mtag_vv:
|
||||||
|
zl = " ".join("\033[36m{} \033[33m{}".format(k, v) for k, v in ret.items())
|
||||||
|
self.log("norm: {}\033[0m".format(zl), "90")
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def compare(self, abspath: str) -> dict[str, Union[str, float]]:
|
def compare(self, abspath: str) -> dict[str, Union[str, float]]:
|
||||||
@@ -441,10 +445,15 @@ class MTag(object):
|
|||||||
if not bos.path.isfile(abspath):
|
if not bos.path.isfile(abspath):
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
import mutagen
|
from mutagen import File
|
||||||
|
|
||||||
try:
|
try:
|
||||||
md = mutagen.File(fsenc(abspath), easy=True)
|
md = File(fsenc(abspath), easy=True)
|
||||||
|
assert md
|
||||||
|
if self.args.mtag_vv:
|
||||||
|
for zd in (md.info.__dict__, dict(md.tags)):
|
||||||
|
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()]
|
||||||
|
self.log("mutagen: {}\033[0m".format(" ".join(zl)), "90")
|
||||||
if not md.info.length and not md.info.codec:
|
if not md.info.length and not md.info.codec:
|
||||||
raise Exception()
|
raise Exception()
|
||||||
except:
|
except:
|
||||||
@@ -494,6 +503,12 @@ class MTag(object):
|
|||||||
return {}
|
return {}
|
||||||
|
|
||||||
ret, md = ffprobe(abspath, self.args.mtag_to)
|
ret, md = ffprobe(abspath, self.args.mtag_to)
|
||||||
|
|
||||||
|
if self.args.mtag_vv:
|
||||||
|
for zd in (ret, dict(md)):
|
||||||
|
zl = ["\033[36m{} \033[33m{}".format(k, v) for k, v in zd.items()]
|
||||||
|
self.log("ffprobe: {}\033[0m".format(" ".join(zl)), "90")
|
||||||
|
|
||||||
return self.normalize_tags(ret, md)
|
return self.normalize_tags(ret, md)
|
||||||
|
|
||||||
def get_bin(
|
def get_bin(
|
||||||
|
|||||||
372
copyparty/multicast.py
Normal file
372
copyparty/multicast.py
Normal file
@@ -0,0 +1,372 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import socket
|
||||||
|
import time
|
||||||
|
|
||||||
|
import ipaddress
|
||||||
|
from ipaddress import (
|
||||||
|
IPv4Address,
|
||||||
|
IPv4Network,
|
||||||
|
IPv6Address,
|
||||||
|
IPv6Network,
|
||||||
|
ip_address,
|
||||||
|
ip_network,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .__init__ import TYPE_CHECKING
|
||||||
|
from .util import MACOS, Netdev, min_ex, spack
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .svchub import SvcHub
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
if not hasattr(socket, "IPPROTO_IPV6"):
|
||||||
|
setattr(socket, "IPPROTO_IPV6", 41)
|
||||||
|
|
||||||
|
|
||||||
|
class NoIPs(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class MC_Sck(object):
|
||||||
|
"""there is one socket for each server ip"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
sck: socket.socket,
|
||||||
|
nd: Netdev,
|
||||||
|
grp: str,
|
||||||
|
ip: str,
|
||||||
|
net: Union[IPv4Network, IPv6Network],
|
||||||
|
):
|
||||||
|
self.sck = sck
|
||||||
|
self.idx = nd.idx
|
||||||
|
self.name = nd.name
|
||||||
|
self.grp = grp
|
||||||
|
self.mreq = b""
|
||||||
|
self.ip = ip
|
||||||
|
self.net = net
|
||||||
|
self.ips = {ip: net}
|
||||||
|
self.v6 = ":" in ip
|
||||||
|
self.have4 = ":" not in ip
|
||||||
|
self.have6 = ":" in ip
|
||||||
|
|
||||||
|
|
||||||
|
class MCast(object):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
hub: "SvcHub",
|
||||||
|
Srv: type[MC_Sck],
|
||||||
|
on: list[str],
|
||||||
|
off: list[str],
|
||||||
|
mc_grp_4: str,
|
||||||
|
mc_grp_6: str,
|
||||||
|
port: int,
|
||||||
|
vinit: bool,
|
||||||
|
) -> None:
|
||||||
|
"""disable ipv%d by setting mc_grp_%d empty"""
|
||||||
|
self.hub = hub
|
||||||
|
self.Srv = Srv
|
||||||
|
self.args = hub.args
|
||||||
|
self.asrv = hub.asrv
|
||||||
|
self.log_func = hub.log
|
||||||
|
self.on = on
|
||||||
|
self.off = off
|
||||||
|
self.grp4 = mc_grp_4
|
||||||
|
self.grp6 = mc_grp_6
|
||||||
|
self.port = port
|
||||||
|
self.vinit = vinit
|
||||||
|
|
||||||
|
self.srv: dict[socket.socket, MC_Sck] = {} # listening sockets
|
||||||
|
self.sips: set[str] = set() # all listening ips (including failed attempts)
|
||||||
|
self.ll_ok: set[str] = set() # fallback linklocal IPv4 and IPv6 addresses
|
||||||
|
self.b2srv: dict[bytes, MC_Sck] = {} # binary-ip -> server socket
|
||||||
|
self.b4: list[bytes] = [] # sorted list of binary-ips
|
||||||
|
self.b6: list[bytes] = [] # sorted list of binary-ips
|
||||||
|
self.cscache: dict[str, Optional[MC_Sck]] = {} # client ip -> server cache
|
||||||
|
|
||||||
|
self.running = True
|
||||||
|
|
||||||
|
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
||||||
|
self.log_func("multicast", msg, c)
|
||||||
|
|
||||||
|
def create_servers(self) -> list[str]:
|
||||||
|
bound: list[str] = []
|
||||||
|
netdevs = self.hub.tcpsrv.netdevs
|
||||||
|
ips = [x[0] for x in self.hub.tcpsrv.bound]
|
||||||
|
|
||||||
|
if "::" in ips:
|
||||||
|
ips = [x for x in ips if x != "::"] + list(
|
||||||
|
[x.split("/")[0] for x in netdevs if ":" in x]
|
||||||
|
)
|
||||||
|
ips.append("0.0.0.0")
|
||||||
|
|
||||||
|
if "0.0.0.0" in ips:
|
||||||
|
ips = [x for x in ips if x != "0.0.0.0"] + list(
|
||||||
|
[x.split("/")[0] for x in netdevs if ":" not in x]
|
||||||
|
)
|
||||||
|
|
||||||
|
ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
|
||||||
|
|
||||||
|
# ip -> ip/prefix
|
||||||
|
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips]
|
||||||
|
|
||||||
|
on = self.on[:]
|
||||||
|
off = self.off[:]
|
||||||
|
for lst in (on, off):
|
||||||
|
for av in list(lst):
|
||||||
|
try:
|
||||||
|
arg_net = ip_network(av, False)
|
||||||
|
except:
|
||||||
|
arg_net = None
|
||||||
|
|
||||||
|
for sk, sv in netdevs.items():
|
||||||
|
if arg_net:
|
||||||
|
net_ip = ip_address(sk.split("/")[0])
|
||||||
|
if net_ip in arg_net and sk not in lst:
|
||||||
|
lst.append(sk)
|
||||||
|
|
||||||
|
if (av == str(sv.idx) or av == sv.name) and sk not in lst:
|
||||||
|
lst.append(sk)
|
||||||
|
|
||||||
|
if on:
|
||||||
|
ips = [x for x in ips if x in on]
|
||||||
|
elif off:
|
||||||
|
ips = [x for x in ips if x not in off]
|
||||||
|
|
||||||
|
if not self.grp4:
|
||||||
|
ips = [x for x in ips if ":" in x]
|
||||||
|
|
||||||
|
if not self.grp6:
|
||||||
|
ips = [x for x in ips if ":" not in x]
|
||||||
|
|
||||||
|
ips = list(set(ips))
|
||||||
|
all_selected = ips[:]
|
||||||
|
|
||||||
|
# discard non-linklocal ipv6
|
||||||
|
ips = [x for x in ips if ":" not in x or x.startswith("fe80")]
|
||||||
|
|
||||||
|
if not ips:
|
||||||
|
raise NoIPs()
|
||||||
|
|
||||||
|
for ip in ips:
|
||||||
|
v6 = ":" in ip
|
||||||
|
netdev = netdevs[ip]
|
||||||
|
if not netdev.idx:
|
||||||
|
t = "using INADDR_ANY for ip [{}], netdev [{}]"
|
||||||
|
if not self.srv and ip not in ["::", "0.0.0.0"]:
|
||||||
|
self.log(t.format(ip, netdev), 3)
|
||||||
|
|
||||||
|
ipv = socket.AF_INET6 if v6 else socket.AF_INET
|
||||||
|
sck = socket.socket(ipv, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
|
||||||
|
sck.settimeout(None)
|
||||||
|
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
|
try:
|
||||||
|
sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# most ipv6 clients expect multicast on linklocal ip only;
|
||||||
|
# add a/aaaa records for the other nic IPs
|
||||||
|
other_ips: set[str] = set()
|
||||||
|
if v6:
|
||||||
|
for nd in netdevs.values():
|
||||||
|
if nd.idx == netdev.idx and nd.ip in all_selected and ":" in nd.ip:
|
||||||
|
other_ips.add(nd.ip)
|
||||||
|
|
||||||
|
net = ipaddress.ip_network(ip, False)
|
||||||
|
ip = ip.split("/")[0]
|
||||||
|
srv = self.Srv(sck, netdev, self.grp6 if ":" in ip else self.grp4, ip, net)
|
||||||
|
for oth_ip in other_ips:
|
||||||
|
srv.ips[oth_ip.split("/")[0]] = ipaddress.ip_network(oth_ip, False)
|
||||||
|
|
||||||
|
# gvfs breaks if a linklocal ip appears in a dns reply
|
||||||
|
ll = {
|
||||||
|
k: v
|
||||||
|
for k, v in srv.ips.items()
|
||||||
|
if k.startswith("169.254") or k.startswith("fe80")
|
||||||
|
}
|
||||||
|
rt = {k: v for k, v in srv.ips.items() if k not in ll}
|
||||||
|
|
||||||
|
if self.args.ll or not rt:
|
||||||
|
self.ll_ok.update(list(ll))
|
||||||
|
|
||||||
|
if not self.args.ll:
|
||||||
|
srv.ips = rt or ll
|
||||||
|
|
||||||
|
if not srv.ips:
|
||||||
|
self.log("no IPs on {}; skipping [{}]".format(netdev, ip), 3)
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.setup_socket(srv)
|
||||||
|
self.srv[sck] = srv
|
||||||
|
bound.append(ip)
|
||||||
|
except:
|
||||||
|
t = "announce failed on {} [{}]:\n{}"
|
||||||
|
self.log(t.format(netdev, ip, min_ex()), 3)
|
||||||
|
|
||||||
|
if self.args.zm_msub:
|
||||||
|
for s1 in self.srv.values():
|
||||||
|
for s2 in self.srv.values():
|
||||||
|
if s1.idx != s2.idx:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if s1.ip not in s2.ips:
|
||||||
|
s2.ips[s1.ip] = s1.net
|
||||||
|
|
||||||
|
if self.args.zm_mnic:
|
||||||
|
for s1 in self.srv.values():
|
||||||
|
for s2 in self.srv.values():
|
||||||
|
for ip1, net1 in list(s1.ips.items()):
|
||||||
|
for ip2, net2 in list(s2.ips.items()):
|
||||||
|
if net1 == net2 and ip1 != ip2:
|
||||||
|
s1.ips[ip2] = net2
|
||||||
|
|
||||||
|
self.sips = set([x.split("/")[0] for x in all_selected])
|
||||||
|
for srv in self.srv.values():
|
||||||
|
assert srv.ip in self.sips
|
||||||
|
|
||||||
|
return bound
|
||||||
|
|
||||||
|
def setup_socket(self, srv: MC_Sck) -> None:
|
||||||
|
sck = srv.sck
|
||||||
|
if srv.v6:
|
||||||
|
if self.vinit:
|
||||||
|
zsl = list(srv.ips.keys())
|
||||||
|
self.log("v6({}) idx({}) {}".format(srv.ip, srv.idx, zsl), 6)
|
||||||
|
|
||||||
|
for ip in srv.ips:
|
||||||
|
bip = socket.inet_pton(socket.AF_INET6, ip)
|
||||||
|
self.b2srv[bip] = srv
|
||||||
|
self.b6.append(bip)
|
||||||
|
|
||||||
|
grp = self.grp6 if srv.idx else ""
|
||||||
|
try:
|
||||||
|
if MACOS:
|
||||||
|
raise Exception()
|
||||||
|
|
||||||
|
sck.bind((grp, self.port, 0, srv.idx))
|
||||||
|
except:
|
||||||
|
sck.bind(("", self.port, 0, srv.idx))
|
||||||
|
|
||||||
|
bgrp = socket.inet_pton(socket.AF_INET6, self.grp6)
|
||||||
|
dev = spack(b"@I", srv.idx)
|
||||||
|
srv.mreq = bgrp + dev
|
||||||
|
if srv.idx != socket.INADDR_ANY:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, dev)
|
||||||
|
|
||||||
|
try:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
|
||||||
|
sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
|
||||||
|
except:
|
||||||
|
# macos
|
||||||
|
t = "failed to set IPv6 TTL/LOOP; announcements may not survive multiple switches/routers"
|
||||||
|
self.log(t, 3)
|
||||||
|
else:
|
||||||
|
if self.vinit:
|
||||||
|
self.log("v4({}) idx({})".format(srv.ip, srv.idx), 6)
|
||||||
|
|
||||||
|
bip = socket.inet_aton(srv.ip)
|
||||||
|
self.b2srv[bip] = srv
|
||||||
|
self.b4.append(bip)
|
||||||
|
|
||||||
|
grp = self.grp4 if srv.idx else ""
|
||||||
|
try:
|
||||||
|
if MACOS:
|
||||||
|
raise Exception()
|
||||||
|
|
||||||
|
sck.bind((grp, self.port))
|
||||||
|
except:
|
||||||
|
sck.bind(("", self.port))
|
||||||
|
|
||||||
|
bgrp = socket.inet_aton(self.grp4)
|
||||||
|
dev = (
|
||||||
|
spack(b"=I", socket.INADDR_ANY)
|
||||||
|
if srv.idx == socket.INADDR_ANY
|
||||||
|
else socket.inet_aton(srv.ip)
|
||||||
|
)
|
||||||
|
srv.mreq = bgrp + dev
|
||||||
|
if srv.idx != socket.INADDR_ANY:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, dev)
|
||||||
|
|
||||||
|
try:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
|
||||||
|
sck.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
|
||||||
|
except:
|
||||||
|
# probably can't happen but dontcare if it does
|
||||||
|
t = "failed to set IPv4 TTL/LOOP; announcements may not survive multiple switches/routers"
|
||||||
|
self.log(t, 3)
|
||||||
|
|
||||||
|
self.hop(srv)
|
||||||
|
self.b4.sort(reverse=True)
|
||||||
|
self.b6.sort(reverse=True)
|
||||||
|
|
||||||
|
def hop(self, srv: MC_Sck) -> None:
|
||||||
|
"""rejoin to keepalive on routers/switches without igmp-snooping"""
|
||||||
|
sck = srv.sck
|
||||||
|
req = srv.mreq
|
||||||
|
if ":" in srv.ip:
|
||||||
|
try:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, req)
|
||||||
|
# linux does leaves/joins twice with 0.2~1.05s spacing
|
||||||
|
time.sleep(1.2)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
sck.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, req)
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
sck.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, req)
|
||||||
|
time.sleep(1.2)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# t = "joining {} from ip {} idx {} with mreq {}"
|
||||||
|
# self.log(t.format(srv.grp, srv.ip, srv.idx, repr(srv.mreq)), 6)
|
||||||
|
sck.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, req)
|
||||||
|
|
||||||
|
def map_client(self, cip: str) -> Optional[MC_Sck]:
|
||||||
|
try:
|
||||||
|
return self.cscache[cip]
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
ret: Optional[MC_Sck] = None
|
||||||
|
v6 = ":" in cip
|
||||||
|
ci = IPv6Address(cip) if v6 else IPv4Address(cip)
|
||||||
|
for x in self.b6 if v6 else self.b4:
|
||||||
|
srv = self.b2srv[x]
|
||||||
|
if any([x for x in srv.ips.values() if ci in x]):
|
||||||
|
ret = srv
|
||||||
|
break
|
||||||
|
|
||||||
|
if not ret and cip in ("127.0.0.1", "::1"):
|
||||||
|
# just give it something
|
||||||
|
ret = list(self.srv.values())[0]
|
||||||
|
|
||||||
|
if not ret and cip.startswith("169.254"):
|
||||||
|
# idk how to map LL IPv4 msgs to nics;
|
||||||
|
# just pick one and hope for the best
|
||||||
|
lls = (
|
||||||
|
x
|
||||||
|
for x in self.srv.values()
|
||||||
|
if next((y for y in x.ips if y in self.ll_ok), None)
|
||||||
|
)
|
||||||
|
ret = next(lls, None)
|
||||||
|
|
||||||
|
if ret:
|
||||||
|
t = "new client on {} ({}): {}"
|
||||||
|
self.log(t.format(ret.name, ret.net, cip), 6)
|
||||||
|
else:
|
||||||
|
t = "could not map client {} to known subnet; maybe forwarded from another network?"
|
||||||
|
self.log(t.format(cip), 3)
|
||||||
|
|
||||||
|
if len(self.cscache) > 9000:
|
||||||
|
self.cscache = {}
|
||||||
|
|
||||||
|
self.cscache[cip] = ret
|
||||||
|
return ret
|
||||||
321
copyparty/smbd.py
Normal file
321
copyparty/smbd.py
Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import stat
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from types import SimpleNamespace
|
||||||
|
|
||||||
|
from .__init__ import ANYWIN, TYPE_CHECKING
|
||||||
|
from .authsrv import LEELOO_DALLAS, VFS
|
||||||
|
from .bos import bos
|
||||||
|
from .util import Daemon, min_ex
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .svchub import SvcHub
|
||||||
|
|
||||||
|
|
||||||
|
# module-level logger; impacket logs through stdlib logging, so we do too
lg = logging.getLogger("smb")
debug = lg.debug
info = lg.info
warning = lg.warning
error = lg.error
|
||||||
|
|
||||||
|
|
||||||
|
class SMB(object):
    """SMB file-server frontend: wraps impacket's SimpleSMBServer and
    monkey-patches its os / os.path usage so every filesystem call goes
    through copyparty's VFS and permission checks instead of the real fs.
    """

    def __init__(self, hub: "SvcHub") -> None:
        self.hub = hub
        self.args = hub.args
        self.asrv = hub.asrv
        self.log = hub.log
        # fd -> (open-time, vpath) for files opened for writing,
        # so _close can trigger up2k hashing on the finished file
        self.files: dict[int, tuple[float, str]] = {}

        # -smbvvv enables our debug logs; -smbvv additionally impacket's
        lg.setLevel(logging.DEBUG if self.args.smbvvv else logging.INFO)
        for x in ["impacket", "impacket.smbserver"]:
            lgr = logging.getLogger(x)
            lgr.setLevel(logging.DEBUG if self.args.smbvv else logging.INFO)

        try:
            from impacket import smbserver
            from impacket.ntlm import compute_lmhash, compute_nthash
        except ImportError:
            m = "\033[36m\n{}\033[31m\n\nERROR: need 'impacket'; please run this command:\033[33m\n {} -m pip install --user impacket\n\033[0m"
            print(m.format(min_ex(), sys.executable))
            sys.exit(1)

        # patch vfs into smbserver.os:
        # clone the os module into a namespace, then override the calls
        # that must be permission-checked / vpath-translated
        fos = SimpleNamespace()
        for k in os.__dict__:
            try:
                setattr(fos, k, getattr(os, k))
            except:
                pass
        fos.close = self._close
        fos.listdir = self._listdir
        fos.mkdir = self._mkdir
        fos.open = self._open
        fos.remove = self._unlink
        fos.rename = self._rename
        fos.stat = self._stat
        fos.unlink = self._unlink
        fos.utime = self._utime
        smbserver.os = fos

        # ...and smbserver.os.path
        fop = SimpleNamespace()
        for k in os.path.__dict__:
            try:
                setattr(fop, k, getattr(os.path, k))
            except:
                pass
        fop.exists = self._p_exists
        fop.getsize = self._p_getsize
        fop.isdir = self._p_isdir
        smbserver.os.path = fop

        if not self.args.smb_nwa_2:
            fop.join = self._p_join

        # other patches
        smbserver.isInFileJail = self._is_in_file_jail
        # replace every remaining dangerous os/os.path call with a tripwire
        self._disarm()

        ip = next((x for x in self.args.i if ":" not in x), None)
        if not ip:
            self.log("smb", "IPv6 not supported for SMB; listening on 0.0.0.0", 3)
            ip = "0.0.0.0"

        port = int(self.args.smb_port)
        srv = smbserver.SimpleSMBServer(listenAddress=ip, listenPort=port)

        ro = "no" if self.args.smbw else "yes"  # (does nothing)
        srv.addShare("A", "/", readOnly=ro)
        srv.setSMB2Support(not self.args.smb1)

        # register each account, plus (password, "k") as a fallback login
        for name, pwd in self.asrv.acct.items():
            for u, p in ((name, pwd), (pwd, "k")):
                lmhash = compute_lmhash(p)
                nthash = compute_nthash(p)
                srv.addCredential(u, 0, lmhash, nthash)

        # random 8-byte challenge, hex-encoded
        chi = [random.randint(0, 255) for x in range(8)]
        cha = "".join(["{:02x}".format(x) for x in chi])
        srv.setSMBChallenge(cha)

        self.srv = srv
        self.stop = srv.stop
        self.log("smb", "listening @ {}:{}".format(ip, port))

    def start(self) -> None:
        """run the blocking impacket server in a background daemon thread"""
        Daemon(self.srv.start)

    def _v2a(self, caller: str, vpath: str, *a: Any) -> tuple[VFS, str]:
        """translate an smb vpath to (vfs-node, absolute fs path)"""
        vpath = vpath.replace("\\", "/").lstrip("/")
        # cf = inspect.currentframe().f_back
        # c1 = cf.f_back.f_code.co_name
        # c2 = cf.f_code.co_name
        debug('%s("%s", %s)\033[K\033[0m', caller, vpath, str(a))

        # TODO find a way to grab `identity` in smbComSessionSetupAndX and smb2SessionSetup
        vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, True, True)
        return vfs, vfs.canonical(rem)

    def _listdir(self, vpath: str, *a: Any, **ka: Any) -> list[str]:
        """os.listdir replacement; lists virtual + real entries, dirs first"""
        vpath = vpath.replace("\\", "/").lstrip("/")
        # caller = inspect.currentframe().f_back.f_code.co_name
        debug('listdir("%s", %s)\033[K\033[0m', vpath, str(a))
        vfs, rem = self.asrv.vfs.get(vpath, LEELOO_DALLAS, False, False)
        _, vfs_ls, vfs_virt = vfs.ls(
            rem, LEELOO_DALLAS, not self.args.no_scandir, [[False, False]]
        )
        dirs = [x[0] for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
        fils = [x[0] for x in vfs_ls if x[0] not in dirs]
        ls = list(vfs_virt.keys()) + dirs + fils
        if self.args.smb_nwa_1:
            return ls

        # clients crash somewhere around 65760 byte
        # so cap the reply size by estimating each entry's wire size
        ret = []
        sz = 112 * 2  # ['.', '..']
        for n, fn in enumerate(ls):
            if sz >= 64000:
                t = "listing only %d of %d files (%d byte); see impacket#1433"
                warning(t, n, len(ls), sz)
                break

            nsz = len(fn.encode("utf-16", "replace"))
            nsz = ((nsz + 7) // 8) * 8  # 8-byte alignment
            sz += 104 + nsz
            ret.append(fn)

        return ret

    def _open(
        self, vpath: str, flags: int, *a: Any, chmod: int = 0o777, **ka: Any
    ) -> Any:
        """os.open replacement; enforces write permission and tracks write-fds"""
        f_ro = os.O_RDONLY
        if ANYWIN:
            f_ro |= os.O_BINARY

        # anything other than a plain read-only open counts as a write
        wr = flags != f_ro
        if wr and not self.args.smbw:
            yeet("blocked write (no --smbw): " + vpath)

        vfs, ap = self._v2a("open", vpath, *a)
        if wr and not vfs.axs.uwrite:
            yeet("blocked write (no-write-acc): " + vpath)

        ret = bos.open(ap, flags, *a, mode=chmod, **ka)
        if wr:
            now = time.time()
            nf = len(self.files)
            if nf > 9000:
                # drop the older half of tracked fds (leaked/never-closed)
                oldest = min([x[0] for x in self.files.values()])
                cutoff = oldest + (now - oldest) / 2
                self.files = {k: v for k, v in self.files.items() if v[0] > cutoff}
                info("was tracking %d files, now %d", nf, len(self.files))

            vpath = vpath.replace("\\", "/").lstrip("/")
            self.files[ret] = (now, vpath)

        return ret

    def _close(self, fd: int) -> None:
        """os.close replacement; queues up2k hashing for completed writes"""
        os.close(fd)
        if fd not in self.files:
            return

        _, vp = self.files.pop(fd)
        vp, fn = os.path.split(vp)
        vfs, rem = self.hub.asrv.vfs.get(vp, LEELOO_DALLAS, False, True)
        vfs, rem = vfs.get_dbv(rem)
        self.hub.up2k.hash_file(
            vfs.realpath,
            vfs.flags,
            rem,
            fn,
            "1.7.6.2",
            time.time(),
        )

    def _rename(self, vp1: str, vp2: str) -> None:
        """os.rename replacement; needs write-acc at dest and move-acc at src"""
        if not self.args.smbw:
            yeet("blocked rename (no --smbw): " + vp1)

        vp1 = vp1.lstrip("/")
        vp2 = vp2.lstrip("/")

        vfs2, ap2 = self._v2a("rename", vp2, vp1)
        if not vfs2.axs.uwrite:
            yeet("blocked rename (no-write-acc): " + vp2)

        vfs1, _ = self.asrv.vfs.get(vp1, LEELOO_DALLAS, True, True)
        if not vfs1.axs.umove:
            yeet("blocked rename (no-move-acc): " + vp1)

        self.hub.up2k.handle_mv(LEELOO_DALLAS, vp1, vp2)
        try:
            bos.makedirs(ap2)
        except:
            pass

    def _mkdir(self, vpath: str) -> None:
        """os.mkdir replacement with write-permission check"""
        if not self.args.smbw:
            yeet("blocked mkdir (no --smbw): " + vpath)

        vfs, ap = self._v2a("mkdir", vpath)
        if not vfs.axs.uwrite:
            yeet("blocked mkdir (no-write-acc): " + vpath)

        return bos.mkdir(ap)

    def _stat(self, vpath: str, *a: Any, **ka: Any) -> os.stat_result:
        """os.stat replacement operating on the translated abspath"""
        return bos.stat(self._v2a("stat", vpath, *a)[1], *a, **ka)

    def _unlink(self, vpath: str) -> None:
        """os.unlink/os.remove replacement; delegates deletion to up2k"""
        if not self.args.smbw:
            yeet("blocked delete (no --smbw): " + vpath)

        # return bos.unlink(self._v2a("stat", vpath, *a)[1])
        vfs, ap = self._v2a("delete", vpath)
        if not vfs.axs.udel:
            yeet("blocked delete (no-del-acc): " + vpath)

        vpath = vpath.replace("\\", "/").lstrip("/")
        self.hub.up2k.handle_rm(LEELOO_DALLAS, "1.7.6.2", [vpath], [])

    def _utime(self, vpath: str, times: tuple[float, float]) -> None:
        """os.utime replacement with write-permission check"""
        if not self.args.smbw:
            yeet("blocked utime (no --smbw): " + vpath)

        vfs, ap = self._v2a("utime", vpath)
        if not vfs.axs.uwrite:
            yeet("blocked utime (no-write-acc): " + vpath)

        return bos.utime(ap, times)

    def _p_exists(self, vpath: str) -> bool:
        """os.path.exists replacement (stat-based)"""
        try:
            bos.stat(self._v2a("p.exists", vpath)[1])
            return True
        except:
            return False

    def _p_getsize(self, vpath: str) -> int:
        """os.path.getsize replacement"""
        st = bos.stat(self._v2a("p.getsize", vpath)[1])
        return st.st_size

    def _p_isdir(self, vpath: str) -> bool:
        """os.path.isdir replacement; False if the path does not exist"""
        try:
            st = bos.stat(self._v2a("p.isdir", vpath)[1])
            return stat.S_ISDIR(st.st_mode)
        except:
            return False

    def _p_join(self, *a) -> str:
        # impacket.smbserver reads globs from queryDirectoryRequest['Buffer']
        # where somehow `fds.*` becomes `fds"*` so lets fix that
        ret = os.path.join(*a)
        return ret.replace('"', ".")  # type: ignore

    def _hook(self, *a: Any, **ka: Any) -> None:
        """tripwire for disarmed os functions: log the caller and refuse"""
        src = inspect.currentframe().f_back.f_code.co_name
        error("\033[31m%s:hook(%s)\033[0m", src, a)
        raise Exception("nope")

    def _disarm(self) -> None:
        """replace every os/os.path call we do not vfs-wrap with _hook,
        so any unexpected filesystem access by impacket fails loudly
        instead of touching the real filesystem
        """
        from impacket import smbserver

        smbserver.os.chmod = self._hook
        smbserver.os.chown = self._hook
        smbserver.os.ftruncate = self._hook
        smbserver.os.lchown = self._hook
        smbserver.os.link = self._hook
        smbserver.os.lstat = self._hook
        smbserver.os.replace = self._hook
        smbserver.os.scandir = self._hook
        smbserver.os.symlink = self._hook
        smbserver.os.truncate = self._hook
        smbserver.os.walk = self._hook

        smbserver.os.path.abspath = self._hook
        smbserver.os.path.expanduser = self._hook
        smbserver.os.path.getatime = self._hook
        smbserver.os.path.getctime = self._hook
        smbserver.os.path.getmtime = self._hook
        smbserver.os.path.isabs = self._hook
        smbserver.os.path.isfile = self._hook
        smbserver.os.path.islink = self._hook
        smbserver.os.path.realpath = self._hook

    def _is_in_file_jail(self, *a: Any) -> bool:
        # handled by vfs
        return True
|
||||||
|
|
||||||
|
|
||||||
|
def yeet(msg: str) -> None:
    """log a rejected smb operation, then abort it by raising"""
    info(msg)
    raise Exception(msg)
|
||||||
195
copyparty/ssdp.py
Normal file
195
copyparty/ssdp.py
Normal file
@@ -0,0 +1,195 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
import select
|
||||||
|
import socket
|
||||||
|
from email.utils import formatdate
|
||||||
|
|
||||||
|
from .__init__ import TYPE_CHECKING
|
||||||
|
from .multicast import MC_Sck, MCast
|
||||||
|
from .util import CachedSet, min_ex, html_escape
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from .broker_util import BrokerCli
|
||||||
|
from .httpcli import HttpCli
|
||||||
|
from .svchub import SvcHub
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
|
||||||
|
GRP = "239.255.255.250"
|
||||||
|
|
||||||
|
|
||||||
|
class SSDP_Sck(MC_Sck):
    """multicast socket with the http port of the matching tcp listener"""

    def __init__(self, *a):
        super(SSDP_Sck, self).__init__(*a)
        # filled in by SSDPd.run once the http listeners are known
        self.hport = 0
|
||||||
|
|
||||||
|
|
||||||
|
class SSDPr(object):
    """generates http responses for httpcli"""

    def __init__(self, broker: "BrokerCli") -> None:
        self.broker = broker
        self.args = broker.args

    def reply(self, hc: "HttpCli") -> bool:
        """route an ssdp-related http request; returns keep-alive flag"""
        if hc.vpath.endswith("device.xml"):
            return self.tx_device(hc)

        hc.reply(b"unknown request", 400)
        return False

    def tx_device(self, hc: "HttpCli") -> bool:
        """send the upnp root-device description xml"""
        zs = """
<?xml version="1.0"?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>{}</URLBase>
<device>
<presentationURL>{}</presentationURL>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>{}</friendlyName>
<modelDescription>file server</modelDescription>
<manufacturer>ed</manufacturer>
<manufacturerURL>https://ocv.me/</manufacturerURL>
<modelName>copyparty</modelName>
<modelURL>https://github.com/9001/copyparty/</modelURL>
<UDN>{}</UDN>
<serviceList>
<service>
<serviceType>urn:schemas-upnp-org:device:Basic:1</serviceType>
<serviceId>urn:schemas-upnp-org:device:Basic</serviceId>
<controlURL>/.cpr/ssdp/services.xml</controlURL>
<eventSubURL>/.cpr/ssdp/services.xml</eventSubURL>
<SCPDURL>/.cpr/ssdp/services.xml</SCPDURL>
</service>
</serviceList>
</device>
</root>"""

        c = html_escape
        # base url from the socket this request arrived on
        sip, sport = hc.s.getsockname()[:2]
        proto = "https" if self.args.https_only else "http"
        ubase = "{}://{}:{}".format(proto, sip, sport)
        zsl = self.args.zsl
        url = zsl if "://" in zsl else ubase + "/" + zsl.lstrip("/")
        name = "{} @ {}".format(self.args.doctitle, self.args.name)
        zs = zs.strip().format(c(ubase), c(url), c(name), c(self.args.zsid))
        hc.reply(zs.encode("utf-8", "replace"))
        return False  # close connection
|
||||||
|
|
||||||
|
|
||||||
|
class SSDPd(MCast):
    """communicates with ssdp clients over multicast"""

    def __init__(self, hub: "SvcHub") -> None:
        al = hub.args
        vinit = al.zsv and not al.zmv
        super(SSDPd, self).__init__(
            hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit
        )
        self.srv: dict[socket.socket, SSDP_Sck] = {}
        # dedupe recently seen request payloads / recently answered clients
        self.rxc = CachedSet(0.7)
        self.txc = CachedSet(5)  # win10: every 3 sec
        self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I)

    def log(self, msg: str, c: Union[int, str] = 0) -> None:
        self.log_func("SSDP", msg, c)

    def run(self) -> None:
        """bind the multicast sockets and answer M-SEARCH until stopped"""
        try:
            bound = self.create_servers()
        except:
            t = "no server IP matches the ssdp config\n{}"
            self.log(t.format(min_ex()), 1)
            bound = []

        if not bound:
            self.log("failed to announce copyparty services on the network", 3)
            return

        # find http port for this listening ip
        for srv in self.srv.values():
            tcps = self.hub.tcpsrv.bound
            hp = next((x[1] for x in tcps if x[0] in ("0.0.0.0", srv.ip)), 0)
            hp = hp or next((x[1] for x in tcps if x[0] == "::"), 0)
            if not hp:
                hp = tcps[0][1]
                self.log("assuming port {} for {}".format(hp, srv.ip), 3)
            srv.hport = hp

        self.log("listening")
        while self.running:
            # select over the socket keys of self.srv
            rdy = select.select(self.srv, [], [], 180)
            rx: list[socket.socket] = rdy[0]  # type: ignore
            self.rxc.cln()
            for sck in rx:
                buf, addr = sck.recvfrom(4096)
                try:
                    self.eat(buf, addr)
                except:
                    if not self.running:
                        return

                    t = "{} {} \033[33m|{}| {}\n{}".format(
                        self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
                    )
                    self.log(t, 6)

    def stop(self) -> None:
        self.running = False
        self.srv = {}

    def eat(self, buf: bytes, addr: tuple[str, int]) -> None:
        """handle one received datagram; reply if it is a rootdevice M-SEARCH"""
        cip = addr[0]
        if cip.startswith("169.254") and not self.ll_ok:
            return

        # recently handled identical payload; skip
        if buf in self.rxc.c:
            return

        srv: Optional[SSDP_Sck] = self.map_client(cip)  # type: ignore
        if not srv:
            return

        self.rxc.add(buf)
        if not buf.startswith(b"M-SEARCH * HTTP/1."):
            raise Exception("not an ssdp message")

        # only answer searches for upnp:rootdevice
        if not self.ptn_st.search(buf):
            return

        if self.args.zsv:
            t = "{} [{}] \033[36m{} \033[0m|{}|"
            self.log(t.format(srv.name, srv.ip, cip, len(buf)), "90")

        # trailing blank line gives the required empty line after headers
        zs = """
HTTP/1.1 200 OK
CACHE-CONTROL: max-age=1800
DATE: {0}
EXT:
LOCATION: http://{1}:{2}/.cpr/ssdp/device.xml
OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01
01-NLS: {3}
SERVER: UPnP/1.0
ST: upnp:rootdevice
USN: {3}::upnp:rootdevice
BOOTID.UPNP.ORG: 0
CONFIGID.UPNP.ORG: 1

"""
        zs = zs.format(formatdate(usegmt=True), srv.ip, srv.hport, self.args.zsid)
        zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace")
        srv.sck.sendto(zb, addr[:2])

        if cip not in self.txc.c:
            self.log("{} [{}] --> {}".format(srv.name, srv.ip, cip), "6")

        self.txc.add(cip)
        self.txc.cln()
|
||||||
@@ -1,21 +1,19 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import print_function, unicode_literals
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import stat
|
||||||
import tarfile
|
import tarfile
|
||||||
import threading
|
|
||||||
|
|
||||||
from queue import Queue
|
from queue import Queue
|
||||||
|
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .sutil import StreamArc, errdesc
|
from .sutil import StreamArc, errdesc
|
||||||
from .util import fsenc, min_ex
|
from .util import Daemon, fsenc, min_ex
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Generator, Optional
|
from typing import Any, Generator, Optional
|
||||||
|
|
||||||
from .util import NamedLogger
|
from .util import NamedLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class QFile(object): # inherit io.StringIO for painful typing
|
class QFile(object): # inherit io.StringIO for painful typing
|
||||||
@@ -60,9 +58,7 @@ class StreamTar(StreamArc):
|
|||||||
fmt = tarfile.GNU_FORMAT
|
fmt = tarfile.GNU_FORMAT
|
||||||
self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt) # type: ignore
|
self.tar = tarfile.open(fileobj=self.qfile, mode="w|", format=fmt) # type: ignore
|
||||||
|
|
||||||
w = threading.Thread(target=self._gen, name="star-gen")
|
Daemon(self._gen, "star-gen")
|
||||||
w.daemon = True
|
|
||||||
w.start()
|
|
||||||
|
|
||||||
def gen(self) -> Generator[Optional[bytes], None, None]:
|
def gen(self) -> Generator[Optional[bytes], None, None]:
|
||||||
try:
|
try:
|
||||||
@@ -84,6 +80,9 @@ class StreamTar(StreamArc):
|
|||||||
src = f["ap"]
|
src = f["ap"]
|
||||||
fsi = f["st"]
|
fsi = f["st"]
|
||||||
|
|
||||||
|
if stat.S_ISDIR(fsi.st_mode):
|
||||||
|
return
|
||||||
|
|
||||||
inf = tarfile.TarInfo(name=name)
|
inf = tarfile.TarInfo(name=name)
|
||||||
inf.mode = fsi.st_mode
|
inf.mode = fsi.st_mode
|
||||||
inf.size = fsi.st_size
|
inf.size = fsi.st_size
|
||||||
|
|||||||
5
copyparty/stolen/dnslib/README.md
Normal file
5
copyparty/stolen/dnslib/README.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
`dnslib` but heavily simplified/feature-stripped
|
||||||
|
|
||||||
|
L: MIT
|
||||||
|
Copyright (c) 2010 - 2017 Paul Chakravarti
|
||||||
|
https://github.com/paulc/dnslib/
|
||||||
11
copyparty/stolen/dnslib/__init__.py
Normal file
11
copyparty/stolen/dnslib/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
"""
|
||||||
|
L: MIT
|
||||||
|
Copyright (c) 2010 - 2017 Paul Chakravarti
|
||||||
|
https://github.com/paulc/dnslib/tree/0.9.23
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .dns import *
|
||||||
|
|
||||||
|
version = "0.9.23"
|
||||||
41
copyparty/stolen/dnslib/bimap.py
Normal file
41
copyparty/stolen/dnslib/bimap.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
import types
|
||||||
|
|
||||||
|
|
||||||
|
class BimapError(Exception):
    """raised on a failed Bimap lookup when no error callback is set"""

    pass
|
||||||
|
|
||||||
|
|
||||||
|
class Bimap(object):
    """Two-way mapping between wire values and symbolic names.

    Forward lookups use indexing (bimap[1]); reverse lookups use attribute
    access (bimap.A).  On a miss, *error* is either an exception type to
    raise or a callback invoked as error(name, key, forward).
    """

    def __init__(self, name, forward, error=AttributeError):
        self.name = name
        self.error = error
        self.forward = forward.copy()
        self.reverse = {v: k for k, v in forward.items()}

    def get(self, k, default=None):
        """forward lookup that never raises; falls back to str(k)"""
        try:
            return self.forward[k]
        except KeyError:
            return default or str(k)

    def __getitem__(self, k):
        """forward lookup (value -> name)"""
        try:
            return self.forward[k]
        except KeyError:
            pass
        if isinstance(self.error, types.FunctionType):
            return self.error(self.name, k, True)
        raise self.error("%s: Invalid forward lookup: [%s]" % (self.name, k))

    def __getattr__(self, k):
        """reverse lookup (name -> value)"""
        # functools.wraps introspection must not hit the error path
        if k == "__wrapped__":
            raise AttributeError()
        try:
            return self.reverse[k]
        except KeyError:
            pass
        if isinstance(self.error, types.FunctionType):
            return self.error(self.name, k, False)
        raise self.error("%s: Invalid reverse lookup: [%s]" % (self.name, k))
|
||||||
15
copyparty/stolen/dnslib/bit.py
Normal file
15
copyparty/stolen/dnslib/bit.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
|
||||||
|
def get_bits(data, offset, bits=1):
    """Return the *bits*-wide field of *data* starting at bit *offset*."""
    return (data >> offset) & ((1 << bits) - 1)
|
||||||
|
|
||||||
|
|
||||||
|
def set_bits(data, value, offset, bits=1):
    """Return a 16-bit *data* with the *bits*-wide field at *offset* set to *value*."""
    field = ((1 << bits) - 1) << offset
    # clear the field within the 16-bit word, then merge the new value
    return (data & (0xFFFF ^ field)) | ((value << offset) & field)
|
||||||
56
copyparty/stolen/dnslib/buffer.py
Normal file
56
copyparty/stolen/dnslib/buffer.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
import binascii
|
||||||
|
import struct
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: intentionally shadows the builtin BufferError (upstream dnslib API)
class BufferError(Exception):
    """raised on out-of-range reads or malformed struct data in Buffer"""

    pass
|
||||||
|
|
||||||
|
|
||||||
|
class Buffer(object):
    """Growable byte buffer with a single read/write cursor (self.offset),
    used for packing and unpacking DNS wire-format messages.
    """

    def __init__(self, data=b""):
        self.data = bytearray(data)
        self.offset = 0

    def remaining(self):
        """number of bytes between the cursor and the end of the buffer"""
        return len(self.data) - self.offset

    def get(self, length):
        """consume and return the next *length* bytes; raises BufferError if short"""
        if length > self.remaining():
            raise BufferError(
                "Not enough bytes [offset=%d,remaining=%d,requested=%d]"
                % (self.offset, self.remaining(), length)
            )
        begin = self.offset
        self.offset = begin + length
        return bytes(self.data[begin : self.offset])

    def hex(self):
        """entire buffer as a hex bytestring (debugging aid)"""
        return binascii.hexlify(self.data)

    def pack(self, fmt, *args):
        """append struct-packed values and advance the cursor past them"""
        self.offset += struct.calcsize(fmt)
        self.data += struct.pack(fmt, *args)

    def append(self, s):
        """append raw bytes and advance the cursor past them"""
        self.offset += len(s)
        self.data += s

    def update(self, ptr, fmt, *args):
        """overwrite previously written bytes at *ptr* (cursor unchanged)"""
        packed = struct.pack(fmt, *args)
        self.data[ptr : ptr + len(packed)] = packed

    def unpack(self, fmt):
        """consume and struct-unpack the next calcsize(fmt) bytes"""
        try:
            data = self.get(struct.calcsize(fmt))
            return struct.unpack(fmt, data)
        except struct.error:
            raise BufferError(
                "Error unpacking struct '%s' <%s>"
                % (fmt, binascii.hexlify(data).decode())
            )

    def __len__(self):
        return len(self.data)
|
||||||
775
copyparty/stolen/dnslib/dns.py
Normal file
775
copyparty/stolen/dnslib/dns.py
Normal file
@@ -0,0 +1,775 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import binascii
|
||||||
|
from itertools import chain
|
||||||
|
|
||||||
|
from .bimap import Bimap, BimapError
|
||||||
|
from .bit import get_bits, set_bits
|
||||||
|
from .buffer import BufferError
|
||||||
|
from .label import DNSBuffer, DNSLabel
|
||||||
|
from .ranges import IP4, IP6, H, I, check_bytes
|
||||||
|
|
||||||
|
|
||||||
|
class DNSError(Exception):
    """raised for malformed packets and invalid bimap lookups"""

    pass
|
||||||
|
|
||||||
|
|
||||||
|
def unknown_qtype(name, key, forward):
    """Bimap fallback for QTYPE codes missing from the table.

    Forward direction maps a numeric code to a synthetic "TYPE%d" name;
    reverse direction parses such a name back into its code.
    Raises DNSError when the key cannot be converted.
    """
    if forward:
        try:
            return "TYPE%d" % (key,)
        except:
            raise DNSError("%s: Invalid forward lookup: [%s]" % (name, key))

    if key.startswith("TYPE"):
        try:
            return int(key[4:])
        except:
            pass
    raise DNSError("%s: Invalid reverse lookup: [%s]" % (name, key))
|
||||||
|
|
||||||
|
|
||||||
|
# value<->name tables for the DNS wire format (feature-stripped dnslib);
# QTYPE falls back to unknown_qtype for codes outside the table
QTYPE = Bimap(
    "QTYPE",
    {1: "A", 12: "PTR", 16: "TXT", 28: "AAAA", 33: "SRV", 47: "NSEC", 255: "ANY"},
    unknown_qtype,
)

# 0x8001: presumably IN with the mDNS cache-flush/unicast-response bit set — confirm
CLASS = Bimap("CLASS", {1: "IN", 254: "None", 255: "*", 0x8001: "F_IN"}, DNSError)

QR = Bimap("QR", {0: "QUERY", 1: "RESPONSE"}, DNSError)

RCODE = Bimap(
    "RCODE",
    {
        0: "NOERROR",
        1: "FORMERR",
        2: "SERVFAIL",
        3: "NXDOMAIN",
        4: "NOTIMP",
        5: "REFUSED",
        6: "YXDOMAIN",
        7: "YXRRSET",
        8: "NXRRSET",
        9: "NOTAUTH",
        10: "NOTZONE",
    },
    DNSError,
)

OPCODE = Bimap(
    "OPCODE", {0: "QUERY", 1: "IQUERY", 2: "STATUS", 4: "NOTIFY", 5: "UPDATE"}, DNSError
)
|
||||||
|
|
||||||
|
|
||||||
|
def label(label, origin=None):
    """Build a DNSLabel from *label*; absolute names (trailing dot) stand
    alone, relative names are appended to *origin*.
    """
    if label.endswith("."):
        return DNSLabel(label)
    base = origin if isinstance(origin, DNSLabel) else DNSLabel(origin)
    return base.add(label)
|
||||||
|
|
||||||
|
|
||||||
|
class DNSRecord(object):
    """A complete DNS message: header plus question / answer / authority /
    additional sections, with wire-format parse and pack.
    """

    @classmethod
    def parse(cls, packet) -> "DNSRecord":
        """parse a wire-format packet; raises DNSError on malformed input"""
        buffer = DNSBuffer(packet)
        try:
            header = DNSHeader.parse(buffer)
            questions = []
            rr = []
            auth = []
            ar = []
            # section lengths come from the header counters
            for i in range(header.q):
                questions.append(DNSQuestion.parse(buffer))
            for i in range(header.a):
                rr.append(RR.parse(buffer))
            for i in range(header.auth):
                auth.append(RR.parse(buffer))
            for i in range(header.ar):
                ar.append(RR.parse(buffer))
            return cls(header, questions, rr, auth=auth, ar=ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSRecord [offset=%d]: %s" % (buffer.offset, e)
            )

    @classmethod
    def question(cls, qname, qtype="A", qclass="IN"):
        """convenience constructor for a single-question query"""
        return DNSRecord(
            q=DNSQuestion(qname, getattr(QTYPE, qtype), getattr(CLASS, qclass))
        )

    def __init__(
        self, header=None, questions=None, rr=None, q=None, a=None, auth=None, ar=None
    ) -> None:
        self.header = header or DNSHeader()
        self.questions: list[DNSQuestion] = questions or []
        self.rr: list[RR] = rr or []
        self.auth: list[RR] = auth or []
        self.ar: list[RR] = ar or []

        # q/a are shorthands for a single question/answer
        if q:
            self.questions.append(q)
        if a:
            self.rr.append(a)
        self.set_header_qa()

    def reply(self, ra=1, aa=1):
        """start a response: same id/bitmap, qr=1, echoing the first question"""
        return DNSRecord(
            DNSHeader(id=self.header.id, bitmap=self.header.bitmap, qr=1, ra=ra, aa=aa),
            q=self.q,
        )

    def add_question(self, *q) -> None:
        self.questions.extend(q)
        self.set_header_qa()

    def add_answer(self, *rr) -> None:
        self.rr.extend(rr)
        self.set_header_qa()

    def add_auth(self, *auth) -> None:
        self.auth.extend(auth)
        self.set_header_qa()

    def add_ar(self, *ar) -> None:
        self.ar.extend(ar)
        self.set_header_qa()

    def set_header_qa(self) -> None:
        """sync the header's section counters with the actual lists"""
        self.header.q = len(self.questions)
        self.header.a = len(self.rr)
        self.header.auth = len(self.auth)
        self.header.ar = len(self.ar)

    def get_q(self):
        """first question, or an empty one if there are none"""
        return self.questions[0] if self.questions else DNSQuestion()

    q = property(get_q)

    def get_a(self):
        """first answer RR, or an empty one if there are none"""
        return self.rr[0] if self.rr else RR()

    a = property(get_a)

    def pack(self) -> bytes:
        """serialize the full message to wire format"""
        self.set_header_qa()
        buffer = DNSBuffer()
        self.header.pack(buffer)
        for q in self.questions:
            q.pack(buffer)
        for rr in self.rr:
            rr.pack(buffer)
        for auth in self.auth:
            auth.pack(buffer)
        for ar in self.ar:
            ar.pack(buffer)
        return buffer.data

    def truncate(self):
        """empty response with the TC (truncated) flag set"""
        return DNSRecord(DNSHeader(id=self.header.id, bitmap=self.header.bitmap, tc=1))

    def format(self, prefix="", sort=False):
        """multi-line human-readable dump of all sections"""
        s = sorted if sort else lambda x: x
        sections = [repr(self.header)]
        sections.extend(s([repr(q) for q in self.questions]))
        sections.extend(s([repr(rr) for rr in self.rr]))
        sections.extend(s([repr(rr) for rr in self.auth]))
        sections.extend(s([repr(rr) for rr in self.ar]))
        return prefix + ("\n" + prefix).join(sections)

    short = format

    def __repr__(self):
        return self.format()

    __str__ = __repr__
|
||||||
|
|
||||||
|
|
||||||
|
class DNSHeader(object):
    """Fixed 12-byte DNS message header (RFC 1035 section 4.1.1).

    All flags and the rcode live in a single 16-bit ``bitmap``; the qr/opcode/
    aa/tc/rd/ra/z/ad/cd/rcode properties below are bit-sliced views onto it.
    """

    # 16-bit range-checked attributes (stored as _id, _bitmap, ... via H)
    id = H("id")
    bitmap = H("bitmap")
    q = H("q")        # question count
    a = H("a")        # answer count
    auth = H("auth")  # authority count
    ar = H("ar")      # additional count

    @classmethod
    def parse(cls, buffer):
        """Read one header from *buffer*; wraps buffer errors in DNSError."""
        try:
            (id, bitmap, q, a, auth, ar) = buffer.unpack("!HHHHHH")
            return cls(id, bitmap, q, a, auth, ar)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSHeader [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, id=None, bitmap=None, q=0, a=0, auth=0, ar=0, **args) -> None:
        # NOTE(review): an explicit id=0 is treated the same as id=None here.
        self.id = id if id else 0
        if bitmap is None:
            self.bitmap = 0
        else:
            self.bitmap = bitmap
        self.q = q
        self.a = a
        self.auth = auth
        self.ar = ar
        # Flags may also be given as (case-insensitive) keywords; each
        # assignment routes through the matching bit-field property below.
        for k, v in args.items():
            if k.lower() == "qr":
                self.qr = v
            elif k.lower() == "opcode":
                self.opcode = v
            elif k.lower() == "aa":
                self.aa = v
            elif k.lower() == "tc":
                self.tc = v
            elif k.lower() == "rd":
                self.rd = v
            elif k.lower() == "ra":
                self.ra = v
            elif k.lower() == "z":
                self.z = v
            elif k.lower() == "ad":
                self.ad = v
            elif k.lower() == "cd":
                self.cd = v
            elif k.lower() == "rcode":
                self.rcode = v

    # qr: bit 15 -- 0 = query, 1 = response
    def get_qr(self):
        return get_bits(self.bitmap, 15)

    def set_qr(self, val):
        self.bitmap = set_bits(self.bitmap, val, 15)

    qr = property(get_qr, set_qr)

    # opcode: bits 11-14 (4 bits)
    def get_opcode(self):
        return get_bits(self.bitmap, 11, 4)

    def set_opcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 11, 4)

    opcode = property(get_opcode, set_opcode)

    # aa: bit 10 -- authoritative answer
    def get_aa(self):
        return get_bits(self.bitmap, 10)

    def set_aa(self, val):
        self.bitmap = set_bits(self.bitmap, val, 10)

    aa = property(get_aa, set_aa)

    # tc: bit 9 -- truncated
    def get_tc(self):
        return get_bits(self.bitmap, 9)

    def set_tc(self, val):
        self.bitmap = set_bits(self.bitmap, val, 9)

    tc = property(get_tc, set_tc)

    # rd: bit 8 -- recursion desired
    def get_rd(self):
        return get_bits(self.bitmap, 8)

    def set_rd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 8)

    rd = property(get_rd, set_rd)

    # ra: bit 7 -- recursion available
    def get_ra(self):
        return get_bits(self.bitmap, 7)

    def set_ra(self, val):
        self.bitmap = set_bits(self.bitmap, val, 7)

    ra = property(get_ra, set_ra)

    # z: bit 6 -- reserved
    def get_z(self):
        return get_bits(self.bitmap, 6)

    def set_z(self, val):
        self.bitmap = set_bits(self.bitmap, val, 6)

    z = property(get_z, set_z)

    # ad: bit 5 -- authentic data (DNSSEC)
    def get_ad(self):
        return get_bits(self.bitmap, 5)

    def set_ad(self, val):
        self.bitmap = set_bits(self.bitmap, val, 5)

    ad = property(get_ad, set_ad)

    # cd: bit 4 -- checking disabled (DNSSEC)
    def get_cd(self):
        return get_bits(self.bitmap, 4)

    def set_cd(self, val):
        self.bitmap = set_bits(self.bitmap, val, 4)

    cd = property(get_cd, set_cd)

    # rcode: bits 0-3 (4 bits) -- response code
    def get_rcode(self):
        return get_bits(self.bitmap, 0, 4)

    def set_rcode(self, val):
        self.bitmap = set_bits(self.bitmap, val, 0, 4)

    rcode = property(get_rcode, set_rcode)

    def pack(self, buffer):
        """Write the header in wire format (six network-order shorts)."""
        buffer.pack("!HHHHHH", self.id, self.bitmap, self.q, self.a, self.auth, self.ar)

    def __repr__(self):
        # Collect the set flags (falsy entries are dropped by filter below).
        f = [
            self.aa and "AA",
            self.tc and "TC",
            self.rd and "RD",
            self.ra and "RA",
            self.z and "Z",
            self.ad and "AD",
            self.cd and "CD",
        ]
        # UPDATE messages (RFC 2136) rename the sections: zone/prereq/update/additional.
        if OPCODE.get(self.opcode) == "UPDATE":
            f1 = "zo"
            f2 = "pr"
            f3 = "up"
            f4 = "ad"
        else:
            f1 = "q"
            f2 = "a"
            f3 = "ns"
            f4 = "ar"
        return (
            "<DNS Header: id=0x%x type=%s opcode=%s flags=%s "
            "rcode='%s' %s=%d %s=%d %s=%d %s=%d>"
            % (
                self.id,
                QR.get(self.qr),
                OPCODE.get(self.opcode),
                ",".join(filter(None, f)),
                RCODE.get(self.rcode),
                f1,
                self.q,
                f2,
                self.a,
                f3,
                self.auth,
                f4,
                self.ar,
            )
        )

    __str__ = __repr__
|
||||||
|
|
||||||
|
|
||||||
|
class DNSQuestion(object):
    """One entry of a DNS message's question section (qname/qtype/qclass)."""

    @classmethod
    def parse(cls, buffer):
        """Read one question from *buffer*; wraps buffer errors in DNSError."""
        try:
            name = buffer.decode_name()
            qt, qc = buffer.unpack("!HH")
            return cls(name, qt, qc)
        except (BufferError, BimapError) as e:
            raise DNSError(
                "Error unpacking DNSQuestion [offset=%d]: %s" % (buffer.offset, e)
            )

    def __init__(self, qname=None, qtype=1, qclass=1) -> None:
        self.qname = qname  # routed through the property -> coerced to DNSLabel
        self.qtype = qtype
        self.qclass = qclass

    def set_qname(self, qname):
        # Accept a ready-made DNSLabel or anything DNSLabel() can consume.
        self._qname = qname if isinstance(qname, DNSLabel) else DNSLabel(qname)

    def get_qname(self):
        return self._qname

    qname = property(get_qname, set_qname)

    def pack(self, buffer):
        """Write this question in wire format."""
        buffer.encode_name(self.qname)
        buffer.pack("!HH", self.qtype, self.qclass)

    def __repr__(self):
        return "<DNS Question: '%s' qtype=%s qclass=%s>" % (
            self.qname,
            QTYPE.get(self.qtype),
            CLASS.get(self.qclass),
        )

    __str__ = __repr__
|
||||||
|
|
||||||
|
|
||||||
|
class RR(object):
    """A single resource record (answer / authority / additional entry)."""

    # range-checked wire fields
    rtype = H("rtype")
    rclass = H("rclass")
    ttl = I("ttl")
    rdlength = H("rdlength")

    @classmethod
    def parse(cls, buffer):
        """Read one RR from *buffer*, dispatching rdata parsing via RDMAP."""
        try:
            rname = buffer.decode_name()
            rtype, rclass, ttl, rdlength = buffer.unpack("!HHIH")
            if rdlength:
                # Unknown rtypes fall back to the opaque RD base class.
                rdata = RDMAP.get(QTYPE.get(rtype), RD).parse(buffer, rdlength)
            else:
                rdata = ""
            return cls(rname, rtype, rclass, ttl, rdata)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RR [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, rname=None, rtype=1, rclass=1, ttl=0, rdata=None) -> None:
        self.rname = rname  # routed through the property -> coerced to DNSLabel
        self.rtype = rtype
        self.rclass = rclass
        self.ttl = ttl
        self.rdata = rdata

    def set_rname(self, rname):
        # Accept a ready-made DNSLabel or anything DNSLabel() can consume.
        if isinstance(rname, DNSLabel):
            self._rname = rname
        else:
            self._rname = DNSLabel(rname)

    def get_rname(self):
        return self._rname

    rname = property(get_rname, set_rname)

    def pack(self, buffer):
        """Write this RR; rdlength is back-patched once the rdata is packed,
        since name compression makes its size unknowable up front."""
        buffer.encode_name(self.rname)
        buffer.pack("!HHI", self.rtype, self.rclass, self.ttl)
        rdlength_ptr = buffer.offset
        buffer.pack("!H", 0)  # placeholder for rdlength
        start = buffer.offset
        self.rdata.pack(buffer)
        end = buffer.offset
        buffer.update(rdlength_ptr, "!H", end - start)  # patch the real length

    def __repr__(self):
        return "<DNS RR: '%s' rtype=%s rclass=%s ttl=%d rdata='%s'>" % (
            self.rname,
            QTYPE.get(self.rtype),
            CLASS.get(self.rclass),
            self.ttl,
            self.rdata,
        )

    __str__ = __repr__
|
||||||
|
|
||||||
|
|
||||||
|
class RD(object):
    """Base rdata class: opaque bytes; also the fallback for unknown rtypes."""

    @classmethod
    def parse(cls, buffer, length):
        """Consume *length* raw bytes from *buffer*."""
        try:
            return cls(buffer.get(length))
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking RD [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data=b"") -> None:
        check_bytes("data", data)
        self.data = bytes(data)

    def pack(self, buffer):
        buffer.append(self.data)

    def __repr__(self):
        # RFC 3597 generic rdata presentation: "\# <len> <hex>"
        if not self.data:
            return "\\# 0"
        hexdump = binascii.hexlify(self.data).decode().upper()
        return "\\# %d %s" % (len(self.data), hexdump)

    attrs = ("data",)
|
||||||
|
|
||||||
|
|
||||||
|
def _force_bytes(x):
|
||||||
|
if isinstance(x, bytes):
|
||||||
|
return x
|
||||||
|
else:
|
||||||
|
return x.encode()
|
||||||
|
|
||||||
|
|
||||||
|
class TXT(RD):
    """TXT rdata: a list of byte strings, each at most 255 bytes on the wire."""

    @classmethod
    def parse(cls, buffer, length):
        """Read *length* bytes of rdata as consecutive length-prefixed strings."""
        try:
            data = list()
            start_bo = buffer.offset
            now_length = 0  # payload bytes consumed so far (excludes length octets)
            while buffer.offset < start_bo + length:
                (txtlength,) = buffer.unpack("!B")

                # Each segment costs 1 length octet + txtlength payload bytes,
                # so the payload total must stay strictly below *length*.
                if now_length + txtlength < length:
                    now_length += txtlength
                    data.append(buffer.get(txtlength))
                else:
                    raise DNSError(
                        "Invalid TXT record: len(%d) > RD len(%d)" % (txtlength, length)
                    )
            return cls(data)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking TXT [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        # Normalize to a list of byte strings (str items get encoded).
        if type(data) in (tuple, list):
            self.data = [_force_bytes(x) for x in data]
        else:
            self.data = [_force_bytes(data)]
        if any([len(x) > 255 for x in self.data]):
            raise DNSError("TXT record too long: %s" % self.data)

    def pack(self, buffer):
        """Write each string as a one-byte length followed by its payload."""
        for ditem in self.data:
            if len(ditem) > 255:
                raise DNSError("TXT record too long: %s" % ditem)
            buffer.pack("!B", len(ditem))
            buffer.append(ditem)

    def __repr__(self):
        return ",".join([repr(x) for x in self.data])
|
||||||
|
|
||||||
|
|
||||||
|
class A(RD):
    """IPv4 address record; rdata is a 4-tuple of octets."""

    # range-checked 4-tuple (each element 0-255)
    data = IP4("data")

    @classmethod
    def parse(cls, buffer, length):
        try:
            octets = buffer.unpack("!BBBB")
            return cls(octets)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking A [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        # Accept a ready-made sequence of four ints, or dotted-quad text.
        if type(data) in (tuple, list):
            self.data = tuple(data)
        else:
            self.data = tuple(int(part) for part in data.rstrip(".").split("."))

    def pack(self, buffer):
        buffer.pack("!BBBB", *self.data)

    def __repr__(self):
        return "%d.%d.%d.%d" % self.data
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_ipv6(a):
|
||||||
|
l, _, r = a.partition("::")
|
||||||
|
l_groups = list(chain(*[divmod(int(x, 16), 256) for x in l.split(":") if x]))
|
||||||
|
r_groups = list(chain(*[divmod(int(x, 16), 256) for x in r.split(":") if x]))
|
||||||
|
zeros = [0] * (16 - len(l_groups) - len(r_groups))
|
||||||
|
return tuple(l_groups + zeros + r_groups)
|
||||||
|
|
||||||
|
|
||||||
|
def _format_ipv6(a):
|
||||||
|
left = []
|
||||||
|
right = []
|
||||||
|
current = "left"
|
||||||
|
for i in range(0, 16, 2):
|
||||||
|
group = (a[i] << 8) + a[i + 1]
|
||||||
|
if current == "left":
|
||||||
|
if group == 0 and i < 14:
|
||||||
|
if (a[i + 2] << 8) + a[i + 3] == 0:
|
||||||
|
current = "right"
|
||||||
|
else:
|
||||||
|
left.append("0")
|
||||||
|
else:
|
||||||
|
left.append("%x" % group)
|
||||||
|
else:
|
||||||
|
if group == 0 and len(right) == 0:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
right.append("%x" % group)
|
||||||
|
if len(left) < 8:
|
||||||
|
return ":".join(left) + "::" + ":".join(right)
|
||||||
|
else:
|
||||||
|
return ":".join(left)
|
||||||
|
|
||||||
|
|
||||||
|
class AAAA(RD):
    """IPv6 address record; rdata is a 16-tuple of bytes."""

    # range-checked 16-tuple (each element 0-255)
    data = IP6("data")

    @classmethod
    def parse(cls, buffer, length):
        try:
            raw = buffer.unpack("!16B")
            return cls(raw)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking AAAA [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, data) -> None:
        # Accept a 16-element sequence, or textual IPv6 notation.
        if type(data) in (tuple, list):
            self.data = tuple(data)
        else:
            self.data = _parse_ipv6(data)

    def pack(self, buffer):
        buffer.pack("!16B", *self.data)

    def __repr__(self):
        return _format_ipv6(self.data)
|
||||||
|
|
||||||
|
|
||||||
|
class CNAME(RD):
    """Alias record: rdata is a single domain name."""

    @classmethod
    def parse(cls, buffer, length):
        try:
            return cls(buffer.decode_name())
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking CNAME [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, label=None) -> None:
        self.label = label  # routed through the property -> coerced to DNSLabel

    def set_label(self, label):
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)

    def get_label(self):
        return self._label

    label = property(get_label, set_label)

    def pack(self, buffer):
        buffer.encode_name(self.label)

    def __repr__(self):
        return "%s" % (self.label)

    attrs = ("label",)
|
||||||
|
|
||||||
|
|
||||||
|
class PTR(CNAME):
    # PTR rdata is a single domain name, identical in wire format to CNAME.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class SRV(RD):
    """Service locator record: priority/weight/port plus a target name."""

    # 16-bit range-checked fields
    priority = H("priority")
    weight = H("weight")
    port = H("port")

    @classmethod
    def parse(cls, buffer, length):
        try:
            prio, wt, prt = buffer.unpack("!HHH")
            tgt = buffer.decode_name()
            return cls(prio, wt, prt, tgt)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking SRV [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, priority=0, weight=0, port=0, target=None) -> None:
        self.priority = priority
        self.weight = weight
        self.port = port
        self.target = target  # routed through the property -> coerced to DNSLabel

    def set_target(self, target):
        self._target = target if isinstance(target, DNSLabel) else DNSLabel(target)

    def get_target(self):
        return self._target

    target = property(get_target, set_target)

    def pack(self, buffer):
        buffer.pack("!HHH", self.priority, self.weight, self.port)
        buffer.encode_name(self.target)

    def __repr__(self):
        return "%d %d %d %s" % (self.priority, self.weight, self.port, self.target)

    attrs = ("priority", "weight", "port", "target")
|
||||||
|
|
||||||
|
|
||||||
|
def decode_type_bitmap(type_bitmap):
    """Decode an NSEC type bitmap into a list of QTYPE names.

    The wire format is a sequence of (window number, bitmap length, bitmap)
    runs; bit i of each octet (MSB first) marks type 256*window + 8*pos + i.
    """
    names = []
    buf = DNSBuffer(type_bitmap)
    while buf.remaining():
        window, blen = buf.unpack("BB")
        block = bytearray(buf.get(blen))
        for bytepos, byteval in enumerate(block):
            for bit in range(8):
                if byteval & (0x80 >> bit):
                    names.append(QTYPE[window * 256 + bytepos * 8 + bit])
    return names
|
||||||
|
|
||||||
|
|
||||||
|
def encode_type_bitmap(rrlist):
    """Encode a list of QTYPE names as an NSEC type bitmap.

    Types are grouped into 256-wide windows; each window is emitted as
    (window number, bitmap length, bitmap) with trailing zero octets trimmed.
    NOTE(review): assumes rrlist is non-empty -- rrlist[0] raises IndexError
    otherwise; confirm callers never pass an empty list.
    """
    rrlist = sorted([getattr(QTYPE, rr) for rr in rrlist])
    buf = DNSBuffer()
    curWindow = rrlist[0] // 256
    bitmap = bytearray(32)  # max 256 bits per window
    n = len(rrlist) - 1
    for i, rr in enumerate(rrlist):
        v = rr - curWindow * 256
        bitmap[v // 8] |= 1 << (7 - v % 8)  # MSB-first within each octet

        # Flush when this was the last type overall, or the next type
        # belongs to a later window.
        if i == n or rrlist[i + 1] >= (curWindow + 1) * 256:
            while bitmap[-1] == 0:
                bitmap = bitmap[:-1]  # trim trailing zero octets
            buf.pack("BB", curWindow, len(bitmap))
            buf.append(bitmap)

            if i != n:
                curWindow = rrlist[i + 1] // 256
                bitmap = bytearray(32)

    return buf.data
|
||||||
|
|
||||||
|
|
||||||
|
class NSEC(RD):
    """NSEC record: next owner name plus a bitmap of RR types present."""

    @classmethod
    def parse(cls, buffer, length):
        try:
            end = buffer.offset + length
            next_name = buffer.decode_name()
            # Whatever rdata remains after the name is the type bitmap.
            types = decode_type_bitmap(buffer.get(end - buffer.offset))
            return cls(next_name, types)
        except (BufferError, BimapError) as e:
            raise DNSError("Error unpacking NSEC [offset=%d]: %s" % (buffer.offset, e))

    def __init__(self, label, rrlist) -> None:
        self.label = label  # routed through the property -> coerced to DNSLabel
        self.rrlist = rrlist

    def set_label(self, label):
        self._label = label if isinstance(label, DNSLabel) else DNSLabel(label)

    def get_label(self):
        return self._label

    label = property(get_label, set_label)

    def pack(self, buffer):
        buffer.encode_name(self.label)
        buffer.append(encode_type_bitmap(self.rrlist))

    def __repr__(self):
        return "%s %s" % (self.label, " ".join(self.rrlist))

    attrs = ("label", "rrlist")
|
||||||
|
|
||||||
|
|
||||||
|
RDMAP = {"A": A, "AAAA": AAAA, "TXT": TXT, "PTR": PTR, "SRV": SRV, "NSEC": NSEC}
|
||||||
154
copyparty/stolen/dnslib/label.py
Normal file
154
copyparty/stolen/dnslib/label.py
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .bit import get_bits, set_bits
|
||||||
|
from .buffer import Buffer, BufferError
|
||||||
|
|
||||||
|
# Octets that may appear verbatim in presentation format (printable ASCII minus space).
LDH = set(range(33, 127))
# Matches three-digit decimal escapes like \065 in presentation-format names.
ESCAPE = re.compile(r"\\([0-9][0-9][0-9])")
|
||||||
|
|
||||||
|
|
||||||
|
class DNSLabelError(Exception):
    # Raised when a domain name or one of its components exceeds wire-format limits.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class DNSLabel(object):
    """A domain name held as a tuple of byte-string components.

    Accepts another DNSLabel, a list/tuple of byte components, or textual
    input ("www.example.com"); textual input is IDNA-encoded on py3.
    Comparison and hashing are case-insensitive, per DNS semantics.
    """

    def __init__(self, label):
        if type(label) == DNSLabel:
            # copy constructor: share the (immutable) component tuple
            self.label = label.label
        elif type(label) in (list, tuple):
            self.label = tuple(label)
        else:
            if not label or label in (b".", "."):
                self.label = ()  # root label
            elif type(label) is not bytes:
                if type("") != type(b""):
                    # py3 str input: expand \DDD decimal escapes first
                    label = ESCAPE.sub(lambda m: chr(int(m[1])), label)
                self.label = tuple(label.encode("idna").rstrip(b".").split(b"."))
            else:
                if type("") == type(b""):
                    # py2 str (== bytes) input: expand \DDD decimal escapes
                    label = ESCAPE.sub(lambda m: chr(int(m.groups()[0])), label)
                self.label = tuple(label.rstrip(b".").split(b"."))

    def add(self, name):
        """Return a new DNSLabel with *name* prepended to this one."""
        new = DNSLabel(name)
        if self.label:
            new.label += self.label
        return new

    def idna(self):
        # IDNA (unicode) presentation, always with a trailing dot.
        return ".".join([s.decode("idna") for s in self.label]) + "."

    def _decode(self, s):
        if set(s).issubset(LDH):
            # every octet is printable ASCII: decode directly
            return s.decode()
        else:
            # escape non-printable octets as \DDD
            return "".join([(chr(c) if (c in LDH) else "\\%03d" % c) for c in s])

    def __str__(self):
        return ".".join([self._decode(bytearray(s)) for s in self.label]) + "."

    def __repr__(self):
        return "<DNSLabel: '%s'>" % str(self)

    def __hash__(self):
        # lowercase before hashing so hash agrees with case-insensitive __eq__
        return hash(tuple(map(lambda x: x.lower(), self.label)))

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        # Coerce non-DNSLabel operands, then compare case-insensitively.
        if type(other) != DNSLabel:
            return self.__eq__(DNSLabel(other))
        else:
            return [l.lower() for l in self.label] == [l.lower() for l in other.label]

    def __len__(self):
        # encoded length in bytes, counting separators but not length prefixes
        return len(b".".join(self.label))
|
||||||
|
|
||||||
|
|
||||||
|
class DNSBuffer(Buffer):
    """Buffer with DNS domain-name encoding/decoding and name compression."""

    def __init__(self, data=b""):
        super(DNSBuffer, self).__init__(data)
        # name-suffix -> offset map used to emit compression pointers on pack
        self.names = {}

    def decode_name(self, last=-1):
        """Read a (possibly compressed) domain name at the current offset.

        *last* is the resume offset of the enclosing call when following a
        compression pointer; it is used to detect pointer loops.
        """
        label = []
        done = False
        while not done:
            (length,) = self.unpack("!B")
            if get_bits(length, 6, 2) == 3:
                # top two bits set -> this is a 14-bit compression pointer
                self.offset -= 1
                pointer = get_bits(self.unpack("!H")[0], 0, 14)
                save = self.offset
                if last == save:
                    raise BufferError(
                        "Recursive pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                if pointer < self.offset:
                    self.offset = pointer
                else:
                    # pointers may only refer backwards in the message
                    raise BufferError(
                        "Invalid pointer in DNSLabel [offset=%d,pointer=%d,length=%d]"
                        % (self.offset, pointer, len(self.data))
                    )
                # follow the pointer recursively, then resume after it
                label.extend(self.decode_name(save).label)
                self.offset = save
                done = True
            else:
                if length > 0:
                    l = self.get(length)
                    try:
                        l.decode()  # reject labels that are not valid UTF-8
                    except UnicodeDecodeError:
                        raise BufferError("Invalid label <%s>" % l)
                    label.append(l)
                else:
                    done = True  # zero-length label terminates the name
        return DNSLabel(label)

    def encode_name(self, name):
        """Write *name* in wire format, emitting a compression pointer for
        any suffix that was already written into this buffer."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            if tuple(name) in self.names:
                # suffix seen before -> pointer (top two bits set via set_bits)
                pointer = self.names[tuple(name)]
                pointer = set_bits(pointer, 3, 14, 2)
                self.pack("!H", pointer)
                return
            else:
                # remember where this suffix starts for later compression
                self.names[tuple(name)] = self.offset
                element = name.pop(0)
                if len(element) > 63:
                    raise DNSLabelError("Label component too long: %r" % element)
                self.pack("!B", len(element))
                self.append(element)
        self.append(b"\x00")  # root terminator

    def encode_name_nocompress(self, name):
        """Write *name* in wire format without using compression pointers."""
        if not isinstance(name, DNSLabel):
            name = DNSLabel(name)
        if len(name) > 253:
            raise DNSLabelError("Domain label too long: %r" % name)
        name = list(name.label)
        while name:
            element = name.pop(0)
            if len(element) > 63:
                raise DNSLabelError("Label component too long: %r" % element)
            self.pack("!B", len(element))
            self.append(element)
        self.append(b"\x00")  # root terminator
|
||||||
105
copyparty/stolen/dnslib/lex.py
Normal file
105
copyparty/stolen/dnslib/lex.py
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import collections
|
||||||
|
|
||||||
|
try:
|
||||||
|
from StringIO import StringIO
|
||||||
|
except ImportError:
|
||||||
|
from io import StringIO
|
||||||
|
|
||||||
|
|
||||||
|
class Lexer(object):
    """Base tokenizer: buffered single-char reads with pushback and escape
    handling; subclasses drive it by overriding the state functions."""

    escape_chars = "\\"
    escape = {"n": "\n", "t": "\t", "r": "\r"}

    def __init__(self, f, debug=False):
        # Accept a file-like object, str, or bytes (decoded with the default codec).
        if hasattr(f, "read"):
            self.f = f
        elif type(f) == str:
            self.f = StringIO(f)
        elif type(f) == bytes:
            self.f = StringIO(f.decode())
        else:
            raise ValueError("Invalid input")
        self.debug = debug
        self.q = collections.deque()  # pushback queue, consulted before self.f
        self.state = self.lexStart  # current state function; None terminates
        self.escaped = False  # True when the last readescaped() consumed an escape
        self.eof = False

    def __iter__(self):
        return self.parse()

    def next_token(self):
        # Run one step of the state machine; a state returns (token, next_state).
        if self.debug:
            print("STATE", self.state)
        (tok, self.state) = self.state()
        return tok

    def parse(self):
        """Yield tokens until the state machine stops or input is exhausted."""
        while self.state is not None and not self.eof:
            tok = self.next_token()
            if tok:
                yield tok

    def read(self, n=1):
        """Read up to *n* chars, draining the pushback queue first."""
        s = ""
        while self.q and n > 0:
            s += self.q.popleft()
            n -= 1
        s += self.f.read(n)
        if s == "":
            self.eof = True
        if self.debug:
            print("Read: >%s<" % repr(s))
        return s

    def peek(self, n=1):
        """Look ahead up to *n* chars without consuming them."""
        s = ""
        i = 0
        while len(self.q) > i and n > 0:
            s += self.q[i]
            i += 1
            n -= 1
        r = self.f.read(n)
        if n > 0 and r == "":
            self.eof = True
        self.q.extend(r)  # keep file-read chars queued for later reads
        if self.debug:
            print("Peek : >%s<" % repr(s + r))
        return s + r

    def pushback(self, s):
        # Prepend *s* so the next read/peek sees it first.
        p = collections.deque(s)
        p.extend(self.q)
        self.q = p

    def readescaped(self):
        """Read one char, translating backslash escapes: \\DDD octal,
        \\xHH hex, and \\n/\\t/\\r; sets self.escaped accordingly.

        NOTE(review): at EOF read() returns "" and '"" in escape_chars' is
        True, so n[0] below can raise IndexError -- confirm callers always
        check self.eof first.
        """
        c = self.read(1)
        if c in self.escape_chars:
            self.escaped = True
            n = self.peek(3)
            if n.isdigit():
                n = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % n)
                return chr(int(n, 8))  # octal escape
            elif n[0] in "x":
                x = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % x)
                return chr(int(x[1:], 16))  # hex escape
            else:
                c = self.read(1)
                if self.debug:
                    print("Escape: >%s<" % c)
                return self.escape.get(c, c)
        else:
            self.escaped = False
            return c

    def lexStart(self):
        # Initial state: produces nothing and halts; subclasses override this.
        return (None, None)
|
||||||
81
copyparty/stolen/dnslib/ranges.py
Normal file
81
copyparty/stolen/dnslib/ranges.py
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Select integer / byte-string types per interpreter: py2 has `long` and
# uses str for byte strings; py3 uses int and bytes.
if sys.version_info < (3,):
    int_types = (
        int,
        long,
    )
    byte_types = (str, bytearray)
else:
    int_types = (int,)
    byte_types = (bytes, bytearray)
|
||||||
|
|
||||||
|
|
||||||
|
def check_instance(name, val, types):
    """Raise ValueError unless *val* is an instance of *types*."""
    if isinstance(val, types):
        return
    raise ValueError(
        "Attribute '%s' must be instance of %s [%s]" % (name, types, type(val))
    )
|
||||||
|
|
||||||
|
|
||||||
|
def check_bytes(name, val):
    # Validate that *val* is a byte string (bytes/bytearray; str on py2).
    return check_instance(name, val, byte_types)
|
||||||
|
|
||||||
|
|
||||||
|
def range_property(attr, min, max):
    """Build a property storing *attr* in ``_<attr>`` that bounds-checks
    assigned values against [min, max]."""
    priv = "_%s" % attr

    def getter(obj):
        return getattr(obj, priv)

    def setter(obj, val):
        if not (isinstance(val, int_types) and min <= val <= max):
            raise ValueError(
                "Attribute '%s' must be between %d-%d [%s]" % (attr, min, max, val)
            )
        setattr(obj, priv, val)

    return property(getter, setter)
|
||||||
|
|
||||||
|
|
||||||
|
def B(attr):
    # unsigned 8-bit field
    return range_property(attr, 0, 255)
|
||||||
|
|
||||||
|
|
||||||
|
def H(attr):
    # unsigned 16-bit field
    return range_property(attr, 0, 65535)
|
||||||
|
|
||||||
|
|
||||||
|
def I(attr):
    # unsigned 32-bit field
    return range_property(attr, 0, 4294967295)
|
||||||
|
|
||||||
|
|
||||||
|
def ntuple_range(attr, n, min, max):
    """Build a property for a fixed-length tuple stored in ``_<attr>`` whose
    elements must all lie within [min, max]."""
    priv = "_%s" % attr

    def in_range(x):
        return isinstance(x, int_types) and min <= x <= max

    def getter(obj):
        return getattr(obj, priv)

    def setter(obj, val):
        if len(val) != n:
            raise ValueError(
                "Attribute '%s' must be tuple with %d elements [%s]" % (attr, n, val)
            )
        if not all(map(in_range, val)):
            raise ValueError(
                "Attribute '%s' elements must be between %d-%d [%s]"
                % (attr, min, max, val)
            )
        setattr(obj, priv, val)

    return property(getter, setter)
|
||||||
|
|
||||||
|
|
||||||
|
def IP4(attr):
    # IPv4 address as a 4-tuple of octets
    return ntuple_range(attr, 4, 0, 255)
|
||||||
|
|
||||||
|
|
||||||
|
def IP6(attr):
    # IPv6 address as a 16-tuple of bytes
    return ntuple_range(attr, 16, 0, 255)
|
||||||
5
copyparty/stolen/ifaddr/README.md
Normal file
5
copyparty/stolen/ifaddr/README.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
`ifaddr` with py2.7 support enabled by make-sfx.sh which strips py3 hints using strip_hints and removes the `^if True:` blocks
|
||||||
|
|
||||||
|
L: BSD-2-Clause
|
||||||
|
Copyright (c) 2014 Stefan C. Mueller
|
||||||
|
https://github.com/pydron/ifaddr/
|
||||||
21
copyparty/stolen/ifaddr/__init__.py
Normal file
21
copyparty/stolen/ifaddr/__init__.py
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
"""
|
||||||
|
L: BSD-2-Clause
|
||||||
|
Copyright (c) 2014 Stefan C. Mueller
|
||||||
|
https://github.com/pydron/ifaddr/tree/0.2.0
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from ._shared import IP, Adapter
|
||||||
|
|
||||||
|
if os.name == "nt":
|
||||||
|
from ._win32 import get_adapters
|
||||||
|
elif os.name == "posix":
|
||||||
|
from ._posix import get_adapters
|
||||||
|
else:
|
||||||
|
raise RuntimeError("Unsupported Operating System: %s" % os.name)
|
||||||
|
|
||||||
|
__all__ = ["Adapter", "IP", "get_adapters"]
|
||||||
84
copyparty/stolen/ifaddr/_posix.py
Normal file
84
copyparty/stolen/ifaddr/_posix.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import ctypes.util
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
|
||||||
|
import ipaddress
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Iterable, Optional
|
||||||
|
|
||||||
|
from . import _shared as shared
|
||||||
|
from ._shared import U
|
||||||
|
|
||||||
|
|
||||||
|
class ifaddrs(ctypes.Structure):
    # Mirrors the C `struct ifaddrs` returned by getifaddrs(3).
    # _fields_ is assigned after the class statement because the struct is
    # self-referential (ifa_next points at another ifaddrs).
    pass


ifaddrs._fields_ = [
    ("ifa_next", ctypes.POINTER(ifaddrs)),
    ("ifa_name", ctypes.c_char_p),
    ("ifa_flags", ctypes.c_uint),
    ("ifa_addr", ctypes.POINTER(shared.sockaddr)),
    ("ifa_netmask", ctypes.POINTER(shared.sockaddr)),
]
|
||||||
|
|
||||||
|
libc = ctypes.CDLL(ctypes.util.find_library("socket" if os.uname()[0] == "SunOS" else "c"), use_errno=True) # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
def get_adapters(include_unconfigured: bool = False) -> Iterable[shared.Adapter]:
    """Enumerate network adapters and their IPs via getifaddrs(3).

    Returns one Adapter per interface name, preserving kernel order; with
    include_unconfigured=True, interfaces without an address are included too.
    Raises OSError if getifaddrs fails.
    """

    # addr walks the linked list; addr0 keeps the head for freeifaddrs
    addr0 = addr = ctypes.POINTER(ifaddrs)()
    retval = libc.getifaddrs(ctypes.byref(addr))
    if retval != 0:
        eno = ctypes.get_errno()
        raise OSError(eno, os.strerror(eno))

    # keyed by interface name; OrderedDict preserves first-seen order
    ips = collections.OrderedDict()

    def add_ip(adapter_name: str, ip: Optional[shared.IP]) -> None:
        # Create the Adapter on first sight of the name, then append the IP.
        if adapter_name not in ips:
            index = None  # type: Optional[int]
            try:
                # Mypy errors on this when the Windows CI runs:
                # error: Module has no attribute "if_nametoindex"
                index = socket.if_nametoindex(adapter_name)  # type: ignore
            except (OSError, AttributeError):
                pass
            ips[adapter_name] = shared.Adapter(
                adapter_name, adapter_name, [], index=index
            )
        if ip is not None:
            ips[adapter_name].ips.append(ip)

    while addr:
        name = addr[0].ifa_name.decode(encoding="UTF-8")
        ip_addr = shared.sockaddr_to_ip(addr[0].ifa_addr)
        if ip_addr:
            # Some platforms leave the netmask's family unset; copy it from
            # the address so sockaddr_to_ip can interpret it.
            # NOTE(review): "sa_familiy" is the (misspelled) field name used
            # by the vendored sockaddr struct -- do not "fix" the spelling here.
            if addr[0].ifa_netmask and not addr[0].ifa_netmask[0].sa_familiy:
                addr[0].ifa_netmask[0].sa_familiy = addr[0].ifa_addr[0].sa_familiy
            netmask = shared.sockaddr_to_ip(addr[0].ifa_netmask)
            if isinstance(netmask, tuple):
                # IPv6 netmask: derive the prefix length from the mask address
                netmaskStr = U(netmask[0])
                prefixlen = shared.ipv6_prefixlength(ipaddress.IPv6Address(netmaskStr))
            else:
                if netmask is None:
                    t = "sockaddr_to_ip({}) returned None"
                    raise Exception(t.format(addr[0].ifa_netmask))

                # IPv4 netmask string: let ipaddress compute the prefix length
                netmaskStr = U("0.0.0.0/" + netmask)
                prefixlen = ipaddress.IPv4Network(netmaskStr).prefixlen
            ip = shared.IP(ip_addr, prefixlen, name)
            add_ip(name, ip)
        else:
            if include_unconfigured:
                add_ip(name, None)
        addr = addr[0].ifa_next

    libc.freeifaddrs(addr0)

    return ips.values()
|
||||||
203
copyparty/stolen/ifaddr/_shared.py
Normal file
203
copyparty/stolen/ifaddr/_shared.py
Normal file
@@ -0,0 +1,203 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import ctypes
|
||||||
|
import platform
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import ipaddress
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Callable, List, Optional, Union
|
||||||
|
|
||||||
|
|
||||||
|
PY2 = sys.version_info < (3,)
|
||||||
|
if not PY2:
|
||||||
|
U: Callable[[str], str] = str
|
||||||
|
else:
|
||||||
|
U = unicode # noqa: F821 # pylint: disable=undefined-variable,self-assigning-variable
|
||||||
|
|
||||||
|
|
||||||
|
class Adapter(object):
    """
    A network interface device controller (NIC), such as a network
    card.  One adapter can carry any number of IPs.

    On Linux, aliasing (multiple IPs on one physical NIC) is realised
    through 'virtual' adapters; each of those is its own instance of
    this class and can hold both an IPv4 and an IPv6 address.
    """

    def __init__(
        self, name: str, nice_name: str, ips: List["IP"], index: Optional[int] = None
    ) -> None:
        # Unique system identifier of the adapter: `eth0` / `eth0:1`
        # on Linux, a UUID string such as
        # `{846EE342-7039-11DE-9D20-806E6F6E6963}` on Windows.
        self.name = name
        # Human readable name; same as `name` on Linux, the device
        # name on Windows.
        self.nice_name = nice_name
        # ifaddr.IP instances, in the order the system reported them.
        self.ips = ips
        # Adapter index as used by some APIs (e.g. IPv6 multicast group join).
        self.index = index

    def __repr__(self) -> str:
        t = "Adapter(name={name}, nice_name={nice_name}, ips={ips}, index={index})"
        return t.format(
            name=repr(self.name),
            nice_name=repr(self.nice_name),
            ips=repr(self.ips),
            index=repr(self.index),
        )
|
||||||
|
|
||||||
|
|
||||||
|
if True:
    # Type of an IPv4 address (a string in "xxx.xxx.xxx.xxx" format)
    _IPv4Address = str

    # Type of an IPv6 address (a three-tuple `(ip, flowinfo, scope_id)`)
    _IPv6Address = tuple[str, int, int]


class IP(object):
    """
    One IP address bound to an adapter.
    """

    def __init__(
        self, ip: Union[_IPv4Address, _IPv6Address], network_prefix: int, nice_name: str
    ) -> None:
        # The address itself: a "xxx.xxx.xxx.xxx" string for IPv4, or
        # a three-tuple `(ip, flowinfo, scope_id)` for IPv6 where `ip`
        # is the usual colon-separated hex string.
        self.ip = ip
        # Number of leading bits forming the network part; e.g. 24 for
        # a `255.255.255.0` netmask.
        self.network_prefix = network_prefix
        # Human readable name for this IP: the adapter name on Linux,
        # the control-panel network-connection name on Windows.
        self.nice_name = nice_name

    @property
    def is_IPv4(self) -> bool:
        """
        True when this IP is an IPv4 address, False when IPv6.
        """
        return not self.is_IPv6

    @property
    def is_IPv6(self) -> bool:
        """
        True when this IP is an IPv6 address, False when IPv4.
        """
        return isinstance(self.ip, tuple)

    def __repr__(self) -> str:
        t = "IP(ip={ip}, network_prefix={network_prefix}, nice_name={nice_name})"
        return t.format(
            ip=repr(self.ip),
            network_prefix=repr(self.network_prefix),
            nice_name=repr(self.nice_name),
        )
|
||||||
|
|
||||||
|
|
||||||
|
if platform.system() == "Darwin" or "BSD" in platform.system():

    # BSD derived systems use marginally different structures
    # than either Linux or Windows.
    # I still keep it in `shared` since we can use
    # both structures equally.
    # NOTE: the field name "sa_familiy" (sic) is misspelled, but it is
    # used consistently throughout this package and must stay as-is.

    class sockaddr(ctypes.Structure):
        # struct sockaddr, BSD layout (leading sa_len byte, 8-bit family)
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sa_data", ctypes.c_uint8 * 14),
        ]

    class sockaddr_in(ctypes.Structure):
        # struct sockaddr_in (IPv4), BSD layout
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sin_port", ctypes.c_uint16),
            ("sin_addr", ctypes.c_uint8 * 4),
            ("sin_zero", ctypes.c_uint8 * 8),
        ]

    class sockaddr_in6(ctypes.Structure):
        # struct sockaddr_in6 (IPv6), BSD layout
        _fields_ = [
            ("sa_len", ctypes.c_uint8),
            ("sa_familiy", ctypes.c_uint8),
            ("sin6_port", ctypes.c_uint16),
            ("sin6_flowinfo", ctypes.c_uint32),
            ("sin6_addr", ctypes.c_uint8 * 16),
            ("sin6_scope_id", ctypes.c_uint32),
        ]

else:

    class sockaddr(ctypes.Structure):  # type: ignore
        # struct sockaddr, Linux/Windows layout (16-bit family, no sa_len)
        _fields_ = [("sa_familiy", ctypes.c_uint16), ("sa_data", ctypes.c_uint8 * 14)]

    class sockaddr_in(ctypes.Structure):  # type: ignore
        # struct sockaddr_in (IPv4), Linux/Windows layout
        _fields_ = [
            ("sin_familiy", ctypes.c_uint16),
            ("sin_port", ctypes.c_uint16),
            ("sin_addr", ctypes.c_uint8 * 4),
            ("sin_zero", ctypes.c_uint8 * 8),
        ]

    class sockaddr_in6(ctypes.Structure):  # type: ignore
        # struct sockaddr_in6 (IPv6), Linux/Windows layout
        _fields_ = [
            ("sin6_familiy", ctypes.c_uint16),
            ("sin6_port", ctypes.c_uint16),
            ("sin6_flowinfo", ctypes.c_uint32),
            ("sin6_addr", ctypes.c_uint8 * 16),
            ("sin6_scope_id", ctypes.c_uint32),
        ]
|
||||||
|
|
||||||
|
|
||||||
|
def sockaddr_to_ip(
    sockaddr_ptr: "ctypes.pointer[sockaddr]",
) -> Optional[Union[_IPv4Address, _IPv6Address]]:
    """Convert a `struct sockaddr*` into this package's IP representation.

    Returns a dotted-quad string for AF_INET, an `(ip, flowinfo,
    scope_id)` tuple for AF_INET6, and None for a null pointer or any
    other address family.
    """
    if sockaddr_ptr:
        if sockaddr_ptr[0].sa_familiy == socket.AF_INET:
            # reinterpret as sockaddr_in and extract the packed 4 bytes
            ipv4 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in))
            ippacked = bytes(bytearray(ipv4[0].sin_addr))
            ip = U(ipaddress.ip_address(ippacked))
            return ip
        elif sockaddr_ptr[0].sa_familiy == socket.AF_INET6:
            # reinterpret as sockaddr_in6 and extract the packed 16 bytes
            ipv6 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in6))
            flowinfo = ipv6[0].sin6_flowinfo
            ippacked = bytes(bytearray(ipv6[0].sin6_addr))
            ip = U(ipaddress.ip_address(ippacked))
            scope_id = ipv6[0].sin6_scope_id
            return (ip, flowinfo, scope_id)
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def ipv6_prefixlength(address: ipaddress.IPv6Address) -> int:
    """Prefix length of an IPv6 netmask given as an address: the number
    of set bits in its 128-bit value."""
    # popcount of the address; identical to looping over all 128 bit
    # positions and counting the ones
    return bin(int(address)).count("1")
|
||||||
135
copyparty/stolen/ifaddr/_win32.py
Normal file
135
copyparty/stolen/ifaddr/_win32.py
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import ctypes
|
||||||
|
from ctypes import wintypes
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from typing import Iterable, List
|
||||||
|
|
||||||
|
from . import _shared as shared
|
||||||
|
|
||||||
|
NO_ERROR = 0
|
||||||
|
ERROR_BUFFER_OVERFLOW = 111
|
||||||
|
MAX_ADAPTER_NAME_LENGTH = 256
|
||||||
|
MAX_ADAPTER_DESCRIPTION_LENGTH = 128
|
||||||
|
MAX_ADAPTER_ADDRESS_LENGTH = 8
|
||||||
|
AF_UNSPEC = 0
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKET_ADDRESS(ctypes.Structure):
    # Win32 SOCKET_ADDRESS: a pointer to a sockaddr plus its byte length
    _fields_ = [
        ("lpSockaddr", ctypes.POINTER(shared.sockaddr)),
        ("iSockaddrLength", wintypes.INT),
    ]
||||||
|
|
||||||
|
|
||||||
|
class IP_ADAPTER_UNICAST_ADDRESS(ctypes.Structure):
    """Win32 IP_ADAPTER_UNICAST_ADDRESS.

    Fields are assigned after the class statement because the struct is
    self-referential (Next points at another entry of the same type).
    """

    pass


IP_ADAPTER_UNICAST_ADDRESS._fields_ = [
    ("Length", wintypes.ULONG),
    ("Flags", wintypes.DWORD),
    ("Next", ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)),  # linked list
    ("Address", SOCKET_ADDRESS),
    ("PrefixOrigin", ctypes.c_uint),
    ("SuffixOrigin", ctypes.c_uint),
    ("DadState", ctypes.c_uint),
    ("ValidLifetime", wintypes.ULONG),
    ("PreferredLifetime", wintypes.ULONG),
    ("LeaseLifetime", wintypes.ULONG),
    ("OnLinkPrefixLength", ctypes.c_uint8),  # netmask as a prefix length
]
|
||||||
|
|
||||||
|
|
||||||
|
class IP_ADAPTER_ADDRESSES(ctypes.Structure):
    """Win32 IP_ADAPTER_ADDRESSES, declared only up to the fields this
    module reads (trailing fields of the real struct are omitted).

    Fields are assigned after the class statement because the struct is
    self-referential (Next).
    """

    pass


IP_ADAPTER_ADDRESSES._fields_ = [
    ("Length", wintypes.ULONG),
    ("IfIndex", wintypes.DWORD),
    ("Next", ctypes.POINTER(IP_ADAPTER_ADDRESSES)),  # linked list
    ("AdapterName", ctypes.c_char_p),
    ("FirstUnicastAddress", ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)),
    ("FirstAnycastAddress", ctypes.c_void_p),
    ("FirstMulticastAddress", ctypes.c_void_p),
    ("FirstDnsServerAddress", ctypes.c_void_p),
    ("DnsSuffix", ctypes.c_wchar_p),
    ("Description", ctypes.c_wchar_p),
    ("FriendlyName", ctypes.c_wchar_p),
]


# GetAdaptersAddresses() lives in Iphlpapi.dll
iphlpapi = ctypes.windll.LoadLibrary("Iphlpapi")  # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
def enumerate_interfaces_of_adapter(
    nice_name: str, address: IP_ADAPTER_UNICAST_ADDRESS
) -> Iterable[shared.IP]:
    """Yield one shared.IP per entry of an adapter's unicast-address
    linked list, starting at *address*.

    Raises Exception when an entry's sockaddr cannot be converted.
    """

    # Iterate through linked list and fill list
    addresses = []  # type: List[IP_ADAPTER_UNICAST_ADDRESS]
    while True:
        addresses.append(address)
        if not address.Next:
            break
        address = address.Next[0]

    for address in addresses:
        ip = shared.sockaddr_to_ip(address.Address.lpSockaddr)
        if ip is None:
            t = "sockaddr_to_ip({}) returned None"
            raise Exception(t.format(address.Address.lpSockaddr))

        network_prefix = address.OnLinkPrefixLength
        yield shared.IP(ip, network_prefix, nice_name)
|
||||||
|
|
||||||
|
|
||||||
|
def get_adapters(include_unconfigured: bool = False) -> Iterable[shared.Adapter]:
    """Enumerate network adapters via the Win32 GetAdaptersAddresses().

    When `include_unconfigured` is true, adapters without any unicast
    address are reported too (with an empty ip list).

    Raises OSError (via ctypes.WinError) when the API call fails.
    """

    # Call GetAdaptersAddresses() with error and buffer size handling

    addressbuffersize = wintypes.ULONG(15 * 1024)
    retval = ERROR_BUFFER_OVERFLOW
    while retval == ERROR_BUFFER_OVERFLOW:
        # on overflow the call updates addressbuffersize to the size it
        # needs, so retry with a bigger buffer
        addressbuffer = ctypes.create_string_buffer(addressbuffersize.value)
        retval = iphlpapi.GetAdaptersAddresses(
            wintypes.ULONG(AF_UNSPEC),
            wintypes.ULONG(0),
            None,
            ctypes.byref(addressbuffer),
            ctypes.byref(addressbuffersize),
        )
    if retval != NO_ERROR:
        raise ctypes.WinError()  # type: ignore

    # Iterate through adapters fill array
    address_infos = []  # type: List[IP_ADAPTER_ADDRESSES]
    address_info = IP_ADAPTER_ADDRESSES.from_buffer(addressbuffer)
    while True:
        address_infos.append(address_info)
        if not address_info.Next:
            break
        address_info = address_info.Next[0]

    # Iterate through unicast addresses
    result = []  # type: List[shared.Adapter]
    for adapter_info in address_infos:

        # We don't expect non-ascii characters here, so encoding shouldn't matter
        name = adapter_info.AdapterName.decode()
        nice_name = adapter_info.Description
        index = adapter_info.IfIndex

        if adapter_info.FirstUnicastAddress:
            ips = enumerate_interfaces_of_adapter(
                adapter_info.FriendlyName, adapter_info.FirstUnicastAddress[0]
            )
            ips = list(ips)
            result.append(shared.Adapter(name, nice_name, ips, index=index))
        elif include_unconfigured:
            result.append(shared.Adapter(name, nice_name, [], index=index))

    return result
|
||||||
591
copyparty/stolen/qrcodegen.py
Normal file
591
copyparty/stolen/qrcodegen.py
Normal file
@@ -0,0 +1,591 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
# modified copy of Project Nayuki's qrcodegen (MIT-licensed);
|
||||||
|
# https://github.com/nayuki/QR-Code-generator/blob/daa3114/python/qrcodegen.py
|
||||||
|
# the original ^ is extremely well commented so refer to that for explanations
|
||||||
|
|
||||||
|
# hacks: binary-only, auto-ecc, render, py2-compat
|
||||||
|
|
||||||
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import itertools
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
|
from collections.abc import Sequence
|
||||||
|
|
||||||
|
from typing import Callable, List, Optional, Tuple, Union
|
||||||
|
|
||||||
|
|
||||||
|
def num_char_count_bits(ver: int) -> int:
    """Bit width of the character-count field for a byte-mode segment
    at QR version *ver*: 8 bits for versions 1-9, 16 for 10-40."""
    return 8 if ver < 10 else 16
|
||||||
|
|
||||||
|
|
||||||
|
class Ecc(object):
    """Error-correction level of a QR Code symbol."""

    ordinal: int  # 0..3, index into the per-level lookup tables
    formatbits: int  # 2-bit value placed into the format information

    def __init__(self, i: int, fb: int) -> None:
        self.ordinal = i
        self.formatbits = fb

    # the four standard levels, declared here for type checkers and
    # assigned right below the class body
    LOW: "Ecc"
    MEDIUM: "Ecc"
    QUARTILE: "Ecc"
    HIGH: "Ecc"


Ecc.LOW = Ecc(0, 1)
Ecc.MEDIUM = Ecc(1, 0)
Ecc.QUARTILE = Ecc(2, 3)
Ecc.HIGH = Ecc(3, 2)
|
||||||
|
|
||||||
|
|
||||||
|
class QrSegment(object):
    """A byte-mode data segment of a QR Code."""

    @staticmethod
    def make_seg(data: Union[bytes, Sequence[int]]) -> "QrSegment":
        """Pack *data* (raw bytes) into a byte-mode segment."""
        buf = _BitBuffer()
        for byte in data:
            buf.append_bits(byte, 8)
        return QrSegment(len(data), buf)

    numchars: int  # number of bytes; not the same as the data's bit length
    bitdata: List[int]  # the data bits of this segment

    def __init__(self, numch: int, bitdata: Sequence[int]) -> None:
        if numch < 0:
            raise ValueError()
        self.numchars = numch
        self.bitdata = list(bitdata)

    @staticmethod
    def get_total_bits(segs: Sequence["QrSegment"], ver: int) -> Optional[int]:
        """Total encoded bit length of *segs* at version *ver*, or None
        when some segment's length overflows its count field."""
        total = 0
        for seg in segs:
            # width of the character-count field for byte mode
            # (8 bits for versions 1-9, 16 for 10-40)
            ccbits: int = 16 if (ver + 7) // 17 else 8
            if seg.numchars >= (1 << ccbits):
                return None  # segment length doesn't fit the field's bit width
            total += 4 + ccbits + len(seg.bitdata)
        return total
|
||||||
|
|
||||||
|
|
||||||
|
class QrCode(object):
|
||||||
|
@staticmethod
|
||||||
|
def encode_binary(data: Union[bytes, Sequence[int]]) -> "QrCode":
|
||||||
|
return QrCode.encode_segments([QrSegment.make_seg(data)])
|
||||||
|
|
||||||
|
    @staticmethod
    def encode_segments(
        segs: Sequence[QrSegment],
        ecl: Ecc = Ecc.LOW,
        minver: int = 2,
        maxver: int = 40,
        mask: int = -1,
    ) -> "QrCode":
        """Encode *segs* into a QR Code symbol.

        Picks the smallest version in [minver, maxver] that fits the
        data at *ecl*, then boosts the ECC level as high as the chosen
        version still allows.  mask == -1 lets the ctor pick the best
        mask automatically.
        """
        # find the smallest version whose capacity fits the data
        for ver in range(minver, maxver + 1):
            datacapacitybits: int = QrCode._get_num_data_codewords(ver, ecl) * 8
            datausedbits: Optional[int] = QrSegment.get_total_bits(segs, ver)
            if (datausedbits is not None) and (datausedbits <= datacapacitybits):
                break

        # NOTE(review): only guards against None/0; data too long for
        # maxver is caught by the capacity assert further down
        assert datausedbits

        # boost ecl as high as possible without growing the version
        for newecl in (
            Ecc.MEDIUM,
            Ecc.QUARTILE,
            Ecc.HIGH,
        ):
            if datausedbits <= QrCode._get_num_data_codewords(ver, newecl) * 8:
                ecl = newecl

        # Concatenate all segments to create the data bit string
        bb = _BitBuffer()
        for seg in segs:
            bb.append_bits(4, 4)  # byte-mode indicator
            bb.append_bits(seg.numchars, num_char_count_bits(ver))
            bb.extend(seg.bitdata)
        assert len(bb) == datausedbits

        # Add terminator and pad up to a byte if applicable
        datacapacitybits = QrCode._get_num_data_codewords(ver, ecl) * 8
        assert len(bb) <= datacapacitybits
        bb.append_bits(0, min(4, datacapacitybits - len(bb)))
        bb.append_bits(0, -len(bb) % 8)
        assert len(bb) % 8 == 0

        # Pad with alternating bytes until data capacity is reached
        for padbyte in itertools.cycle((0xEC, 0x11)):
            if len(bb) >= datacapacitybits:
                break
            bb.append_bits(padbyte, 8)

        # Pack bits into bytes in big endian
        datacodewords = bytearray([0] * (len(bb) // 8))
        for (i, bit) in enumerate(bb):
            datacodewords[i >> 3] |= bit << (7 - (i & 7))

        return QrCode(ver, ecl, datacodewords, mask)
|
||||||
|
|
||||||
|
ver: int
|
||||||
|
size: int # w/h; 21..177 (ver * 4 + 17)
|
||||||
|
ecclvl: Ecc
|
||||||
|
mask: int # 0..7
|
||||||
|
modules: List[List[bool]]
|
||||||
|
unmaskable: List[List[bool]]
|
||||||
|
|
||||||
|
    def __init__(
        self,
        ver: int,
        ecclvl: Ecc,
        datacodewords: Union[bytes, Sequence[int]],
        msk: int,
    ) -> None:
        """Build the symbol: draw function patterns, interleave data
        with ECC, place the codewords, then pick (msk == -1) or apply
        the requested mask."""
        self.ver = ver
        self.size = ver * 4 + 17
        self.ecclvl = ecclvl

        # module grid and the function-module ("don't mask") grid
        self.modules = [[False] * self.size for _ in range(self.size)]
        self.unmaskable = [[False] * self.size for _ in range(self.size)]

        # Compute ECC, draw modules
        self._draw_function_patterns()
        allcodewords: bytes = self._add_ecc_and_interleave(bytearray(datacodewords))
        self._draw_codewords(allcodewords)

        if msk == -1:  # automask
            # try all 8 masks and keep the one with the lowest penalty;
            # applying a mask twice undoes it (XOR)
            minpenalty: int = 1 << 32
            for i in range(8):
                self._apply_mask(i)
                self._draw_format_bits(i)
                penalty = self._get_penalty_score()
                if penalty < minpenalty:
                    msk = i
                    minpenalty = penalty
                self._apply_mask(i)  # xor/undo

        assert 0 <= msk <= 7
        self.mask = msk
        self._apply_mask(msk)  # Apply the final choice of mask
        self._draw_format_bits(msk)  # Overwrite old format bits
|
||||||
|
|
||||||
|
def render(self, zoom=1, pad=4) -> str:
|
||||||
|
tab = self.modules
|
||||||
|
sz = self.size
|
||||||
|
if sz % 2 and zoom == 1:
|
||||||
|
tab.append([False] * sz)
|
||||||
|
|
||||||
|
tab = [[False] * sz] * pad + tab + [[False] * sz] * pad
|
||||||
|
tab = [[False] * pad + x + [False] * pad for x in tab]
|
||||||
|
|
||||||
|
rows: list[str] = []
|
||||||
|
if zoom == 1:
|
||||||
|
for y in range(0, len(tab), 2):
|
||||||
|
row = ""
|
||||||
|
for x in range(len(tab[y])):
|
||||||
|
v = 2 if tab[y][x] else 0
|
||||||
|
v += 1 if tab[y + 1][x] else 0
|
||||||
|
row += " ▄▀█"[v]
|
||||||
|
rows.append(row)
|
||||||
|
else:
|
||||||
|
for tr in tab:
|
||||||
|
row = ""
|
||||||
|
for zb in tr:
|
||||||
|
row += " █"[int(zb)] * 2
|
||||||
|
rows.append(row)
|
||||||
|
|
||||||
|
return "\n".join(rows)
|
||||||
|
|
||||||
|
    def _draw_function_patterns(self) -> None:
        """Draw every function module: timing, finder and alignment
        patterns, plus dummy format bits and the version info."""
        # Draw horizontal and vertical timing patterns
        for i in range(self.size):
            self._set_function_module(6, i, i % 2 == 0)
            self._set_function_module(i, 6, i % 2 == 0)

        # Draw 3 finder patterns (all corners except bottom right; overwrites some timing modules)
        self._draw_finder_pattern(3, 3)
        self._draw_finder_pattern(self.size - 4, 3)
        self._draw_finder_pattern(3, self.size - 4)

        # Draw numerous alignment patterns
        alignpatpos: List[int] = self._get_alignment_pattern_positions()
        numalign: int = len(alignpatpos)
        skips: Sequence[Tuple[int, int]] = (
            (0, 0),
            (0, numalign - 1),
            (numalign - 1, 0),
        )
        for i in range(numalign):
            for j in range(numalign):
                if (i, j) not in skips:  # avoid finder corners
                    self._draw_alignment_pattern(alignpatpos[i], alignpatpos[j])

        # draw config data with dummy mask value; ctor overwrites it
        self._draw_format_bits(0)
        self._draw_ver()
|
||||||
|
|
||||||
|
    def _draw_format_bits(self, mask: int) -> None:
        """Draw both copies of the 15-bit format information for the
        current ECC level and the given mask index."""
        # Calculate error correction code and pack bits; ecclvl is uint2, mask is uint3
        data: int = self.ecclvl.formatbits << 3 | mask
        rem: int = data
        for _ in range(10):
            # BCH remainder, generator polynomial 0x537
            rem = (rem << 1) ^ ((rem >> 9) * 0x537)
        bits: int = (data << 10 | rem) ^ 0x5412  # uint15
        assert bits >> 15 == 0

        # first copy
        for i in range(0, 6):
            self._set_function_module(8, i, _get_bit(bits, i))
        self._set_function_module(8, 7, _get_bit(bits, 6))
        self._set_function_module(8, 8, _get_bit(bits, 7))
        self._set_function_module(7, 8, _get_bit(bits, 8))
        for i in range(9, 15):
            self._set_function_module(14 - i, 8, _get_bit(bits, i))

        # second copy
        for i in range(0, 8):
            self._set_function_module(self.size - 1 - i, 8, _get_bit(bits, i))
        for i in range(8, 15):
            self._set_function_module(8, self.size - 15 + i, _get_bit(bits, i))
        self._set_function_module(8, self.size - 8, True)  # Always dark
|
||||||
|
|
||||||
|
    def _draw_ver(self) -> None:
        """Draw both copies of the 18-bit version information; these
        only exist in symbols of version 7 and up."""
        if self.ver < 7:
            return

        # Calculate error correction code and pack bits
        rem: int = self.ver  # ver is uint6, 7..40
        for _ in range(12):
            # BCH remainder, generator polynomial 0x1F25
            rem = (rem << 1) ^ ((rem >> 11) * 0x1F25)
        bits: int = self.ver << 12 | rem  # uint18
        assert bits >> 18 == 0

        # Draw two copies
        for i in range(18):
            bit: bool = _get_bit(bits, i)
            a: int = self.size - 11 + i % 3
            b: int = i // 3
            # the two copies are mirror images across the main diagonal
            self._set_function_module(a, b, bit)
            self._set_function_module(b, a, bit)
|
||||||
|
|
||||||
|
def _draw_finder_pattern(self, x: int, y: int) -> None:
|
||||||
|
for dy in range(-4, 5):
|
||||||
|
for dx in range(-4, 5):
|
||||||
|
xx, yy = x + dx, y + dy
|
||||||
|
if (0 <= xx < self.size) and (0 <= yy < self.size):
|
||||||
|
# Chebyshev/infinity norm
|
||||||
|
self._set_function_module(
|
||||||
|
xx, yy, max(abs(dx), abs(dy)) not in (2, 4)
|
||||||
|
)
|
||||||
|
|
||||||
|
def _draw_alignment_pattern(self, x: int, y: int) -> None:
|
||||||
|
for dy in range(-2, 3):
|
||||||
|
for dx in range(-2, 3):
|
||||||
|
self._set_function_module(x + dx, y + dy, max(abs(dx), abs(dy)) != 1)
|
||||||
|
|
||||||
|
def _set_function_module(self, x: int, y: int, isdark: bool) -> None:
|
||||||
|
self.modules[y][x] = isdark
|
||||||
|
self.unmaskable[y][x] = True
|
||||||
|
|
||||||
|
    def _add_ecc_and_interleave(self, data: bytearray) -> bytes:
        """Split *data* into blocks, append Reed-Solomon ECC to each,
        and interleave the blocks into the final codeword sequence."""
        ver: int = self.ver
        assert len(data) == QrCode._get_num_data_codewords(ver, self.ecclvl)

        # Calculate parameter numbers
        numblocks: int = QrCode._NUM_ERROR_CORRECTION_BLOCKS[self.ecclvl.ordinal][ver]
        blockecclen: int = QrCode._ECC_CODEWORDS_PER_BLOCK[self.ecclvl.ordinal][ver]
        rawcodewords: int = QrCode._get_num_raw_data_modules(ver) // 8
        numshortblocks: int = numblocks - rawcodewords % numblocks
        shortblocklen: int = rawcodewords // numblocks

        # Split data into blocks and append ECC to each block
        blocks: List[bytes] = []
        rsdiv: bytes = QrCode._reed_solomon_compute_divisor(blockecclen)
        k: int = 0
        for i in range(numblocks):
            # long blocks (after the first numshortblocks) carry one extra data byte
            dat: bytearray = data[
                k : k + shortblocklen - blockecclen + (0 if i < numshortblocks else 1)
            ]
            k += len(dat)
            ecc: bytes = QrCode._reed_solomon_compute_remainder(dat, rsdiv)
            if i < numshortblocks:
                dat.append(0)  # placeholder so every block has equal length
            blocks.append(dat + ecc)
        assert k == len(data)

        # Interleave (not concatenate) the bytes from every block into a single sequence
        result = bytearray()
        for i in range(len(blocks[0])):
            for (j, blk) in enumerate(blocks):
                # Skip the padding byte in short blocks
                if (i != shortblocklen - blockecclen) or (j >= numshortblocks):
                    result.append(blk[i])
        assert len(result) == rawcodewords
        return result
|
||||||
|
|
||||||
|
    def _draw_codewords(self, data: bytes) -> None:
        """Place the codeword bits into the grid using the standard
        two-column zigzag scan, skipping function modules."""
        assert len(data) == QrCode._get_num_raw_data_modules(self.ver) // 8

        i: int = 0  # Bit index into the data
        for right in range(self.size - 1, 0, -2):
            # idx of right column in each column pair
            if right <= 6:
                right -= 1  # hop over the vertical timing column at x=6
            for vert in range(self.size):  # Vertical counter
                for j in range(2):
                    x: int = right - j
                    # column pairs alternate between upward and downward scans
                    upward: bool = (right + 1) & 2 == 0
                    y: int = (self.size - 1 - vert) if upward else vert
                    if (not self.unmaskable[y][x]) and (i < len(data) * 8):
                        self.modules[y][x] = _get_bit(data[i >> 3], 7 - (i & 7))
                        i += 1
                    # any remainder bits (0..7) were set 0/false/light by ctor

        assert i == len(data) * 8
|
||||||
|
|
||||||
|
    def _apply_mask(self, mask: int) -> None:
        """XOR mask pattern *mask* (0..7) onto every non-function
        module; applying the same mask a second time undoes it."""
        masker: Callable[[int, int], int] = QrCode._MASK_PATTERNS[mask]
        for y in range(self.size):
            for x in range(self.size):
                # bool ^= bool: flips the module when the pattern hits
                # and the module is maskable
                self.modules[y][x] ^= (masker(x, y) == 0) and (
                    not self.unmaskable[y][x]
                )
|
||||||
|
|
||||||
|
    def _get_penalty_score(self) -> int:
        """Penalty score of the current module grid; lower is better.
        Used by the ctor to choose the best mask pattern."""
        result: int = 0
        size: int = self.size
        modules: List[List[bool]] = self.modules

        # Adjacent modules in row having same color, and finder-like patterns
        for y in range(size):
            runcolor: bool = False
            runx: int = 0
            runhistory = collections.deque([0] * 7, 7)
            for x in range(size):
                if modules[y][x] == runcolor:
                    runx += 1
                    if runx == 5:
                        result += QrCode._PENALTY_N1
                    elif runx > 5:
                        result += 1
                else:
                    self._finder_penalty_add_history(runx, runhistory)
                    if not runcolor:
                        result += (
                            self._finder_penalty_count_patterns(runhistory)
                            * QrCode._PENALTY_N3
                        )
                    runcolor = modules[y][x]
                    runx = 1
            # flush the final run of the row
            result += (
                self._finder_penalty_terminate_and_count(runcolor, runx, runhistory)
                * QrCode._PENALTY_N3
            )

        # Adjacent modules in column having same color, and finder-like patterns
        for x in range(size):
            runcolor = False
            runy = 0
            runhistory = collections.deque([0] * 7, 7)
            for y in range(size):
                if modules[y][x] == runcolor:
                    runy += 1
                    if runy == 5:
                        result += QrCode._PENALTY_N1
                    elif runy > 5:
                        result += 1
                else:
                    self._finder_penalty_add_history(runy, runhistory)
                    if not runcolor:
                        result += (
                            self._finder_penalty_count_patterns(runhistory)
                            * QrCode._PENALTY_N3
                        )
                    runcolor = modules[y][x]
                    runy = 1
            # flush the final run of the column
            result += (
                self._finder_penalty_terminate_and_count(runcolor, runy, runhistory)
                * QrCode._PENALTY_N3
            )

        # 2*2 blocks of modules having same color
        for y in range(size - 1):
            for x in range(size - 1):
                if (
                    modules[y][x]
                    == modules[y][x + 1]
                    == modules[y + 1][x]
                    == modules[y + 1][x + 1]
                ):
                    result += QrCode._PENALTY_N2

        # Balance of dark and light modules
        dark: int = sum((1 if cell else 0) for row in modules for cell in row)
        total: int = size ** 2  # Note that size is odd, so dark/total != 1/2

        # Compute the smallest integer k >= 0 such that (45-5k)% <= dark/total <= (55+5k)%
        k: int = (abs(dark * 20 - total * 10) + total - 1) // total - 1
        assert 0 <= k <= 9
        result += k * QrCode._PENALTY_N4
        assert 0 <= result <= 2568888
        # ^ Non-tight upper bound based on default values of PENALTY_N1, ..., N4

        return result
|
||||||
|
|
||||||
|
    def _get_alignment_pattern_positions(self) -> List[int]:
        """Ascending center coordinates of the alignment patterns for
        this version; empty for version 1 (which has none)."""
        ver: int = self.ver
        if ver == 1:
            return []

        numalign: int = ver // 7 + 2
        # spacing between pattern centers; version 32 is the one
        # irregular case in the standard table
        step: int = (
            26
            if (ver == 32)
            else (ver * 4 + numalign * 2 + 1) // (numalign * 2 - 2) * 2
        )
        # positions counted down from the bottom-right, plus the fixed
        # coordinate 6 next to the timing pattern
        result: List[int] = [
            (self.size - 7 - i * step) for i in range(numalign - 1)
        ] + [6]
        return list(reversed(result))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _get_num_raw_data_modules(ver: int) -> int:
|
||||||
|
result: int = (16 * ver + 128) * ver + 64
|
||||||
|
if ver >= 2:
|
||||||
|
numalign: int = ver // 7 + 2
|
||||||
|
result -= (25 * numalign - 10) * numalign - 55
|
||||||
|
if ver >= 7:
|
||||||
|
result -= 36
|
||||||
|
assert 208 <= result <= 29648
|
||||||
|
return result
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _get_num_data_codewords(ver: int, ecl: Ecc) -> int:
|
||||||
|
return (
|
||||||
|
QrCode._get_num_raw_data_modules(ver) // 8
|
||||||
|
- QrCode._ECC_CODEWORDS_PER_BLOCK[ecl.ordinal][ver]
|
||||||
|
* QrCode._NUM_ERROR_CORRECTION_BLOCKS[ecl.ordinal][ver]
|
||||||
|
)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _reed_solomon_compute_divisor(degree: int) -> bytes:
|
||||||
|
if not (1 <= degree <= 255):
|
||||||
|
raise ValueError("Degree out of range")
|
||||||
|
|
||||||
|
# Polynomial coefficients are stored from highest to lowest power, excluding the leading term which is always 1.
|
||||||
|
# For example the polynomial x^3 + 255x^2 + 8x + 93 is stored as the uint8 array [255, 8, 93].
|
||||||
|
result = bytearray([0] * (degree - 1) + [1]) # start with monomial x^0
|
||||||
|
|
||||||
|
# Compute the product polynomial (x - r^0) * (x - r^1) * (x - r^2) * ... * (x - r^{degree-1}),
|
||||||
|
# and drop the highest monomial term which is always 1x^degree.
|
||||||
|
# Note that r = 0x02, which is a generator element of this field GF(2^8/0x11D).
|
||||||
|
root: int = 1
|
||||||
|
for _ in range(degree):
|
||||||
|
# Multiply the current product by (x - r^i)
|
||||||
|
for j in range(degree):
|
||||||
|
result[j] = QrCode._reed_solomon_multiply(result[j], root)
|
||||||
|
if j + 1 < degree:
|
||||||
|
result[j] ^= result[j + 1]
|
||||||
|
root = QrCode._reed_solomon_multiply(root, 0x02)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _reed_solomon_compute_remainder(data: bytes, divisor: bytes) -> bytes:
|
||||||
|
result = bytearray([0] * len(divisor))
|
||||||
|
for b in data: # Polynomial division
|
||||||
|
factor: int = b ^ result.pop(0)
|
||||||
|
result.append(0)
|
||||||
|
for (i, coef) in enumerate(divisor):
|
||||||
|
result[i] ^= QrCode._reed_solomon_multiply(coef, factor)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _reed_solomon_multiply(x: int, y: int) -> int:
|
||||||
|
if (x >> 8 != 0) or (y >> 8 != 0):
|
||||||
|
raise ValueError("Byte out of range")
|
||||||
|
z: int = 0 # Russian peasant multiplication
|
||||||
|
for i in reversed(range(8)):
|
||||||
|
z = (z << 1) ^ ((z >> 7) * 0x11D)
|
||||||
|
z ^= ((y >> i) & 1) * x
|
||||||
|
assert z >> 8 == 0
|
||||||
|
return z
|
||||||
|
|
||||||
|
def _finder_penalty_count_patterns(self, runhistory: collections.deque[int]) -> int:
|
||||||
|
n: int = runhistory[1]
|
||||||
|
assert n <= self.size * 3
|
||||||
|
core: bool = (
|
||||||
|
n > 0
|
||||||
|
and (runhistory[2] == runhistory[4] == runhistory[5] == n)
|
||||||
|
and runhistory[3] == n * 3
|
||||||
|
)
|
||||||
|
return (
|
||||||
|
1 if (core and runhistory[0] >= n * 4 and runhistory[6] >= n) else 0
|
||||||
|
) + (1 if (core and runhistory[6] >= n * 4 and runhistory[0] >= n) else 0)
|
||||||
|
|
||||||
|
def _finder_penalty_terminate_and_count(
|
||||||
|
self,
|
||||||
|
currentruncolor: bool,
|
||||||
|
currentrunlength: int,
|
||||||
|
runhistory: collections.deque[int],
|
||||||
|
) -> int:
|
||||||
|
if currentruncolor: # Terminate dark run
|
||||||
|
self._finder_penalty_add_history(currentrunlength, runhistory)
|
||||||
|
currentrunlength = 0
|
||||||
|
currentrunlength += self.size # Add light border to final run
|
||||||
|
self._finder_penalty_add_history(currentrunlength, runhistory)
|
||||||
|
return self._finder_penalty_count_patterns(runhistory)
|
||||||
|
|
||||||
|
def _finder_penalty_add_history(
|
||||||
|
self, currentrunlength: int, runhistory: collections.deque[int]
|
||||||
|
) -> None:
|
||||||
|
if runhistory[0] == 0:
|
||||||
|
currentrunlength += self.size # Add light border to initial run
|
||||||
|
|
||||||
|
runhistory.appendleft(currentrunlength)
|
||||||
|
|
||||||
|
_PENALTY_N1: int = 3
|
||||||
|
_PENALTY_N2: int = 3
|
||||||
|
_PENALTY_N3: int = 40
|
||||||
|
_PENALTY_N4: int = 10
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
_ECC_CODEWORDS_PER_BLOCK: Sequence[Sequence[int]] = (
|
||||||
|
(-1, 7, 10, 15, 20, 26, 18, 20, 24, 30, 18, 20, 24, 26, 30, 22, 24, 28, 30, 28, 28, 28, 28, 30, 30, 26, 28, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30), # noqa: E241 # L
|
||||||
|
(-1, 10, 16, 26, 18, 24, 16, 18, 22, 22, 26, 30, 22, 22, 24, 24, 28, 28, 26, 26, 26, 26, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28), # noqa: E241 # M
|
||||||
|
(-1, 13, 22, 18, 26, 18, 24, 18, 22, 20, 24, 28, 26, 24, 20, 30, 24, 28, 28, 26, 30, 28, 30, 30, 30, 30, 28, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30), # noqa: E241 # Q
|
||||||
|
(-1, 17, 28, 22, 16, 22, 28, 26, 26, 24, 28, 24, 28, 22, 24, 24, 30, 28, 28, 26, 28, 30, 24, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30)) # noqa: E241 # H
|
||||||
|
|
||||||
|
_NUM_ERROR_CORRECTION_BLOCKS: Sequence[Sequence[int]] = (
|
||||||
|
(-1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4, 4, 4, 4, 4, 6, 6, 6, 6, 7, 8, 8, 9, 9, 10, 12, 12, 12, 13, 14, 15, 16, 17, 18, 19, 19, 20, 21, 22, 24, 25), # noqa: E241 # L
|
||||||
|
(-1, 1, 1, 1, 2, 2, 4, 4, 4, 5, 5, 5, 8, 9, 9, 10, 10, 11, 13, 14, 16, 17, 17, 18, 20, 21, 23, 25, 26, 28, 29, 31, 33, 35, 37, 38, 40, 43, 45, 47, 49), # noqa: E241 # M
|
||||||
|
(-1, 1, 1, 2, 2, 4, 4, 6, 6, 8, 8, 8, 10, 12, 16, 12, 17, 16, 18, 21, 20, 23, 23, 25, 27, 29, 34, 34, 35, 38, 40, 43, 45, 48, 51, 53, 56, 59, 62, 65, 68), # noqa: E241 # Q
|
||||||
|
(-1, 1, 1, 2, 4, 4, 4, 5, 6, 8, 8, 11, 11, 16, 16, 18, 16, 19, 21, 25, 25, 25, 34, 30, 32, 35, 37, 40, 42, 45, 48, 51, 54, 57, 60, 63, 66, 70, 74, 77, 81)) # noqa: E241 # H
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
_MASK_PATTERNS: Sequence[Callable[[int, int], int]] = (
|
||||||
|
(lambda x, y: (x + y) % 2),
|
||||||
|
(lambda x, y: y % 2),
|
||||||
|
(lambda x, y: x % 3),
|
||||||
|
(lambda x, y: (x + y) % 3),
|
||||||
|
(lambda x, y: (x // 3 + y // 2) % 2),
|
||||||
|
(lambda x, y: x * y % 2 + x * y % 3),
|
||||||
|
(lambda x, y: (x * y % 2 + x * y % 3) % 2),
|
||||||
|
(lambda x, y: ((x + y) % 2 + x * y % 3) % 2),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class _BitBuffer(list): # type: ignore
|
||||||
|
def append_bits(self, val: int, n: int) -> None:
|
||||||
|
if (n < 0) or (val >> n != 0):
|
||||||
|
raise ValueError("Value out of range")
|
||||||
|
|
||||||
|
self.extend(((val >> i) & 1) for i in reversed(range(n)))
|
||||||
|
|
||||||
|
|
||||||
|
def _get_bit(x: int, i: int) -> bool:
|
||||||
|
return (x >> i) & 1 != 0
|
||||||
|
|
||||||
|
|
||||||
|
class DataTooLongError(ValueError):
|
||||||
|
pass
|
||||||
@@ -20,10 +20,8 @@ PY3 = sys.version_info > (3,)
|
|||||||
WINDOWS = platform.system() == "Windows"
|
WINDOWS = platform.system() == "Windows"
|
||||||
FS_ERRORS = "surrogateescape"
|
FS_ERRORS = "surrogateescape"
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any
|
from typing import Any
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
if PY3:
|
if PY3:
|
||||||
|
|||||||
@@ -6,12 +6,10 @@ from datetime import datetime
|
|||||||
|
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Generator, Optional
|
from typing import Any, Generator, Optional
|
||||||
|
|
||||||
from .util import NamedLogger
|
from .util import NamedLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class StreamArc(object):
|
class StreamArc(object):
|
||||||
@@ -25,7 +23,7 @@ class StreamArc(object):
|
|||||||
self.fgen = fgen
|
self.fgen = fgen
|
||||||
|
|
||||||
def gen(self) -> Generator[Optional[bytes], None, None]:
|
def gen(self) -> Generator[Optional[bytes], None, None]:
|
||||||
pass
|
raise Exception("override me")
|
||||||
|
|
||||||
|
|
||||||
def errdesc(errors: list[tuple[str, str]]) -> tuple[dict[str, Any], list[str]]:
|
def errdesc(errors: list[tuple[str, str]]) -> tuple[dict[str, Any], list[str]]:
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import argparse
|
|||||||
import base64
|
import base64
|
||||||
import calendar
|
import calendar
|
||||||
import gzip
|
import gzip
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import shlex
|
import shlex
|
||||||
@@ -16,15 +17,17 @@ import threading
|
|||||||
import time
|
import time
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
|
|
||||||
try:
|
# from inspect import currentframe
|
||||||
|
# print(currentframe().f_lineno)
|
||||||
|
|
||||||
|
|
||||||
|
if True: # pylint: disable=using-constant-test
|
||||||
from types import FrameType
|
from types import FrameType
|
||||||
|
|
||||||
import typing
|
import typing
|
||||||
from typing import Any, Optional, Union
|
from typing import Any, Optional, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
from .__init__ import ANYWIN, MACOS, VT100, EnvParams, unicode
|
from .__init__ import ANYWIN, MACOS, TYPE_CHECKING, VT100, EnvParams, unicode
|
||||||
from .authsrv import AuthSrv
|
from .authsrv import AuthSrv
|
||||||
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE
|
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE
|
||||||
from .tcpsrv import TcpSrv
|
from .tcpsrv import TcpSrv
|
||||||
@@ -32,6 +35,9 @@ from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv
|
|||||||
from .up2k import Up2k
|
from .up2k import Up2k
|
||||||
from .util import (
|
from .util import (
|
||||||
VERSIONS,
|
VERSIONS,
|
||||||
|
Daemon,
|
||||||
|
Garda,
|
||||||
|
HLog,
|
||||||
HMaccas,
|
HMaccas,
|
||||||
alltrace,
|
alltrace,
|
||||||
ansi_re,
|
ansi_re,
|
||||||
@@ -41,6 +47,13 @@ from .util import (
|
|||||||
start_stackmon,
|
start_stackmon,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
try:
|
||||||
|
from .mdns import MDNS
|
||||||
|
from .ssdp import SSDPd
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class SvcHub(object):
|
class SvcHub(object):
|
||||||
"""
|
"""
|
||||||
@@ -75,8 +88,14 @@ class SvcHub(object):
|
|||||||
|
|
||||||
self.iphash = HMaccas(os.path.join(self.E.cfg, "iphash"), 8)
|
self.iphash = HMaccas(os.path.join(self.E.cfg, "iphash"), 8)
|
||||||
|
|
||||||
|
# for non-http clients (ftp)
|
||||||
|
self.bans: dict[str, int] = {}
|
||||||
|
self.gpwd = Garda(self.args.ban_pw)
|
||||||
|
self.g404 = Garda(self.args.ban_404)
|
||||||
|
|
||||||
if args.sss or args.s >= 3:
|
if args.sss or args.s >= 3:
|
||||||
args.ss = True
|
args.ss = True
|
||||||
|
args.no_dav = True
|
||||||
args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz"
|
args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz"
|
||||||
args.ls = args.ls or "**,*,ln,p,r"
|
args.ls = args.ls or "**,*,ln,p,r"
|
||||||
|
|
||||||
@@ -103,6 +122,11 @@ class SvcHub(object):
|
|||||||
if args.lo:
|
if args.lo:
|
||||||
self._setup_logfile(printed)
|
self._setup_logfile(printed)
|
||||||
|
|
||||||
|
lg = logging.getLogger()
|
||||||
|
lh = HLog(self.log)
|
||||||
|
lg.handlers = [lh]
|
||||||
|
lg.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
if args.stackmon:
|
if args.stackmon:
|
||||||
start_stackmon(args.stackmon, 0)
|
start_stackmon(args.stackmon, 0)
|
||||||
|
|
||||||
@@ -137,6 +161,14 @@ class SvcHub(object):
|
|||||||
if args.ls:
|
if args.ls:
|
||||||
self.asrv.dbg_ls()
|
self.asrv.dbg_ls()
|
||||||
|
|
||||||
|
if not ANYWIN:
|
||||||
|
self._setlimits()
|
||||||
|
|
||||||
|
self.log("root", "max clients: {}".format(self.args.nc))
|
||||||
|
|
||||||
|
if not self._process_config():
|
||||||
|
raise Exception("bad config")
|
||||||
|
|
||||||
self.tcpsrv = TcpSrv(self)
|
self.tcpsrv = TcpSrv(self)
|
||||||
self.up2k = Up2k(self)
|
self.up2k = Up2k(self)
|
||||||
|
|
||||||
@@ -177,10 +209,35 @@ class SvcHub(object):
|
|||||||
|
|
||||||
args.th_poke = min(args.th_poke, args.th_maxage, args.ac_maxage)
|
args.th_poke = min(args.th_poke, args.th_maxage, args.ac_maxage)
|
||||||
|
|
||||||
|
zms = ""
|
||||||
|
if not args.https_only:
|
||||||
|
zms += "d"
|
||||||
|
if not args.http_only:
|
||||||
|
zms += "D"
|
||||||
|
|
||||||
if args.ftp or args.ftps:
|
if args.ftp or args.ftps:
|
||||||
from .ftpd import Ftpd
|
from .ftpd import Ftpd
|
||||||
|
|
||||||
self.ftpd = Ftpd(self)
|
self.ftpd = Ftpd(self)
|
||||||
|
zms += "f" if args.ftp else "F"
|
||||||
|
|
||||||
|
if args.smb:
|
||||||
|
# impacket.dcerpc is noisy about listen timeouts
|
||||||
|
sto = socket.getdefaulttimeout()
|
||||||
|
socket.setdefaulttimeout(None)
|
||||||
|
|
||||||
|
from .smbd import SMB
|
||||||
|
|
||||||
|
self.smbd = SMB(self)
|
||||||
|
socket.setdefaulttimeout(sto)
|
||||||
|
self.smbd.start()
|
||||||
|
zms += "s"
|
||||||
|
|
||||||
|
if not args.zms:
|
||||||
|
args.zms = zms
|
||||||
|
|
||||||
|
self.mdns: Optional["MDNS"] = None
|
||||||
|
self.ssdp: Optional["SSDPd"] = None
|
||||||
|
|
||||||
# decide which worker impl to use
|
# decide which worker impl to use
|
||||||
if self.check_mp_enable():
|
if self.check_mp_enable():
|
||||||
@@ -222,12 +279,78 @@ class SvcHub(object):
|
|||||||
return
|
return
|
||||||
|
|
||||||
time.sleep(0.1) # purely cosmetic dw
|
time.sleep(0.1) # purely cosmetic dw
|
||||||
|
if self.tcpsrv.qr:
|
||||||
|
self.log("qr-code", self.tcpsrv.qr)
|
||||||
|
else:
|
||||||
self.log("root", "workers OK\n")
|
self.log("root", "workers OK\n")
|
||||||
|
|
||||||
self.up2k.init_vols()
|
self.up2k.init_vols()
|
||||||
|
|
||||||
thr = threading.Thread(target=self.sd_notify, name="sd-notify")
|
Daemon(self.sd_notify, "sd-notify")
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
def _process_config(self) -> bool:
|
||||||
|
al = self.args
|
||||||
|
|
||||||
|
al.zm_on = al.zm_on or al.z_on
|
||||||
|
al.zs_on = al.zs_on or al.z_on
|
||||||
|
al.zm_off = al.zm_off or al.z_off
|
||||||
|
al.zs_off = al.zs_off or al.z_off
|
||||||
|
for n in ("zm_on", "zm_off", "zs_on", "zs_off"):
|
||||||
|
vs = getattr(al, n).split(",")
|
||||||
|
vs = [x.strip() for x in vs]
|
||||||
|
vs = [x for x in vs if x]
|
||||||
|
setattr(al, n, vs)
|
||||||
|
|
||||||
|
R = al.rp_loc
|
||||||
|
if "//" in R or ":" in R:
|
||||||
|
t = "found URL in --rp-loc; it should be just the location, for example /foo/bar"
|
||||||
|
raise Exception(t)
|
||||||
|
|
||||||
|
al.R = R = R.strip("/")
|
||||||
|
al.SR = "/" + R if R else ""
|
||||||
|
al.RS = R + "/" if R else ""
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def _setlimits(self) -> None:
|
||||||
|
try:
|
||||||
|
import resource
|
||||||
|
|
||||||
|
soft, hard = [
|
||||||
|
x if x > 0 else 1024 * 1024
|
||||||
|
for x in list(resource.getrlimit(resource.RLIMIT_NOFILE))
|
||||||
|
]
|
||||||
|
except:
|
||||||
|
self.log("root", "failed to read rlimits from os", 6)
|
||||||
|
return
|
||||||
|
|
||||||
|
if not soft or not hard:
|
||||||
|
t = "got bogus rlimits from os ({}, {})"
|
||||||
|
self.log("root", t.format(soft, hard), 6)
|
||||||
|
return
|
||||||
|
|
||||||
|
want = self.args.nc * 4
|
||||||
|
new_soft = min(hard, want)
|
||||||
|
if new_soft < soft:
|
||||||
|
return
|
||||||
|
|
||||||
|
# t = "requesting rlimit_nofile({}), have {}"
|
||||||
|
# self.log("root", t.format(new_soft, soft), 6)
|
||||||
|
|
||||||
|
try:
|
||||||
|
import resource
|
||||||
|
|
||||||
|
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
|
||||||
|
soft = new_soft
|
||||||
|
except:
|
||||||
|
t = "rlimit denied; max open files: {}"
|
||||||
|
self.log("root", t.format(soft), 3)
|
||||||
|
return
|
||||||
|
|
||||||
|
if soft < want:
|
||||||
|
t = "max open files: {} (wanted {} for -nc {})"
|
||||||
|
self.log("root", t.format(soft, want, self.args.nc), 3)
|
||||||
|
self.args.nc = min(self.args.nc, soft // 2)
|
||||||
|
|
||||||
def _logname(self) -> str:
|
def _logname(self) -> str:
|
||||||
dt = datetime.utcnow()
|
dt = datetime.utcnow()
|
||||||
@@ -252,10 +375,12 @@ class SvcHub(object):
|
|||||||
fn = sel_fn
|
fn = sel_fn
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
if fn.lower().endswith(".xz"):
|
||||||
import lzma
|
import lzma
|
||||||
|
|
||||||
lh = lzma.open(fn, "wt", encoding="utf-8", errors="replace", preset=0)
|
lh = lzma.open(fn, "wt", encoding="utf-8", errors="replace", preset=0)
|
||||||
|
else:
|
||||||
|
lh = open(fn, "wt", encoding="utf-8", errors="replace")
|
||||||
except:
|
except:
|
||||||
import codecs
|
import codecs
|
||||||
|
|
||||||
@@ -269,7 +394,8 @@ class SvcHub(object):
|
|||||||
|
|
||||||
msg = "[+] opened logfile [{}]\n".format(fn)
|
msg = "[+] opened logfile [{}]\n".format(fn)
|
||||||
printed += msg
|
printed += msg
|
||||||
lh.write("t0: {:.3f}\nargv: {}\n\n{}".format(self.E.t0, " ".join(argv), printed))
|
t = "t0: {:.3f}\nargv: {}\n\n{}"
|
||||||
|
lh.write(t.format(self.E.t0, " ".join(argv), printed))
|
||||||
self.logf = lh
|
self.logf = lh
|
||||||
self.logf_base_fn = base_fn
|
self.logf_base_fn = base_fn
|
||||||
print(msg, end="")
|
print(msg, end="")
|
||||||
@@ -277,9 +403,25 @@ class SvcHub(object):
|
|||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
self.tcpsrv.run()
|
self.tcpsrv.run()
|
||||||
|
|
||||||
thr = threading.Thread(target=self.thr_httpsrv_up, name="sig-hsrv-up2")
|
if getattr(self.args, "zm", False):
|
||||||
thr.daemon = True
|
try:
|
||||||
thr.start()
|
from .mdns import MDNS
|
||||||
|
|
||||||
|
self.mdns = MDNS(self)
|
||||||
|
Daemon(self.mdns.run, "mdns")
|
||||||
|
except:
|
||||||
|
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
|
||||||
|
|
||||||
|
if getattr(self.args, "zs", False):
|
||||||
|
try:
|
||||||
|
from .ssdp import SSDPd
|
||||||
|
|
||||||
|
self.ssdp = SSDPd(self)
|
||||||
|
Daemon(self.ssdp.run, "ssdp")
|
||||||
|
except:
|
||||||
|
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
|
||||||
|
|
||||||
|
Daemon(self.thr_httpsrv_up, "sig-hsrv-up2")
|
||||||
|
|
||||||
sigs = [signal.SIGINT, signal.SIGTERM]
|
sigs = [signal.SIGINT, signal.SIGTERM]
|
||||||
if not ANYWIN:
|
if not ANYWIN:
|
||||||
@@ -294,9 +436,7 @@ class SvcHub(object):
|
|||||||
# never lucky
|
# never lucky
|
||||||
if ANYWIN:
|
if ANYWIN:
|
||||||
# msys-python probably fine but >msys-python
|
# msys-python probably fine but >msys-python
|
||||||
thr = threading.Thread(target=self.stop_thr, name="svchub-sig")
|
Daemon(self.stop_thr, "svchub-sig")
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
while not self.stop_req:
|
while not self.stop_req:
|
||||||
@@ -316,9 +456,7 @@ class SvcHub(object):
|
|||||||
return "cannot reload; already in progress"
|
return "cannot reload; already in progress"
|
||||||
|
|
||||||
self.reloading = True
|
self.reloading = True
|
||||||
t = threading.Thread(target=self._reload, name="reloading")
|
Daemon(self._reload, "reloading")
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
return "reload initiated"
|
return "reload initiated"
|
||||||
|
|
||||||
def _reload(self) -> None:
|
def _reload(self) -> None:
|
||||||
@@ -341,6 +479,17 @@ class SvcHub(object):
|
|||||||
|
|
||||||
self.shutdown()
|
self.shutdown()
|
||||||
|
|
||||||
|
def kill9(self, delay: float = 0.0) -> None:
|
||||||
|
if delay > 0.01:
|
||||||
|
time.sleep(delay)
|
||||||
|
print("component stuck; issuing sigkill")
|
||||||
|
time.sleep(0.1)
|
||||||
|
|
||||||
|
if ANYWIN:
|
||||||
|
os.system("taskkill /f /pid {}".format(os.getpid()))
|
||||||
|
else:
|
||||||
|
os.kill(os.getpid(), signal.SIGKILL)
|
||||||
|
|
||||||
def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None:
|
def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None:
|
||||||
if self.stopping:
|
if self.stopping:
|
||||||
if self.nsigs <= 0:
|
if self.nsigs <= 0:
|
||||||
@@ -350,10 +499,7 @@ class SvcHub(object):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if ANYWIN:
|
self.kill9()
|
||||||
os.system("taskkill /f /pid {}".format(os.getpid()))
|
|
||||||
else:
|
|
||||||
os.kill(os.getpid(), signal.SIGKILL)
|
|
||||||
else:
|
else:
|
||||||
self.nsigs -= 1
|
self.nsigs -= 1
|
||||||
return
|
return
|
||||||
@@ -380,8 +526,18 @@ class SvcHub(object):
|
|||||||
ret = 1
|
ret = 1
|
||||||
try:
|
try:
|
||||||
self.pr("OPYTHAT")
|
self.pr("OPYTHAT")
|
||||||
self.tcpsrv.shutdown()
|
slp = 0.0
|
||||||
|
|
||||||
|
if self.mdns:
|
||||||
|
Daemon(self.mdns.stop)
|
||||||
|
slp = time.time() + 0.5
|
||||||
|
|
||||||
|
if self.ssdp:
|
||||||
|
Daemon(self.ssdp.stop)
|
||||||
|
slp = time.time() + 0.5
|
||||||
|
|
||||||
self.broker.shutdown()
|
self.broker.shutdown()
|
||||||
|
self.tcpsrv.shutdown()
|
||||||
self.up2k.shutdown()
|
self.up2k.shutdown()
|
||||||
if self.thumbsrv:
|
if self.thumbsrv:
|
||||||
self.thumbsrv.shutdown()
|
self.thumbsrv.shutdown()
|
||||||
@@ -394,6 +550,14 @@ class SvcHub(object):
|
|||||||
if n == 3:
|
if n == 3:
|
||||||
self.pr("waiting for thumbsrv (10sec)...")
|
self.pr("waiting for thumbsrv (10sec)...")
|
||||||
|
|
||||||
|
if hasattr(self, "smbd"):
|
||||||
|
slp = max(slp, time.time() + 0.5)
|
||||||
|
Daemon(self.kill9, a=(1,))
|
||||||
|
Daemon(self.smbd.stop)
|
||||||
|
|
||||||
|
while time.time() < slp:
|
||||||
|
time.sleep(0.1)
|
||||||
|
|
||||||
self.pr("nailed it", end="")
|
self.pr("nailed it", end="")
|
||||||
ret = self.retcode
|
ret = self.retcode
|
||||||
except:
|
except:
|
||||||
@@ -417,7 +581,7 @@ class SvcHub(object):
|
|||||||
|
|
||||||
with self.log_mutex:
|
with self.log_mutex:
|
||||||
ts = datetime.utcnow().strftime("%Y-%m%d-%H%M%S.%f")[:-3]
|
ts = datetime.utcnow().strftime("%Y-%m%d-%H%M%S.%f")[:-3]
|
||||||
self.logf.write("@{} [{}] {}\n".format(ts, src, msg))
|
self.logf.write("@{} [{}\033[0m] {}\n".format(ts, src, msg))
|
||||||
|
|
||||||
now = time.time()
|
now = time.time()
|
||||||
if now >= self.next_day:
|
if now >= self.next_day:
|
||||||
|
|||||||
@@ -3,18 +3,17 @@ from __future__ import print_function, unicode_literals
|
|||||||
|
|
||||||
import calendar
|
import calendar
|
||||||
import time
|
import time
|
||||||
|
import stat
|
||||||
import zlib
|
import zlib
|
||||||
|
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .sutil import StreamArc, errdesc
|
from .sutil import StreamArc, errdesc
|
||||||
from .util import min_ex, sanitize_fn, spack, sunpack, yieldfile
|
from .util import min_ex, sanitize_fn, spack, sunpack, yieldfile
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Generator, Optional
|
from typing import Any, Generator, Optional
|
||||||
|
|
||||||
from .util import NamedLogger
|
from .util import NamedLogger
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def dostime2unix(buf: bytes) -> int:
|
def dostime2unix(buf: bytes) -> int:
|
||||||
@@ -240,6 +239,9 @@ class StreamZip(StreamArc):
|
|||||||
src = f["ap"]
|
src = f["ap"]
|
||||||
st = f["st"]
|
st = f["st"]
|
||||||
|
|
||||||
|
if stat.S_ISDIR(st.st_mode):
|
||||||
|
return
|
||||||
|
|
||||||
sz = st.st_size
|
sz = st.st_size
|
||||||
ts = st.st_mtime
|
ts = st.st_mtime
|
||||||
|
|
||||||
@@ -271,6 +273,7 @@ class StreamZip(StreamArc):
|
|||||||
yield self._ct(buf)
|
yield self._ct(buf)
|
||||||
|
|
||||||
def gen(self) -> Generator[bytes, None, None]:
|
def gen(self) -> Generator[bytes, None, None]:
|
||||||
|
errf: dict[str, Any] = {}
|
||||||
errors = []
|
errors = []
|
||||||
try:
|
try:
|
||||||
for f in self.fgen:
|
for f in self.fgen:
|
||||||
@@ -311,5 +314,5 @@ class StreamZip(StreamArc):
|
|||||||
ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end)
|
ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end)
|
||||||
yield self._ct(ecdr)
|
yield self._ct(ecdr)
|
||||||
finally:
|
finally:
|
||||||
if errors:
|
if errf:
|
||||||
bos.unlink(errf["ap"])
|
bos.unlink(errf["ap"])
|
||||||
|
|||||||
@@ -6,12 +6,28 @@ import re
|
|||||||
import socket
|
import socket
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from .__init__ import ANYWIN, MACOS, TYPE_CHECKING, unicode
|
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, unicode
|
||||||
from .util import chkcmd
|
from .stolen.qrcodegen import QrCode
|
||||||
|
from .util import (
|
||||||
|
E_ACCESS,
|
||||||
|
E_ADDR_IN_USE,
|
||||||
|
E_ADDR_NOT_AVAIL,
|
||||||
|
E_UNREACH,
|
||||||
|
Netdev,
|
||||||
|
min_ex,
|
||||||
|
sunpack,
|
||||||
|
termsize,
|
||||||
|
)
|
||||||
|
|
||||||
|
if True:
|
||||||
|
from typing import Generator
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
|
|
||||||
|
if not hasattr(socket, "IPPROTO_IPV6"):
|
||||||
|
setattr(socket, "IPPROTO_IPV6", 41)
|
||||||
|
|
||||||
|
|
||||||
class TcpSrv(object):
|
class TcpSrv(object):
|
||||||
"""
|
"""
|
||||||
@@ -29,44 +45,112 @@ class TcpSrv(object):
|
|||||||
|
|
||||||
self.stopping = False
|
self.stopping = False
|
||||||
self.srv: list[socket.socket] = []
|
self.srv: list[socket.socket] = []
|
||||||
|
self.bound: list[tuple[str, int]] = []
|
||||||
self.nsrv = 0
|
self.nsrv = 0
|
||||||
|
self.qr = ""
|
||||||
|
pad = False
|
||||||
ok: dict[str, list[int]] = {}
|
ok: dict[str, list[int]] = {}
|
||||||
for ip in self.args.i:
|
for ip in self.args.i:
|
||||||
ok[ip] = []
|
if ip == "::":
|
||||||
|
if socket.has_ipv6:
|
||||||
|
ips = ["::", "0.0.0.0"]
|
||||||
|
dual = True
|
||||||
|
else:
|
||||||
|
ips = ["0.0.0.0"]
|
||||||
|
dual = False
|
||||||
|
else:
|
||||||
|
ips = [ip]
|
||||||
|
dual = False
|
||||||
|
|
||||||
|
for ipa in ips:
|
||||||
|
ok[ipa] = []
|
||||||
|
|
||||||
for port in self.args.p:
|
for port in self.args.p:
|
||||||
self.nsrv += 1
|
successful_binds = 0
|
||||||
try:
|
try:
|
||||||
self._listen(ip, port)
|
for ipa in ips:
|
||||||
ok[ip].append(port)
|
try:
|
||||||
|
self._listen(ipa, port)
|
||||||
|
ok[ipa].append(port)
|
||||||
|
successful_binds += 1
|
||||||
|
except:
|
||||||
|
if dual and ":" in ipa:
|
||||||
|
t = "listen on IPv6 [{}] failed; trying IPv4 {}...\n{}"
|
||||||
|
self.log("tcpsrv", t.format(ipa, ips[1], min_ex()), 3)
|
||||||
|
pad = True
|
||||||
|
continue
|
||||||
|
|
||||||
|
# binding 0.0.0.0 after :: fails on dualstack
|
||||||
|
# but is necessary on non-dualstakc
|
||||||
|
if successful_binds:
|
||||||
|
continue
|
||||||
|
|
||||||
|
raise
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
if self.args.ign_ebind or self.args.ign_ebind_all:
|
if self.args.ign_ebind or self.args.ign_ebind_all:
|
||||||
t = "could not listen on {}:{}: {}"
|
t = "could not listen on {}:{}: {}"
|
||||||
self.log("tcpsrv", t.format(ip, port, ex), c=3)
|
self.log("tcpsrv", t.format(ip, port, ex), c=3)
|
||||||
|
pad = True
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
if not self.srv and not self.args.ign_ebind_all:
|
if not self.srv and not self.args.ign_ebind_all:
|
||||||
raise Exception("could not listen on any of the given interfaces")
|
raise Exception("could not listen on any of the given interfaces")
|
||||||
|
|
||||||
if self.nsrv != len(self.srv):
|
if pad:
|
||||||
self.log("tcpsrv", "")
|
self.log("tcpsrv", "")
|
||||||
|
|
||||||
ip = "127.0.0.1"
|
eps = {
|
||||||
eps = {ip: "local only"}
|
"127.0.0.1": Netdev("127.0.0.1", 0, "", "local only"),
|
||||||
nonlocals = [x for x in self.args.i if x != ip]
|
"::1": Netdev("::1", 0, "", "local only"),
|
||||||
|
}
|
||||||
|
nonlocals = [x for x in self.args.i if x not in [k.split("/")[0] for k in eps]]
|
||||||
if nonlocals:
|
if nonlocals:
|
||||||
eps = self.detect_interfaces(self.args.i)
|
try:
|
||||||
|
self.netdevs = self.detect_interfaces(self.args.i)
|
||||||
|
except:
|
||||||
|
t = "failed to discover server IP addresses\n"
|
||||||
|
self.log("tcpsrv", t + min_ex(), 3)
|
||||||
|
self.netdevs = {}
|
||||||
|
|
||||||
|
eps.update({k.split("/")[0]: v for k, v in self.netdevs.items()})
|
||||||
if not eps:
|
if not eps:
|
||||||
for x in nonlocals:
|
for x in nonlocals:
|
||||||
eps[x] = "external"
|
eps[x] = Netdev(x, 0, "", "external")
|
||||||
|
else:
|
||||||
|
self.netdevs = {}
|
||||||
|
|
||||||
|
# keep IPv6 LL-only nics
|
||||||
|
ll_ok: set[str] = set()
|
||||||
|
for ip, nd in self.netdevs.items():
|
||||||
|
if not ip.startswith("fe80"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
just_ll = True
|
||||||
|
for ip2, nd2 in self.netdevs.items():
|
||||||
|
if nd == nd2 and ":" in ip2 and not ip2.startswith("fe80"):
|
||||||
|
just_ll = False
|
||||||
|
|
||||||
|
if just_ll or self.args.ll:
|
||||||
|
ll_ok.add(ip.split("/")[0])
|
||||||
|
|
||||||
|
qr1: dict[str, list[int]] = {}
|
||||||
|
qr2: dict[str, list[int]] = {}
|
||||||
msgs = []
|
msgs = []
|
||||||
title_tab: dict[str, dict[str, int]] = {}
|
title_tab: dict[str, dict[str, int]] = {}
|
||||||
title_vars = [x[1:] for x in self.args.wintitle.split(" ") if x.startswith("$")]
|
title_vars = [x[1:] for x in self.args.wintitle.split(" ") if x.startswith("$")]
|
||||||
t = "available @ {}://{}:{}/ (\033[33m{}\033[0m)"
|
t = "available @ {}://{}:{}/ (\033[33m{}\033[0m)"
|
||||||
for ip, desc in sorted(eps.items(), key=lambda x: x[1]):
|
for ip, desc in sorted(eps.items(), key=lambda x: x[1]):
|
||||||
|
if ip.startswith("fe80") and ip not in ll_ok:
|
||||||
|
continue
|
||||||
|
|
||||||
for port in sorted(self.args.p):
|
for port in sorted(self.args.p):
|
||||||
if port not in ok.get(ip, ok.get("0.0.0.0", [])):
|
if (
|
||||||
|
port not in ok.get(ip, [])
|
||||||
|
and port not in ok.get("::", [])
|
||||||
|
and port not in ok.get("0.0.0.0", [])
|
||||||
|
):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
proto = " http"
|
proto = " http"
|
||||||
@@ -75,7 +159,15 @@ class TcpSrv(object):
|
|||||||
elif self.args.https_only or port == 443:
|
elif self.args.https_only or port == 443:
|
||||||
proto = "https"
|
proto = "https"
|
||||||
|
|
||||||
msgs.append(t.format(proto, ip, port, desc))
|
hip = "[{}]".format(ip) if ":" in ip else ip
|
||||||
|
msgs.append(t.format(proto, hip, port, desc))
|
||||||
|
|
||||||
|
is_ext = "external" in unicode(desc)
|
||||||
|
qrt = qr1 if is_ext else qr2
|
||||||
|
try:
|
||||||
|
qrt[ip].append(port)
|
||||||
|
except:
|
||||||
|
qrt[ip] = [port]
|
||||||
|
|
||||||
if not self.args.wintitle:
|
if not self.args.wintitle:
|
||||||
continue
|
continue
|
||||||
@@ -86,7 +178,7 @@ class TcpSrv(object):
|
|||||||
ep = "{}:{}".format(ip, port)
|
ep = "{}:{}".format(ip, port)
|
||||||
|
|
||||||
hits = []
|
hits = []
|
||||||
if "pub" in title_vars and "external" in unicode(desc):
|
if "pub" in title_vars and is_ext:
|
||||||
hits.append(("pub", ep))
|
hits.append(("pub", ep))
|
||||||
|
|
||||||
if "pub" in title_vars or "all" in title_vars:
|
if "pub" in title_vars or "all" in title_vars:
|
||||||
@@ -103,42 +195,81 @@ class TcpSrv(object):
|
|||||||
title_tab[tk] = {tv: 1}
|
title_tab[tk] = {tv: 1}
|
||||||
|
|
||||||
if msgs:
|
if msgs:
|
||||||
msgs[-1] += "\n"
|
|
||||||
for t in msgs:
|
for t in msgs:
|
||||||
self.log("tcpsrv", t)
|
self.log("tcpsrv", t)
|
||||||
|
|
||||||
if self.args.wintitle:
|
if self.args.wintitle:
|
||||||
self._set_wintitle(title_tab)
|
self._set_wintitle(title_tab)
|
||||||
|
else:
|
||||||
|
print("\n", end="")
|
||||||
|
|
||||||
|
if self.args.qr or self.args.qrs:
|
||||||
|
self.qr = self._qr(qr1, qr2)
|
||||||
|
|
||||||
def _listen(self, ip: str, port: int) -> None:
|
def _listen(self, ip: str, port: int) -> None:
|
||||||
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
ipv = socket.AF_INET6 if ":" in ip else socket.AF_INET
|
||||||
|
srv = socket.socket(ipv, socket.SOCK_STREAM)
|
||||||
|
|
||||||
|
if not ANYWIN or self.args.reuseaddr:
|
||||||
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
|
|
||||||
srv.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
srv.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
||||||
srv.settimeout(None) # < does not inherit, ^ does
|
srv.settimeout(None) # < does not inherit, ^ opts above do
|
||||||
|
|
||||||
|
try:
|
||||||
|
srv.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
|
||||||
|
except:
|
||||||
|
pass # will create another ipv4 socket instead
|
||||||
|
|
||||||
try:
|
try:
|
||||||
srv.bind((ip, port))
|
srv.bind((ip, port))
|
||||||
self.srv.append(srv)
|
self.srv.append(srv)
|
||||||
except (OSError, socket.error) as ex:
|
except (OSError, socket.error) as ex:
|
||||||
if ex.errno in [98, 48]:
|
if ex.errno in E_ADDR_IN_USE:
|
||||||
e = "\033[1;31mport {} is busy on interface {}\033[0m".format(port, ip)
|
e = "\033[1;31mport {} is busy on interface {}\033[0m".format(port, ip)
|
||||||
elif ex.errno in [99, 49]:
|
elif ex.errno in E_ADDR_NOT_AVAIL:
|
||||||
e = "\033[1;31minterface {} does not exist\033[0m".format(ip)
|
e = "\033[1;31minterface {} does not exist\033[0m".format(ip)
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
raise Exception(e)
|
raise Exception(e)
|
||||||
|
|
||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
|
all_eps = [x.getsockname()[:2] for x in self.srv]
|
||||||
|
bound: list[tuple[str, int]] = []
|
||||||
|
srvs: list[socket.socket] = []
|
||||||
for srv in self.srv:
|
for srv in self.srv:
|
||||||
|
ip, port = srv.getsockname()[:2]
|
||||||
|
try:
|
||||||
srv.listen(self.args.nc)
|
srv.listen(self.args.nc)
|
||||||
ip, port = srv.getsockname()
|
except:
|
||||||
|
if ip == "0.0.0.0" and ("::", port) in bound:
|
||||||
|
# dualstack
|
||||||
|
srv.close()
|
||||||
|
continue
|
||||||
|
|
||||||
|
if ip == "::" and ("0.0.0.0", port) in all_eps:
|
||||||
|
# no ipv6
|
||||||
|
srv.close()
|
||||||
|
continue
|
||||||
|
|
||||||
|
raise
|
||||||
|
|
||||||
|
bound.append((ip, port))
|
||||||
|
srvs.append(srv)
|
||||||
fno = srv.fileno()
|
fno = srv.fileno()
|
||||||
msg = "listening @ {}:{} f{} p{}".format(ip, port, fno, os.getpid())
|
hip = "[{}]".format(ip) if ":" in ip else ip
|
||||||
|
msg = "listening @ {}:{} f{} p{}".format(hip, port, fno, os.getpid())
|
||||||
self.log("tcpsrv", msg)
|
self.log("tcpsrv", msg)
|
||||||
if self.args.q:
|
if self.args.q:
|
||||||
print(msg)
|
print(msg)
|
||||||
|
|
||||||
self.hub.broker.say("listen", srv)
|
self.hub.broker.say("listen", srv)
|
||||||
|
|
||||||
|
self.srv = srvs
|
||||||
|
self.bound = bound
|
||||||
|
self.nsrv = len(srvs)
|
||||||
|
self.hub.broker.say("set_netdevs", self.netdevs)
|
||||||
|
|
||||||
def shutdown(self) -> None:
|
def shutdown(self) -> None:
|
||||||
self.stopping = True
|
self.stopping = True
|
||||||
try:
|
try:
|
||||||
@@ -149,180 +280,84 @@ class TcpSrv(object):
|
|||||||
|
|
||||||
self.log("tcpsrv", "ok bye")
|
self.log("tcpsrv", "ok bye")
|
||||||
|
|
||||||
def ips_linux_ifconfig(self) -> dict[str, str]:
|
def detect_interfaces(self, listen_ips: list[str]) -> dict[str, Netdev]:
|
||||||
# for termux
|
from .stolen.ifaddr import get_adapters
|
||||||
|
|
||||||
|
nics = get_adapters(True)
|
||||||
|
eps: dict[str, Netdev] = {}
|
||||||
|
for nic in nics:
|
||||||
|
for nip in nic.ips:
|
||||||
|
ipa = nip.ip[0] if ":" in str(nip.ip) else nip.ip
|
||||||
|
sip = "{}/{}".format(ipa, nip.network_prefix)
|
||||||
|
nd = Netdev(sip, nic.index or 0, nic.nice_name, "")
|
||||||
|
eps[sip] = nd
|
||||||
try:
|
try:
|
||||||
txt, _ = chkcmd(["ifconfig"])
|
idx = socket.if_nametoindex(nd.name)
|
||||||
except:
|
if idx and idx != nd.idx:
|
||||||
return {}
|
t = "netdev idx mismatch; ifaddr={} cpython={}"
|
||||||
|
self.log("tcpsrv", t.format(nd.idx, idx), 3)
|
||||||
eps: dict[str, str] = {}
|
nd.idx = idx
|
||||||
dev = None
|
|
||||||
ip = None
|
|
||||||
up = None
|
|
||||||
for ln in (txt + "\n").split("\n"):
|
|
||||||
if not ln.strip() and dev and ip:
|
|
||||||
eps[ip] = dev + ("" if up else ", \033[31mLINK-DOWN")
|
|
||||||
dev = ip = up = None
|
|
||||||
continue
|
|
||||||
|
|
||||||
if ln == ln.lstrip():
|
|
||||||
dev = re.split(r"[: ]", ln)[0]
|
|
||||||
|
|
||||||
if "UP" in re.split(r"[<>, \t]", ln):
|
|
||||||
up = True
|
|
||||||
|
|
||||||
m = re.match(r"^\s+inet\s+([^ ]+)", ln)
|
|
||||||
if m:
|
|
||||||
ip = m.group(1)
|
|
||||||
|
|
||||||
return eps
|
|
||||||
|
|
||||||
def ips_linux(self) -> dict[str, str]:
|
|
||||||
try:
|
|
||||||
txt, _ = chkcmd(["ip", "addr"])
|
|
||||||
except:
|
|
||||||
return self.ips_linux_ifconfig()
|
|
||||||
|
|
||||||
r = re.compile(r"^\s+inet ([^ ]+)/.* (.*)")
|
|
||||||
ri = re.compile(r"^\s*[0-9]+\s*:.*")
|
|
||||||
up = False
|
|
||||||
eps: dict[str, str] = {}
|
|
||||||
for ln in txt.split("\n"):
|
|
||||||
if ri.match(ln):
|
|
||||||
up = "UP" in re.split("[>,< ]", ln)
|
|
||||||
|
|
||||||
try:
|
|
||||||
ip, dev = r.match(ln.rstrip()).groups() # type: ignore
|
|
||||||
eps[ip] = dev + ("" if up else ", \033[31mLINK-DOWN")
|
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
return eps
|
if "0.0.0.0" not in listen_ips and "::" not in listen_ips:
|
||||||
|
eps = {k: v for k, v in eps.items() if k.split("/")[0] in listen_ips}
|
||||||
|
|
||||||
def ips_macos(self) -> dict[str, str]:
|
|
||||||
eps: dict[str, str] = {}
|
|
||||||
try:
|
try:
|
||||||
txt, _ = chkcmd(["ifconfig"])
|
ext_devs = list(self._extdevs_nix())
|
||||||
|
ext_ips = [k for k, v in eps.items() if v.name in ext_devs]
|
||||||
|
ext_ips = [x.split("/")[0] for x in ext_ips]
|
||||||
|
if not ext_ips:
|
||||||
|
raise Exception()
|
||||||
except:
|
except:
|
||||||
return eps
|
rt = self._defroute()
|
||||||
|
ext_ips = [rt] if rt else []
|
||||||
|
|
||||||
rdev = re.compile(r"^([^ ]+):")
|
for lip in listen_ips:
|
||||||
rip = re.compile(r"^\tinet ([0-9\.]+) ")
|
if not ext_ips or lip not in ["0.0.0.0", "::"] + ext_ips:
|
||||||
dev = "UNKNOWN"
|
continue
|
||||||
for ln in txt.split("\n"):
|
|
||||||
m = rdev.match(ln)
|
|
||||||
if m:
|
|
||||||
dev = m.group(1)
|
|
||||||
|
|
||||||
m = rip.match(ln)
|
desc = "\033[32mexternal"
|
||||||
if m:
|
ips = ext_ips if lip in ["0.0.0.0", "::"] else [lip]
|
||||||
eps[m.group(1)] = dev
|
for ip in ips:
|
||||||
dev = "UNKNOWN"
|
ip = next((x for x in eps if x.startswith(ip + "/")), "")
|
||||||
|
if ip and "external" not in eps[ip].desc:
|
||||||
|
eps[ip].desc += ", " + desc
|
||||||
|
|
||||||
return eps
|
return eps
|
||||||
|
|
||||||
def ips_windows_ipconfig(self) -> tuple[dict[str, str], set[str]]:
|
def _extdevs_nix(self) -> Generator[str, None, None]:
|
||||||
eps: dict[str, str] = {}
|
with open("/proc/net/route", "rb") as f:
|
||||||
offs: set[str] = set()
|
next(f)
|
||||||
try:
|
for ln in f:
|
||||||
txt, _ = chkcmd(["ipconfig"])
|
r = ln.decode("utf-8").strip().split()
|
||||||
except:
|
if r[1] == "0" * 8 and int(r[3], 16) & 2:
|
||||||
return eps, offs
|
yield r[0]
|
||||||
|
|
||||||
rdev = re.compile(r"(^[^ ].*):$")
|
def _defroute(self) -> str:
|
||||||
rip = re.compile(r"^ +IPv?4? [^:]+: *([0-9\.]{7,15})$")
|
ret = ""
|
||||||
roff = re.compile(r".*: Media disconnected$")
|
|
||||||
dev = None
|
|
||||||
for ln in txt.replace("\r", "").split("\n"):
|
|
||||||
m = rdev.match(ln)
|
|
||||||
if m:
|
|
||||||
if dev and dev not in eps.values():
|
|
||||||
offs.add(dev)
|
|
||||||
|
|
||||||
dev = m.group(1).split(" adapter ", 1)[-1]
|
|
||||||
|
|
||||||
if dev and roff.match(ln):
|
|
||||||
offs.add(dev)
|
|
||||||
dev = None
|
|
||||||
|
|
||||||
m = rip.match(ln)
|
|
||||||
if m and dev:
|
|
||||||
eps[m.group(1)] = dev
|
|
||||||
dev = None
|
|
||||||
|
|
||||||
if dev and dev not in eps.values():
|
|
||||||
offs.add(dev)
|
|
||||||
|
|
||||||
return eps, offs
|
|
||||||
|
|
||||||
def ips_windows_netsh(self) -> dict[str, str]:
|
|
||||||
eps: dict[str, str] = {}
|
|
||||||
try:
|
|
||||||
txt, _ = chkcmd("netsh interface ip show address".split())
|
|
||||||
except:
|
|
||||||
return eps
|
|
||||||
|
|
||||||
rdev = re.compile(r'.* "([^"]+)"$')
|
|
||||||
rip = re.compile(r".* IP\b.*: +([0-9\.]{7,15})$")
|
|
||||||
dev = None
|
|
||||||
for ln in txt.replace("\r", "").split("\n"):
|
|
||||||
m = rdev.match(ln)
|
|
||||||
if m:
|
|
||||||
dev = m.group(1)
|
|
||||||
|
|
||||||
m = rip.match(ln)
|
|
||||||
if m and dev:
|
|
||||||
eps[m.group(1)] = dev
|
|
||||||
|
|
||||||
return eps
|
|
||||||
|
|
||||||
def detect_interfaces(self, listen_ips: list[str]) -> dict[str, str]:
|
|
||||||
if MACOS:
|
|
||||||
eps = self.ips_macos()
|
|
||||||
elif ANYWIN:
|
|
||||||
eps, off = self.ips_windows_ipconfig() # sees more interfaces + link state
|
|
||||||
eps.update(self.ips_windows_netsh()) # has better names
|
|
||||||
for k, v in eps.items():
|
|
||||||
if v in off:
|
|
||||||
eps[k] += ", \033[31mLINK-DOWN"
|
|
||||||
else:
|
|
||||||
eps = self.ips_linux()
|
|
||||||
|
|
||||||
if "0.0.0.0" not in listen_ips:
|
|
||||||
eps = {k: v for k, v in eps.items() if k in listen_ips}
|
|
||||||
|
|
||||||
default_route = None
|
|
||||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||||
for ip in [
|
for ip in [
|
||||||
"10.255.255.255",
|
"10.254.39.23",
|
||||||
"172.31.255.255",
|
"172.31.39.23",
|
||||||
"192.168.255.255",
|
"192.168.39.23",
|
||||||
"239.255.255.255",
|
"239.254.39.23",
|
||||||
|
"169.254.39.23",
|
||||||
# could add 1.1.1.1 as a final fallback
|
# could add 1.1.1.1 as a final fallback
|
||||||
# but external connections is kinshi
|
# but external connections is kinshi
|
||||||
]:
|
]:
|
||||||
try:
|
try:
|
||||||
s.connect((ip, 1))
|
s.connect((ip, 1))
|
||||||
default_route = s.getsockname()[0]
|
ret = s.getsockname()[0]
|
||||||
break
|
break
|
||||||
except (OSError, socket.error) as ex:
|
except (OSError, socket.error) as ex:
|
||||||
if ex.errno == 13:
|
if ex.errno in E_ACCESS:
|
||||||
self.log("tcpsrv", "eaccess {} (trying next)".format(ip))
|
self.log("tcpsrv", "eaccess {} (trying next)".format(ip))
|
||||||
elif ex.errno not in [101, 10065, 10051]:
|
elif ex.errno not in E_UNREACH:
|
||||||
self.log("tcpsrv", "route lookup failed; err {}".format(ex.errno))
|
self.log("tcpsrv", "route lookup failed; err {}".format(ex.errno))
|
||||||
|
|
||||||
s.close()
|
s.close()
|
||||||
|
return ret
|
||||||
for lip in listen_ips:
|
|
||||||
if default_route and lip in ["0.0.0.0", default_route]:
|
|
||||||
desc = "\033[32mexternal"
|
|
||||||
try:
|
|
||||||
eps[default_route] += ", " + desc
|
|
||||||
except:
|
|
||||||
eps[default_route] = desc
|
|
||||||
|
|
||||||
return eps
|
|
||||||
|
|
||||||
def _set_wintitle(self, vs: dict[str, dict[str, int]]) -> None:
|
def _set_wintitle(self, vs: dict[str, dict[str, int]]) -> None:
|
||||||
vs["all"] = vs.get("all", {"Local-Only": 1})
|
vs["all"] = vs.get("all", {"Local-Only": 1})
|
||||||
@@ -330,19 +365,108 @@ class TcpSrv(object):
|
|||||||
|
|
||||||
vs2 = {}
|
vs2 = {}
|
||||||
for k, eps in vs.items():
|
for k, eps in vs.items():
|
||||||
vs2[k] = {
|
filt = {ep: 1 for ep in eps if ":" not in ep}
|
||||||
ep: 1
|
have = set(filt)
|
||||||
for ep in eps.keys()
|
for ep in sorted(eps):
|
||||||
if ":" not in ep or ep.split(":")[0] not in eps
|
ip = ep.split(":")[0]
|
||||||
}
|
if ip not in have:
|
||||||
|
have.add(ip)
|
||||||
|
filt[ep] = 1
|
||||||
|
|
||||||
|
lo = [x for x in filt if x.startswith("127.")]
|
||||||
|
if len(filt) > 3 and lo:
|
||||||
|
for ip in lo:
|
||||||
|
filt.pop(ip)
|
||||||
|
|
||||||
|
vs2[k] = filt
|
||||||
|
|
||||||
title = ""
|
title = ""
|
||||||
vs = vs2
|
vs = vs2
|
||||||
for p in self.args.wintitle.split(" "):
|
for p in self.args.wintitle.split(" "):
|
||||||
if p.startswith("$"):
|
if p.startswith("$"):
|
||||||
p = " and ".join(sorted(vs.get(p[1:], {"(None)": 1}).keys()))
|
seps = list(sorted(vs.get(p[1:], {"(None)": 1}).keys()))
|
||||||
|
p = ", ".join(seps[:3])
|
||||||
|
if len(seps) > 3:
|
||||||
|
p += ", ..."
|
||||||
|
|
||||||
title += "{} ".format(p)
|
title += "{} ".format(p)
|
||||||
|
|
||||||
print("\033]0;{}\033\\".format(title), file=sys.stderr, end="")
|
print("\033]0;{}\033\\\n".format(title), file=sys.stderr, end="")
|
||||||
sys.stderr.flush()
|
sys.stderr.flush()
|
||||||
|
|
||||||
|
def _qr(self, t1: dict[str, list[int]], t2: dict[str, list[int]]) -> str:
|
||||||
|
ip = None
|
||||||
|
ips = list(t1) + list(t2)
|
||||||
|
qri = self.args.qri
|
||||||
|
if self.args.zm and not qri:
|
||||||
|
name = self.args.name + ".local"
|
||||||
|
t1[name] = next(v for v in (t1 or t2).values())
|
||||||
|
ips = [name] + ips
|
||||||
|
|
||||||
|
for ip in ips:
|
||||||
|
if ip.startswith(qri) or qri == ".":
|
||||||
|
break
|
||||||
|
ip = ""
|
||||||
|
|
||||||
|
if not ip:
|
||||||
|
# maybe /bin/ip is missing or smth
|
||||||
|
ip = qri
|
||||||
|
|
||||||
|
if not ip:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
if ":" in ip:
|
||||||
|
ip = "[{}]".format(ip)
|
||||||
|
|
||||||
|
if self.args.http_only:
|
||||||
|
https = ""
|
||||||
|
elif self.args.https_only:
|
||||||
|
https = "s"
|
||||||
|
else:
|
||||||
|
https = "s" if self.args.qrs else ""
|
||||||
|
|
||||||
|
ports = t1.get(ip, t2.get(ip, []))
|
||||||
|
dport = 443 if https else 80
|
||||||
|
port = "" if dport in ports or not ports else ":{}".format(ports[0])
|
||||||
|
txt = "http{}://{}{}/{}".format(https, ip, port, self.args.qrl)
|
||||||
|
|
||||||
|
btxt = txt.encode("utf-8")
|
||||||
|
if PY2:
|
||||||
|
btxt = sunpack(b"B" * len(btxt), btxt)
|
||||||
|
|
||||||
|
fg = self.args.qr_fg
|
||||||
|
bg = self.args.qr_bg
|
||||||
|
pad = self.args.qrp
|
||||||
|
zoom = self.args.qrz
|
||||||
|
qrc = QrCode.encode_binary(btxt)
|
||||||
|
if zoom == 0:
|
||||||
|
try:
|
||||||
|
tw, th = termsize()
|
||||||
|
tsz = min(tw // 2, th)
|
||||||
|
zoom = 1 if qrc.size + pad * 2 >= tsz else 2
|
||||||
|
except:
|
||||||
|
zoom = 1
|
||||||
|
|
||||||
|
qr = qrc.render(zoom, pad)
|
||||||
|
if not VT100:
|
||||||
|
return "{}\n{}".format(txt, qr)
|
||||||
|
|
||||||
|
halfc = "\033[40;48;5;{0}m{1}\033[47;48;5;{2}m"
|
||||||
|
if not fg:
|
||||||
|
halfc = "\033[0;40m{1}\033[0;47m"
|
||||||
|
|
||||||
|
def ansify(m: re.Match) -> str:
|
||||||
|
return halfc.format(fg, " " * len(m.group(1)), bg)
|
||||||
|
|
||||||
|
if zoom > 1:
|
||||||
|
qr = re.sub("(█+)", ansify, qr)
|
||||||
|
|
||||||
|
qr = qr.replace("\n", "\033[K\n") + "\033[K" # win10do
|
||||||
|
cc = " \033[0;38;5;{0};47;48;5;{1}m" if fg else " \033[0;30;47m"
|
||||||
|
t = cc + "\n{2}\033[999G\033[0m\033[J"
|
||||||
|
t = t.format(fg, bg, qr)
|
||||||
|
if ANYWIN:
|
||||||
|
# prevent color loss on terminal resize
|
||||||
|
t = t.replace("\n", "`\n`")
|
||||||
|
|
||||||
|
return txt + t
|
||||||
|
|||||||
@@ -9,10 +9,8 @@ from .bos import bos
|
|||||||
from .th_srv import HAVE_WEBP, thumb_path
|
from .th_srv import HAVE_WEBP, thumb_path
|
||||||
from .util import Cooldown
|
from .util import Cooldown
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .httpsrv import HttpSrv
|
from .httpsrv import HttpSrv
|
||||||
@@ -30,6 +28,8 @@ class ThumbCli(object):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
c = hsrv.th_cfg
|
c = hsrv.th_cfg
|
||||||
|
if not c:
|
||||||
|
raise Exception()
|
||||||
except:
|
except:
|
||||||
c = {k: {} for k in ["thumbable", "pil", "vips", "ffi", "ffv", "ffa"]}
|
c = {k: {} for k in ["thumbable", "pil", "vips", "ffi", "ffv", "ffa"]}
|
||||||
|
|
||||||
|
|||||||
@@ -14,12 +14,20 @@ from queue import Queue
|
|||||||
from .__init__ import TYPE_CHECKING
|
from .__init__ import TYPE_CHECKING
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE, ffprobe
|
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE, ffprobe
|
||||||
from .util import BytesIO, Cooldown, Pebkac, fsenc, min_ex, runcmd, statdir, vsplit
|
from .util import (
|
||||||
|
BytesIO,
|
||||||
|
Cooldown,
|
||||||
|
Daemon,
|
||||||
|
Pebkac,
|
||||||
|
fsenc,
|
||||||
|
min_ex,
|
||||||
|
runcmd,
|
||||||
|
statdir,
|
||||||
|
vsplit,
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
@@ -106,11 +114,7 @@ class ThumbSrv(object):
|
|||||||
|
|
||||||
self.q: Queue[Optional[tuple[str, str]]] = Queue(self.nthr * 4)
|
self.q: Queue[Optional[tuple[str, str]]] = Queue(self.nthr * 4)
|
||||||
for n in range(self.nthr):
|
for n in range(self.nthr):
|
||||||
thr = threading.Thread(
|
Daemon(self.worker, "thumb-{}-{}".format(n, self.nthr))
|
||||||
target=self.worker, name="thumb-{}-{}".format(n, self.nthr)
|
|
||||||
)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
want_ff = not self.args.no_vthumb or not self.args.no_athumb
|
want_ff = not self.args.no_vthumb or not self.args.no_athumb
|
||||||
if want_ff and (not HAVE_FFMPEG or not HAVE_FFPROBE):
|
if want_ff and (not HAVE_FFMPEG or not HAVE_FFPROBE):
|
||||||
@@ -126,9 +130,7 @@ class ThumbSrv(object):
|
|||||||
self.log(msg, c=3)
|
self.log(msg, c=3)
|
||||||
|
|
||||||
if self.args.th_clean:
|
if self.args.th_clean:
|
||||||
t = threading.Thread(target=self.cleaner, name="thumb.cln")
|
Daemon(self.cleaner, "thumb.cln")
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
self.fmt_pil, self.fmt_vips, self.fmt_ffi, self.fmt_ffv, self.fmt_ffa = [
|
self.fmt_pil, self.fmt_vips, self.fmt_ffi, self.fmt_ffv, self.fmt_ffa = [
|
||||||
set(y.split(","))
|
set(y.split(","))
|
||||||
@@ -269,7 +271,7 @@ class ThumbSrv(object):
|
|||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = "{} could not create thumbnail of {}\n{}"
|
msg = "{} could not create thumbnail of {}\n{}"
|
||||||
msg = msg.format(fun.__name__, abspath, min_ex())
|
msg = msg.format(fun.__name__, abspath, min_ex())
|
||||||
c: Union[str, int] = 1 if "<Signals.SIG" in msg else "1;30"
|
c: Union[str, int] = 1 if "<Signals.SIG" in msg else "90"
|
||||||
self.log(msg, c)
|
self.log(msg, c)
|
||||||
if getattr(ex, "returncode", 0) != 321:
|
if getattr(ex, "returncode", 0) != 321:
|
||||||
with open(tpath, "wb") as _:
|
with open(tpath, "wb") as _:
|
||||||
@@ -323,7 +325,7 @@ class ThumbSrv(object):
|
|||||||
try:
|
try:
|
||||||
im = self.fancy_pillow(im)
|
im = self.fancy_pillow(im)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
self.log("fancy_pillow {}".format(ex), "1;30")
|
self.log("fancy_pillow {}".format(ex), "90")
|
||||||
im.thumbnail(self.res)
|
im.thumbnail(self.res)
|
||||||
|
|
||||||
fmts = ["RGB", "L"]
|
fmts = ["RGB", "L"]
|
||||||
@@ -423,7 +425,7 @@ class ThumbSrv(object):
|
|||||||
if not ret:
|
if not ret:
|
||||||
return
|
return
|
||||||
|
|
||||||
c: Union[str, int] = "1;30"
|
c: Union[str, int] = "90"
|
||||||
t = "FFmpeg failed (probably a corrupt video file):\n"
|
t = "FFmpeg failed (probably a corrupt video file):\n"
|
||||||
if (
|
if (
|
||||||
(not self.args.th_ff_jpg or time.time() - int(self.args.th_ff_jpg) < 60)
|
(not self.args.th_ff_jpg or time.time() - int(self.args.th_ff_jpg) < 60)
|
||||||
@@ -621,7 +623,7 @@ class ThumbSrv(object):
|
|||||||
|
|
||||||
def _clean(self, cat: str, thumbpath: str) -> int:
|
def _clean(self, cat: str, thumbpath: str) -> int:
|
||||||
# self.log("cln {}".format(thumbpath))
|
# self.log("cln {}".format(thumbpath))
|
||||||
exts = ["jpg", "webp"] if cat == "th" else ["opus", "caf"]
|
exts = ["jpg", "webp", "png"] if cat == "th" else ["opus", "caf"]
|
||||||
maxage = getattr(self.args, cat + "_maxage")
|
maxage = getattr(self.args, cat + "_maxage")
|
||||||
now = time.time()
|
now = time.time()
|
||||||
prev_b64 = None
|
prev_b64 = None
|
||||||
|
|||||||
@@ -11,7 +11,16 @@ from operator import itemgetter
|
|||||||
from .__init__ import ANYWIN, TYPE_CHECKING, unicode
|
from .__init__ import ANYWIN, TYPE_CHECKING, unicode
|
||||||
from .bos import bos
|
from .bos import bos
|
||||||
from .up2k import up2k_wark_from_hashlist
|
from .up2k import up2k_wark_from_hashlist
|
||||||
from .util import HAVE_SQLITE3, Pebkac, absreal, gen_filekey, min_ex, quotep, s3dec
|
from .util import (
|
||||||
|
HAVE_SQLITE3,
|
||||||
|
Daemon,
|
||||||
|
Pebkac,
|
||||||
|
absreal,
|
||||||
|
gen_filekey,
|
||||||
|
min_ex,
|
||||||
|
quotep,
|
||||||
|
s3dec,
|
||||||
|
)
|
||||||
|
|
||||||
if HAVE_SQLITE3:
|
if HAVE_SQLITE3:
|
||||||
import sqlite3
|
import sqlite3
|
||||||
@@ -21,10 +30,8 @@ try:
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Optional, Union
|
from typing import Any, Optional, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .httpconn import HttpConn
|
from .httpconn import HttpConn
|
||||||
@@ -90,14 +97,17 @@ class U2idx(object):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
cur = None
|
cur = None
|
||||||
if ANYWIN:
|
if ANYWIN and not bos.path.exists(db_path + "-wal"):
|
||||||
uri = ""
|
uri = ""
|
||||||
try:
|
try:
|
||||||
uri = "{}?mode=ro&nolock=1".format(Path(db_path).as_uri())
|
uri = "{}?mode=ro&nolock=1".format(Path(db_path).as_uri())
|
||||||
cur = sqlite3.connect(uri, 2, uri=True).cursor()
|
cur = sqlite3.connect(uri, 2, uri=True).cursor()
|
||||||
|
cur.execute('pragma table_info("up")').fetchone()
|
||||||
self.log("ro: {}".format(db_path))
|
self.log("ro: {}".format(db_path))
|
||||||
except:
|
except:
|
||||||
self.log("could not open read-only: {}\n{}".format(uri, min_ex()))
|
self.log("could not open read-only: {}\n{}".format(uri, min_ex()))
|
||||||
|
# may not fail until the pragma so unset it
|
||||||
|
cur = None
|
||||||
|
|
||||||
if not cur:
|
if not cur:
|
||||||
# on windows, this steals the write-lock from up2k.deferred_init --
|
# on windows, this steals the write-lock from up2k.deferred_init --
|
||||||
@@ -190,7 +200,7 @@ class U2idx(object):
|
|||||||
v = "exists(select 1 from mt where mt.w = mtw and " + vq
|
v = "exists(select 1 from mt where mt.w = mtw and " + vq
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise Pebkac(400, "invalid key [" + v + "]")
|
raise Pebkac(400, "invalid key [{}]".format(v))
|
||||||
|
|
||||||
q += v + " "
|
q += v + " "
|
||||||
continue
|
continue
|
||||||
@@ -270,16 +280,7 @@ class U2idx(object):
|
|||||||
self.active_id = "{:.6f}_{}".format(
|
self.active_id = "{:.6f}_{}".format(
|
||||||
time.time(), threading.current_thread().ident
|
time.time(), threading.current_thread().ident
|
||||||
)
|
)
|
||||||
thr = threading.Thread(
|
Daemon(self.terminator, "u2idx-terminator", (self.active_id, done_flag))
|
||||||
target=self.terminator,
|
|
||||||
args=(
|
|
||||||
self.active_id,
|
|
||||||
done_flag,
|
|
||||||
),
|
|
||||||
name="u2idx-terminator",
|
|
||||||
)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
if not uq or not uv:
|
if not uq or not uv:
|
||||||
uq = "select * from up"
|
uq = "select * from up"
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
from __future__ import print_function, unicode_literals
|
from __future__ import print_function, unicode_literals
|
||||||
|
|
||||||
import base64
|
import base64
|
||||||
|
import errno
|
||||||
import gzip
|
import gzip
|
||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
@@ -28,6 +29,7 @@ from .mtag import MParser, MTag
|
|||||||
from .util import (
|
from .util import (
|
||||||
HAVE_SQLITE3,
|
HAVE_SQLITE3,
|
||||||
SYMTIME,
|
SYMTIME,
|
||||||
|
Daemon,
|
||||||
MTHash,
|
MTHash,
|
||||||
Pebkac,
|
Pebkac,
|
||||||
ProgressPrinter,
|
ProgressPrinter,
|
||||||
@@ -36,6 +38,7 @@ from .util import (
|
|||||||
db_ex_chk,
|
db_ex_chk,
|
||||||
djoin,
|
djoin,
|
||||||
fsenc,
|
fsenc,
|
||||||
|
hidedir,
|
||||||
min_ex,
|
min_ex,
|
||||||
quotep,
|
quotep,
|
||||||
ren_open,
|
ren_open,
|
||||||
@@ -58,10 +61,8 @@ if HAVE_SQLITE3:
|
|||||||
|
|
||||||
DB_VER = 5
|
DB_VER = 5
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
from typing import Any, Optional, Pattern, Union
|
from typing import Any, Optional, Pattern, Union
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from .svchub import SvcHub
|
from .svchub import SvcHub
|
||||||
@@ -152,9 +153,8 @@ class Up2k(object):
|
|||||||
if ANYWIN:
|
if ANYWIN:
|
||||||
# usually fails to set lastmod too quickly
|
# usually fails to set lastmod too quickly
|
||||||
self.lastmod_q: list[tuple[str, int, tuple[int, int], bool]] = []
|
self.lastmod_q: list[tuple[str, int, tuple[int, int], bool]] = []
|
||||||
thr = threading.Thread(target=self._lastmodder, name="up2k-lastmod")
|
self.lastmod_q2 = self.lastmod_q[:]
|
||||||
thr.daemon = True
|
Daemon(self._lastmodder, "up2k-lastmod")
|
||||||
thr.start()
|
|
||||||
|
|
||||||
self.fstab = Fstab(self.log_func)
|
self.fstab = Fstab(self.log_func)
|
||||||
|
|
||||||
@@ -170,9 +170,7 @@ class Up2k(object):
|
|||||||
if self.args.no_fastboot:
|
if self.args.no_fastboot:
|
||||||
return
|
return
|
||||||
|
|
||||||
t = threading.Thread(target=self.deferred_init, name="up2k-deferred-init")
|
Daemon(self.deferred_init, "up2k-deferred-init")
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
def reload(self) -> None:
|
def reload(self) -> None:
|
||||||
self.gid += 1
|
self.gid += 1
|
||||||
@@ -184,35 +182,35 @@ class Up2k(object):
|
|||||||
all_vols = self.asrv.vfs.all_vols
|
all_vols = self.asrv.vfs.all_vols
|
||||||
have_e2d = self.init_indexes(all_vols, [])
|
have_e2d = self.init_indexes(all_vols, [])
|
||||||
|
|
||||||
|
if self.stop:
|
||||||
|
# up-mt consistency not guaranteed if init is interrupted;
|
||||||
|
# drop caches for a full scan on next boot
|
||||||
|
self._drop_caches()
|
||||||
|
|
||||||
|
if self.pp:
|
||||||
|
self.pp.end = True
|
||||||
|
self.pp = None
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
if not self.pp and self.args.exit == "idx":
|
if not self.pp and self.args.exit == "idx":
|
||||||
return self.hub.sigterm()
|
return self.hub.sigterm()
|
||||||
|
|
||||||
thr = threading.Thread(target=self._snapshot, name="up2k-snapshot")
|
Daemon(self._snapshot, "up2k-snapshot")
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
if have_e2d:
|
if have_e2d:
|
||||||
thr = threading.Thread(target=self._hasher, name="up2k-hasher")
|
Daemon(self._hasher, "up2k-hasher")
|
||||||
thr.daemon = True
|
Daemon(self._sched_rescan, "up2k-rescan")
|
||||||
thr.start()
|
|
||||||
|
|
||||||
thr = threading.Thread(target=self._sched_rescan, name="up2k-rescan")
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
if self.mtag:
|
if self.mtag:
|
||||||
for n in range(max(1, self.args.mtag_mt)):
|
for n in range(max(1, self.args.mtag_mt)):
|
||||||
name = "tagger-{}".format(n)
|
Daemon(self._tagger, "tagger-{}".format(n))
|
||||||
thr = threading.Thread(target=self._tagger, name=name)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
thr = threading.Thread(target=self._run_all_mtp, name="up2k-mtp-init")
|
Daemon(self._run_all_mtp, "up2k-mtp-init")
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
def log(self, msg: str, c: Union[int, str] = 0) -> None:
|
||||||
self.log_func("up2k", msg + "\033[K", c)
|
if self.pp:
|
||||||
|
msg += "\033[K"
|
||||||
|
|
||||||
|
self.log_func("up2k", msg, c)
|
||||||
|
|
||||||
def _block(self, why: str) -> None:
|
def _block(self, why: str) -> None:
|
||||||
self.blocked = why
|
self.blocked = why
|
||||||
@@ -255,13 +253,11 @@ class Up2k(object):
|
|||||||
return "cannot initiate; scan is already in progress"
|
return "cannot initiate; scan is already in progress"
|
||||||
|
|
||||||
args = (all_vols, scan_vols)
|
args = (all_vols, scan_vols)
|
||||||
t = threading.Thread(
|
Daemon(
|
||||||
target=self.init_indexes,
|
self.init_indexes,
|
||||||
args=args,
|
"up2k-rescan-{}".format(scan_vols[0] if scan_vols else "all"),
|
||||||
name="up2k-rescan-{}".format(scan_vols[0] if scan_vols else "all"),
|
args,
|
||||||
)
|
)
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
def _sched_rescan(self) -> None:
|
def _sched_rescan(self) -> None:
|
||||||
@@ -371,7 +367,7 @@ class Up2k(object):
|
|||||||
if vp:
|
if vp:
|
||||||
fvp = "{}/{}".format(vp, fvp)
|
fvp = "{}/{}".format(vp, fvp)
|
||||||
|
|
||||||
self._handle_rm(LEELOO_DALLAS, "", fvp)
|
self._handle_rm(LEELOO_DALLAS, "", fvp, [])
|
||||||
nrm += 1
|
nrm += 1
|
||||||
|
|
||||||
if nrm:
|
if nrm:
|
||||||
@@ -480,6 +476,10 @@ class Up2k(object):
|
|||||||
if next((zv for zv in vols if "e2ds" in zv.flags), None):
|
if next((zv for zv in vols if "e2ds" in zv.flags), None):
|
||||||
self._block("indexing")
|
self._block("indexing")
|
||||||
|
|
||||||
|
if self.args.re_dhash:
|
||||||
|
self.args.re_dhash = False
|
||||||
|
self._drop_caches()
|
||||||
|
|
||||||
for vol in vols:
|
for vol in vols:
|
||||||
if self.stop:
|
if self.stop:
|
||||||
break
|
break
|
||||||
@@ -567,6 +567,34 @@ class Up2k(object):
|
|||||||
if self.stop:
|
if self.stop:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
for vol in all_vols.values():
|
||||||
|
if vol.flags["dbd"] == "acid":
|
||||||
|
continue
|
||||||
|
|
||||||
|
reg = self.register_vpath(vol.realpath, vol.flags)
|
||||||
|
try:
|
||||||
|
assert reg
|
||||||
|
cur, db_path = reg
|
||||||
|
if bos.path.getsize(db_path + "-wal") < 1024 * 1024 * 5:
|
||||||
|
continue
|
||||||
|
except:
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
with self.mutex:
|
||||||
|
cur.execute("pragma wal_checkpoint(truncate)")
|
||||||
|
try:
|
||||||
|
cur.execute("commit") # absolutely necessary! for some reason
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
cur.connection.commit() # this one maybe not
|
||||||
|
except Exception as ex:
|
||||||
|
self.log("checkpoint failed: {}".format(ex), 3)
|
||||||
|
|
||||||
|
if self.stop:
|
||||||
|
return False
|
||||||
|
|
||||||
self.pp.end = True
|
self.pp.end = True
|
||||||
|
|
||||||
msg = "{} volumes in {:.2f} sec"
|
msg = "{} volumes in {:.2f} sec"
|
||||||
@@ -580,7 +608,7 @@ class Up2k(object):
|
|||||||
if self.mtag:
|
if self.mtag:
|
||||||
t = "online (running mtp)"
|
t = "online (running mtp)"
|
||||||
if scan_vols:
|
if scan_vols:
|
||||||
thr = threading.Thread(target=self._run_all_mtp, name="up2k-mtp-scan")
|
thr = Daemon(self._run_all_mtp, "up2k-mtp-scan", r=False)
|
||||||
else:
|
else:
|
||||||
self.pp = None
|
self.pp = None
|
||||||
t = "online, idle"
|
t = "online, idle"
|
||||||
@@ -589,7 +617,6 @@ class Up2k(object):
|
|||||||
self.volstate[vol.vpath] = t
|
self.volstate[vol.vpath] = t
|
||||||
|
|
||||||
if thr:
|
if thr:
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
thr.start()
|
||||||
|
|
||||||
return have_e2d
|
return have_e2d
|
||||||
@@ -613,7 +640,7 @@ class Up2k(object):
|
|||||||
|
|
||||||
ft = "\033[0;32m{}{:.0}"
|
ft = "\033[0;32m{}{:.0}"
|
||||||
ff = "\033[0;35m{}{:.0}"
|
ff = "\033[0;35m{}{:.0}"
|
||||||
fv = "\033[0;36m{}:\033[1;30m{}"
|
fv = "\033[0;36m{}:\033[90m{}"
|
||||||
fx = set(("html_head",))
|
fx = set(("html_head",))
|
||||||
a = [
|
a = [
|
||||||
(ft if v is True else ff if v is False else fv).format(k, str(v))
|
(ft if v is True else ff if v is False else fv).format(k, str(v))
|
||||||
@@ -672,11 +699,45 @@ class Up2k(object):
|
|||||||
if not HAVE_SQLITE3 or "e2d" not in flags or "d2d" in flags:
|
if not HAVE_SQLITE3 or "e2d" not in flags or "d2d" in flags:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
bos.makedirs(histpath)
|
if bos.makedirs(histpath):
|
||||||
|
hidedir(histpath)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
cur = self._open_db(db_path)
|
cur = self._open_db(db_path)
|
||||||
self.cur[ptop] = cur
|
self.cur[ptop] = cur
|
||||||
|
|
||||||
|
# speeds measured uploading 520 small files on a WD20SPZX (SMR 2.5" 5400rpm 4kb)
|
||||||
|
dbd = flags["dbd"]
|
||||||
|
if dbd == "acid":
|
||||||
|
# 217.5s; python-defaults
|
||||||
|
zs = "delete"
|
||||||
|
sync = "full"
|
||||||
|
elif dbd == "swal":
|
||||||
|
# 88.0s; still 99.9% safe (can lose a bit of on OS crash)
|
||||||
|
zs = "wal"
|
||||||
|
sync = "full"
|
||||||
|
elif dbd == "yolo":
|
||||||
|
# 2.7s; may lose entire db on OS crash
|
||||||
|
zs = "wal"
|
||||||
|
sync = "off"
|
||||||
|
else:
|
||||||
|
# 4.1s; corruption-safe but more likely to lose wal
|
||||||
|
zs = "wal"
|
||||||
|
sync = "normal"
|
||||||
|
|
||||||
|
try:
|
||||||
|
amode = cur.execute("pragma journal_mode=" + zs).fetchone()[0]
|
||||||
|
if amode.lower() != zs.lower():
|
||||||
|
t = "sqlite failed to set journal_mode {}; got {}"
|
||||||
|
raise Exception(t.format(zs, amode))
|
||||||
|
except Exception as ex:
|
||||||
|
if sync != "off":
|
||||||
|
sync = "full"
|
||||||
|
t = "reverting to sync={} because {}"
|
||||||
|
self.log(t.format(sync, ex))
|
||||||
|
|
||||||
|
cur.execute("pragma synchronous=" + sync)
|
||||||
|
cur.connection.commit()
|
||||||
return cur, db_path
|
return cur, db_path
|
||||||
except:
|
except:
|
||||||
msg = "cannot use database at [{}]:\n{}"
|
msg = "cannot use database at [{}]:\n{}"
|
||||||
@@ -956,6 +1017,7 @@ class Up2k(object):
|
|||||||
if n:
|
if n:
|
||||||
t = "forgetting {} shadowed autoindexed files in [{}] > [{}]"
|
t = "forgetting {} shadowed autoindexed files in [{}] > [{}]"
|
||||||
self.log(t.format(n, top, sh_rd))
|
self.log(t.format(n, top, sh_rd))
|
||||||
|
assert sh_erd
|
||||||
|
|
||||||
q = "delete from dh where (d = ? or d like ?||'%')"
|
q = "delete from dh where (d = ? or d like ?||'%')"
|
||||||
db.c.execute(q, (sh_erd, sh_erd + "/"))
|
db.c.execute(q, (sh_erd, sh_erd + "/"))
|
||||||
@@ -1196,6 +1258,18 @@ class Up2k(object):
|
|||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
def _drop_caches(self) -> None:
|
||||||
|
self.log("dropping caches for a full filesystem scan")
|
||||||
|
for vol in self.asrv.vfs.all_vols.values():
|
||||||
|
reg = self.register_vpath(vol.realpath, vol.flags)
|
||||||
|
if not reg:
|
||||||
|
continue
|
||||||
|
|
||||||
|
cur, _ = reg
|
||||||
|
self._set_tagscan(cur, True)
|
||||||
|
cur.execute("delete from dh")
|
||||||
|
cur.connection.commit()
|
||||||
|
|
||||||
def _set_tagscan(self, cur: "sqlite3.Cursor", need: bool) -> bool:
|
def _set_tagscan(self, cur: "sqlite3.Cursor", need: bool) -> bool:
|
||||||
if self.args.no_dhash:
|
if self.args.no_dhash:
|
||||||
return False
|
return False
|
||||||
@@ -1440,7 +1514,7 @@ class Up2k(object):
|
|||||||
|
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "parsers for {}: \033[0m{}"
|
t = "parsers for {}: \033[0m{}"
|
||||||
self.log(t.format(ptop, list(parsers.keys())), "1;30")
|
self.log(t.format(ptop, list(parsers.keys())), "90")
|
||||||
|
|
||||||
self.mtp_parsers[ptop] = parsers
|
self.mtp_parsers[ptop] = parsers
|
||||||
|
|
||||||
@@ -1571,7 +1645,7 @@ class Up2k(object):
|
|||||||
all_parsers = self.mtp_parsers[ptop]
|
all_parsers = self.mtp_parsers[ptop]
|
||||||
except:
|
except:
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
self.log("no mtp defined for {}".format(ptop), "1;30")
|
self.log("no mtp defined for {}".format(ptop), "90")
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
entags = self.entags[ptop]
|
entags = self.entags[ptop]
|
||||||
@@ -1582,14 +1656,14 @@ class Up2k(object):
|
|||||||
# is audio, require non-audio?
|
# is audio, require non-audio?
|
||||||
if v.audio == "n":
|
if v.audio == "n":
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "skip mtp {}; is no-audio, have audio"
|
t = "skip mtp {}; want no-audio, got audio"
|
||||||
self.log(t.format(k), "1;30")
|
self.log(t.format(k), "90")
|
||||||
continue
|
continue
|
||||||
# is not audio, require audio?
|
# is not audio, require audio?
|
||||||
elif v.audio == "y":
|
elif v.audio == "y":
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "skip mtp {}; is audio, have no-audio"
|
t = "skip mtp {}; want audio, got no-audio"
|
||||||
self.log(t.format(k), "1;30")
|
self.log(t.format(k), "90")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if v.ext:
|
if v.ext:
|
||||||
@@ -1601,8 +1675,8 @@ class Up2k(object):
|
|||||||
|
|
||||||
if not match:
|
if not match:
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "skip mtp {}; need file-ext {}, have {}"
|
t = "skip mtp {}; want file-ext {}, got {}"
|
||||||
self.log(t.format(k, v.ext, abspath.rsplit(".")[-1]), "1;30")
|
self.log(t.format(k, v.ext, abspath.rsplit(".")[-1]), "90")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
parsers[k] = v
|
parsers[k] = v
|
||||||
@@ -1621,11 +1695,7 @@ class Up2k(object):
|
|||||||
|
|
||||||
mpool: Queue[Mpqe] = Queue(nw)
|
mpool: Queue[Mpqe] = Queue(nw)
|
||||||
for _ in range(nw):
|
for _ in range(nw):
|
||||||
thr = threading.Thread(
|
Daemon(self._tag_thr, "up2k-mpool", (mpool,))
|
||||||
target=self._tag_thr, args=(mpool,), name="up2k-mpool"
|
|
||||||
)
|
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
return mpool
|
return mpool
|
||||||
|
|
||||||
@@ -1650,13 +1720,13 @@ class Up2k(object):
|
|||||||
if not qe.mtp:
|
if not qe.mtp:
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "tag-thr: {}({})"
|
t = "tag-thr: {}({})"
|
||||||
self.log(t.format(self.mtag.backend, qe.abspath), "1;30")
|
self.log(t.format(self.mtag.backend, qe.abspath), "90")
|
||||||
|
|
||||||
tags = self.mtag.get(qe.abspath)
|
tags = self.mtag.get(qe.abspath)
|
||||||
else:
|
else:
|
||||||
if self.args.mtag_vv:
|
if self.args.mtag_vv:
|
||||||
t = "tag-thr: {}({})"
|
t = "tag-thr: {}({})"
|
||||||
self.log(t.format(list(qe.mtp.keys()), qe.abspath), "1;30")
|
self.log(t.format(list(qe.mtp.keys()), qe.abspath), "90")
|
||||||
|
|
||||||
tags = self.mtag.get_bin(qe.mtp, qe.abspath, qe.oth_tags)
|
tags = self.mtag.get_bin(qe.mtp, qe.abspath, qe.oth_tags)
|
||||||
vtags = [
|
vtags = [
|
||||||
@@ -1748,9 +1818,13 @@ class Up2k(object):
|
|||||||
self._set_tagscan(write_cur, True)
|
self._set_tagscan(write_cur, True)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
def _trace(self, msg: str) -> None:
|
||||||
|
self.log("ST: {}".format(msg))
|
||||||
|
|
||||||
def _orz(self, db_path: str) -> "sqlite3.Cursor":
|
def _orz(self, db_path: str) -> "sqlite3.Cursor":
|
||||||
return sqlite3.connect(db_path, self.timeout, check_same_thread=False).cursor()
|
c = sqlite3.connect(db_path, self.timeout, check_same_thread=False).cursor()
|
||||||
# x.set_trace_callback(trace)
|
# c.connection.set_trace_callback(self._trace)
|
||||||
|
return c
|
||||||
|
|
||||||
def _open_db(self, db_path: str) -> "sqlite3.Cursor":
|
def _open_db(self, db_path: str) -> "sqlite3.Cursor":
|
||||||
existed = bos.path.exists(db_path)
|
existed = bos.path.exists(db_path)
|
||||||
@@ -1922,12 +1996,23 @@ class Up2k(object):
|
|||||||
sprs = self.fstab.get(pdir) != "ng"
|
sprs = self.fstab.get(pdir) != "ng"
|
||||||
|
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
cur = self.cur.get(cj["ptop"])
|
ptop = cj["ptop"]
|
||||||
reg = self.registry[cj["ptop"]]
|
jcur = self.cur.get(ptop)
|
||||||
|
reg = self.registry[ptop]
|
||||||
vfs = self.asrv.vfs.all_vols[cj["vtop"]]
|
vfs = self.asrv.vfs.all_vols[cj["vtop"]]
|
||||||
n4g = vfs.flags.get("noforget")
|
n4g = vfs.flags.get("noforget")
|
||||||
lost: list[tuple[str, str]] = []
|
lost: list[tuple["sqlite3.Cursor", str, str]] = []
|
||||||
if cur:
|
|
||||||
|
vols = [(ptop, jcur)] if jcur else []
|
||||||
|
if vfs.flags.get("xlink"):
|
||||||
|
vols += [(k, v) for k, v in self.cur.items() if k != ptop]
|
||||||
|
|
||||||
|
alts: list[tuple[int, int, dict[str, Any]]] = []
|
||||||
|
for ptop, cur in vols:
|
||||||
|
allv = self.asrv.vfs.all_vols
|
||||||
|
cvfs = next((v for v in allv.values() if v.realpath == ptop), vfs)
|
||||||
|
vtop = cj["vtop"] if cur == jcur else cvfs.vpath
|
||||||
|
|
||||||
if self.no_expr_idx:
|
if self.no_expr_idx:
|
||||||
q = r"select * from up where w = ?"
|
q = r"select * from up where w = ?"
|
||||||
argv = [wark]
|
argv = [wark]
|
||||||
@@ -1935,13 +2020,12 @@ class Up2k(object):
|
|||||||
q = r"select * from up where substr(w,1,16) = ? and w = ?"
|
q = r"select * from up where substr(w,1,16) = ? and w = ?"
|
||||||
argv = [wark[:16], wark]
|
argv = [wark[:16], wark]
|
||||||
|
|
||||||
alts: list[tuple[int, int, dict[str, Any]]] = []
|
c2 = cur.execute(q, tuple(argv))
|
||||||
cur = cur.execute(q, tuple(argv))
|
for _, dtime, dsize, dp_dir, dp_fn, ip, at in c2:
|
||||||
for _, dtime, dsize, dp_dir, dp_fn, ip, at in cur:
|
|
||||||
if dp_dir.startswith("//") or dp_fn.startswith("//"):
|
if dp_dir.startswith("//") or dp_fn.startswith("//"):
|
||||||
dp_dir, dp_fn = s3dec(dp_dir, dp_fn)
|
dp_dir, dp_fn = s3dec(dp_dir, dp_fn)
|
||||||
|
|
||||||
dp_abs = "/".join([cj["ptop"], dp_dir, dp_fn])
|
dp_abs = "/".join([ptop, dp_dir, dp_fn])
|
||||||
try:
|
try:
|
||||||
st = bos.stat(dp_abs)
|
st = bos.stat(dp_abs)
|
||||||
if stat.S_ISLNK(st.st_mode):
|
if stat.S_ISLNK(st.st_mode):
|
||||||
@@ -1951,14 +2035,14 @@ class Up2k(object):
|
|||||||
if n4g:
|
if n4g:
|
||||||
st = os.stat_result((0, -1, -1, 0, 0, 0, 0, 0, 0, 0))
|
st = os.stat_result((0, -1, -1, 0, 0, 0, 0, 0, 0, 0))
|
||||||
else:
|
else:
|
||||||
lost.append((dp_dir, dp_fn))
|
lost.append((cur, dp_dir, dp_fn))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
j = {
|
j = {
|
||||||
"name": dp_fn,
|
"name": dp_fn,
|
||||||
"prel": dp_dir,
|
"prel": dp_dir,
|
||||||
"vtop": cj["vtop"],
|
"vtop": vtop,
|
||||||
"ptop": cj["ptop"],
|
"ptop": ptop,
|
||||||
"sprs": sprs, # dontcare; finished anyways
|
"sprs": sprs, # dontcare; finished anyways
|
||||||
"size": dsize,
|
"size": dsize,
|
||||||
"lmod": dtime,
|
"lmod": dtime,
|
||||||
@@ -1985,14 +2069,27 @@ class Up2k(object):
|
|||||||
del reg[wark]
|
del reg[wark]
|
||||||
|
|
||||||
if lost:
|
if lost:
|
||||||
for dp_dir, dp_fn in lost:
|
c2 = None
|
||||||
|
for cur, dp_dir, dp_fn in lost:
|
||||||
self.db_rm(cur, dp_dir, dp_fn)
|
self.db_rm(cur, dp_dir, dp_fn)
|
||||||
|
if c2 and c2 != cur:
|
||||||
|
c2.connection.commit()
|
||||||
|
|
||||||
cur.connection.commit()
|
c2 = cur
|
||||||
|
|
||||||
|
assert c2
|
||||||
|
c2.connection.commit()
|
||||||
|
|
||||||
|
cur = jcur
|
||||||
|
ptop = None # use cj or job as appropriate
|
||||||
|
|
||||||
if job or wark in reg:
|
if job or wark in reg:
|
||||||
job = job or reg[wark]
|
job = job or reg[wark]
|
||||||
if job["prel"] == cj["prel"] and job["name"] == cj["name"]:
|
if (
|
||||||
|
job["ptop"] == cj["ptop"]
|
||||||
|
and job["prel"] == cj["prel"]
|
||||||
|
and job["name"] == cj["name"]
|
||||||
|
):
|
||||||
# ensure the files haven't been deleted manually
|
# ensure the files haven't been deleted manually
|
||||||
names = [job[x] for x in ["name", "tnam"] if x in job]
|
names = [job[x] for x in ["name", "tnam"] if x in job]
|
||||||
for fn in names:
|
for fn in names:
|
||||||
@@ -2026,13 +2123,13 @@ class Up2k(object):
|
|||||||
except:
|
except:
|
||||||
self.dupesched[src] = [dupe]
|
self.dupesched[src] = [dupe]
|
||||||
|
|
||||||
raise Pebkac(400, err)
|
raise Pebkac(422, err)
|
||||||
|
|
||||||
elif "nodupe" in self.flags[job["ptop"]]:
|
elif "nodupe" in self.flags[cj["ptop"]]:
|
||||||
self.log("dupe-reject:\n {0}\n {1}".format(src, dst))
|
self.log("dupe-reject:\n {0}\n {1}".format(src, dst))
|
||||||
err = "upload rejected, file already exists:\n"
|
err = "upload rejected, file already exists:\n"
|
||||||
err += "/" + quotep(vsrc) + " "
|
err += "/" + quotep(vsrc) + " "
|
||||||
raise Pebkac(400, err)
|
raise Pebkac(409, err)
|
||||||
else:
|
else:
|
||||||
# symlink to the client-provided name,
|
# symlink to the client-provided name,
|
||||||
# returning the previous upload info
|
# returning the previous upload info
|
||||||
@@ -2041,7 +2138,7 @@ class Up2k(object):
|
|||||||
job[k] = cj[k]
|
job[k] = cj[k]
|
||||||
|
|
||||||
pdir = djoin(cj["ptop"], cj["prel"])
|
pdir = djoin(cj["ptop"], cj["prel"])
|
||||||
job["name"] = self._untaken(pdir, cj["name"], now, cj["addr"])
|
job["name"] = self._untaken(pdir, cj, now)
|
||||||
dst = os.path.join(job["ptop"], job["prel"], job["name"])
|
dst = os.path.join(job["ptop"], job["prel"], job["name"])
|
||||||
if not self.args.nw:
|
if not self.args.nw:
|
||||||
bos.unlink(dst) # TODO ed pls
|
bos.unlink(dst) # TODO ed pls
|
||||||
@@ -2088,7 +2185,7 @@ class Up2k(object):
|
|||||||
]:
|
]:
|
||||||
job[k] = cj[k]
|
job[k] = cj[k]
|
||||||
|
|
||||||
for k in ["life"]:
|
for k in ["life", "replace"]:
|
||||||
if k in cj:
|
if k in cj:
|
||||||
job[k] = cj[k]
|
job[k] = cj[k]
|
||||||
|
|
||||||
@@ -2120,10 +2217,18 @@ class Up2k(object):
|
|||||||
"wark": wark,
|
"wark": wark,
|
||||||
}
|
}
|
||||||
|
|
||||||
def _untaken(self, fdir: str, fname: str, ts: float, ip: str) -> str:
|
def _untaken(self, fdir: str, job: dict[str, Any], ts: float) -> str:
|
||||||
|
fname = job["name"]
|
||||||
|
ip = job["addr"]
|
||||||
|
|
||||||
if self.args.nw:
|
if self.args.nw:
|
||||||
return fname
|
return fname
|
||||||
|
|
||||||
|
fp = os.path.join(fdir, fname)
|
||||||
|
if job.get("replace") and bos.path.exists(fp):
|
||||||
|
self.log("replacing existing file at {}".format(fp))
|
||||||
|
bos.unlink(fp)
|
||||||
|
|
||||||
if self.args.plain_ip:
|
if self.args.plain_ip:
|
||||||
dip = ip.replace(":", ".")
|
dip = ip.replace(":", ".")
|
||||||
else:
|
else:
|
||||||
@@ -2315,7 +2420,7 @@ class Up2k(object):
|
|||||||
flt = job["life"]
|
flt = job["life"]
|
||||||
vfs = self.asrv.vfs.all_vols[job["vtop"]]
|
vfs = self.asrv.vfs.all_vols[job["vtop"]]
|
||||||
vlt = vfs.flags["lifetime"]
|
vlt = vfs.flags["lifetime"]
|
||||||
if vlt and flt < vlt:
|
if vlt and flt > 1 and flt < vlt:
|
||||||
upt -= vlt - flt
|
upt -= vlt - flt
|
||||||
wake_sr = True
|
wake_sr = True
|
||||||
t = "using client lifetime; at={:.0f} ({}-{})"
|
t = "using client lifetime; at={:.0f} ({}-{})"
|
||||||
@@ -2426,12 +2531,16 @@ class Up2k(object):
|
|||||||
v = (wark, int(ts), sz, rd, fn, ip or "", int(at or 0))
|
v = (wark, int(ts), sz, rd, fn, ip or "", int(at or 0))
|
||||||
db.execute(sql, v)
|
db.execute(sql, v)
|
||||||
|
|
||||||
def handle_rm(self, uname: str, ip: str, vpaths: list[str]) -> str:
|
def handle_rm(self, uname: str, ip: str, vpaths: list[str], lim: list[int]) -> str:
|
||||||
n_files = 0
|
n_files = 0
|
||||||
ok = {}
|
ok = {}
|
||||||
ng = {}
|
ng = {}
|
||||||
for vp in vpaths:
|
for vp in vpaths:
|
||||||
a, b, c = self._handle_rm(uname, ip, vp)
|
if lim and lim[0] <= 0:
|
||||||
|
self.log("hit delete limit of {} files".format(lim[1]), 3)
|
||||||
|
break
|
||||||
|
|
||||||
|
a, b, c = self._handle_rm(uname, ip, vp, lim)
|
||||||
n_files += a
|
n_files += a
|
||||||
for k in b:
|
for k in b:
|
||||||
ok[k] = 1
|
ok[k] = 1
|
||||||
@@ -2445,7 +2554,7 @@ class Up2k(object):
|
|||||||
return "deleted {} files (and {}/{} folders)".format(n_files, iok, iok + ing)
|
return "deleted {} files (and {}/{} folders)".format(n_files, iok, iok + ing)
|
||||||
|
|
||||||
def _handle_rm(
|
def _handle_rm(
|
||||||
self, uname: str, ip: str, vpath: str
|
self, uname: str, ip: str, vpath: str, lim: list[int]
|
||||||
) -> tuple[int, list[str], list[str]]:
|
) -> tuple[int, list[str], list[str]]:
|
||||||
self.db_act = time.time()
|
self.db_act = time.time()
|
||||||
try:
|
try:
|
||||||
@@ -2504,6 +2613,12 @@ class Up2k(object):
|
|||||||
n_files = 0
|
n_files = 0
|
||||||
for dbv, vrem, _, adir, files, rd, vd in g:
|
for dbv, vrem, _, adir, files, rd, vd in g:
|
||||||
for fn in [x[0] for x in files]:
|
for fn in [x[0] for x in files]:
|
||||||
|
if lim:
|
||||||
|
lim[0] -= 1
|
||||||
|
if lim[0] < 0:
|
||||||
|
self.log("hit delete limit of {} files".format(lim[1]), 3)
|
||||||
|
break
|
||||||
|
|
||||||
n_files += 1
|
n_files += 1
|
||||||
abspath = os.path.join(adir, fn)
|
abspath = os.path.join(adir, fn)
|
||||||
volpath = "{}/{}".format(vrem, fn).strip("/")
|
volpath = "{}/{}".format(vrem, fn).strip("/")
|
||||||
@@ -2536,6 +2651,7 @@ class Up2k(object):
|
|||||||
svn, srem = self.asrv.vfs.get(svp, uname, True, False, True)
|
svn, srem = self.asrv.vfs.get(svp, uname, True, False, True)
|
||||||
svn, srem = svn.get_dbv(srem)
|
svn, srem = svn.get_dbv(srem)
|
||||||
sabs = svn.canonical(srem, False)
|
sabs = svn.canonical(srem, False)
|
||||||
|
curs: set["sqlite3.Cursor"] = set()
|
||||||
|
|
||||||
if not srem:
|
if not srem:
|
||||||
raise Pebkac(400, "mv: cannot move a mountpoint")
|
raise Pebkac(400, "mv: cannot move a mountpoint")
|
||||||
@@ -2543,7 +2659,13 @@ class Up2k(object):
|
|||||||
st = bos.lstat(sabs)
|
st = bos.lstat(sabs)
|
||||||
if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
|
if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
|
||||||
with self.mutex:
|
with self.mutex:
|
||||||
return self._mv_file(uname, svp, dvp)
|
try:
|
||||||
|
ret = self._mv_file(uname, svp, dvp, curs)
|
||||||
|
finally:
|
||||||
|
for v in curs:
|
||||||
|
v.connection.commit()
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
jail = svn.get_dbv(srem)[0]
|
jail = svn.get_dbv(srem)[0]
|
||||||
permsets = [[True, False, True]]
|
permsets = [[True, False, True]]
|
||||||
@@ -2562,20 +2684,29 @@ class Up2k(object):
|
|||||||
# the actual check (avoid toctou)
|
# the actual check (avoid toctou)
|
||||||
raise Pebkac(400, "mv: source folder contains other volumes")
|
raise Pebkac(400, "mv: source folder contains other volumes")
|
||||||
|
|
||||||
|
with self.mutex:
|
||||||
|
try:
|
||||||
for fn in files:
|
for fn in files:
|
||||||
|
self.db_act = time.time()
|
||||||
svpf = "/".join(x for x in [dbv.vpath, vrem, fn[0]] if x)
|
svpf = "/".join(x for x in [dbv.vpath, vrem, fn[0]] if x)
|
||||||
if not svpf.startswith(svp + "/"): # assert
|
if not svpf.startswith(svp + "/"): # assert
|
||||||
raise Pebkac(500, "mv: bug at {}, top {}".format(svpf, svp))
|
raise Pebkac(500, "mv: bug at {}, top {}".format(svpf, svp))
|
||||||
|
|
||||||
dvpf = dvp + svpf[len(svp) :]
|
dvpf = dvp + svpf[len(svp) :]
|
||||||
with self.mutex:
|
self._mv_file(uname, svpf, dvpf, curs)
|
||||||
self._mv_file(uname, svpf, dvpf)
|
finally:
|
||||||
|
for v in curs:
|
||||||
|
v.connection.commit()
|
||||||
|
|
||||||
|
curs.clear()
|
||||||
|
|
||||||
rmdirs(self.log_func, scandir, True, sabs, 1)
|
rmdirs(self.log_func, scandir, True, sabs, 1)
|
||||||
rmdirs_up(os.path.dirname(sabs))
|
rmdirs_up(os.path.dirname(sabs))
|
||||||
return "k"
|
return "k"
|
||||||
|
|
||||||
def _mv_file(self, uname: str, svp: str, dvp: str) -> str:
|
def _mv_file(
|
||||||
|
self, uname: str, svp: str, dvp: str, curs: set["sqlite3.Cursor"]
|
||||||
|
) -> str:
|
||||||
svn, srem = self.asrv.vfs.get(svp, uname, True, False, True)
|
svn, srem = self.asrv.vfs.get(svp, uname, True, False, True)
|
||||||
svn, srem = svn.get_dbv(srem)
|
svn, srem = svn.get_dbv(srem)
|
||||||
|
|
||||||
@@ -2633,18 +2764,18 @@ class Up2k(object):
|
|||||||
|
|
||||||
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
|
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
|
||||||
self._relink(w, svn.realpath, srem, dabs)
|
self._relink(w, svn.realpath, srem, dabs)
|
||||||
c1.connection.commit()
|
curs.add(c1)
|
||||||
|
|
||||||
if c2:
|
if c2:
|
||||||
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
|
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
|
||||||
c2.connection.commit()
|
curs.add(c2)
|
||||||
else:
|
else:
|
||||||
self.log("not found in src db: [{}]".format(svp))
|
self.log("not found in src db: [{}]".format(svp))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
atomic_move(sabs, dabs)
|
atomic_move(sabs, dabs)
|
||||||
except OSError as ex:
|
except OSError as ex:
|
||||||
if ex.errno != 18:
|
if ex.errno != errno.EXDEV:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
self.log("cross-device move:\n {}\n {}".format(sabs, dabs))
|
self.log("cross-device move:\n {}\n {}".format(sabs, dabs))
|
||||||
@@ -2872,7 +3003,7 @@ class Up2k(object):
|
|||||||
return
|
return
|
||||||
|
|
||||||
self.registry[job["ptop"]][job["wark"]] = job
|
self.registry[job["ptop"]][job["wark"]] = job
|
||||||
job["name"] = self._untaken(pdir, job["name"], job["t0"], job["addr"])
|
job["name"] = self._untaken(pdir, job, job["t0"])
|
||||||
# if len(job["name"].split(".")) > 8:
|
# if len(job["name"].split(".")) > 8:
|
||||||
# raise Exception("aaa")
|
# raise Exception("aaa")
|
||||||
|
|
||||||
@@ -2940,11 +3071,11 @@ class Up2k(object):
|
|||||||
|
|
||||||
def _lastmodder(self) -> None:
|
def _lastmodder(self) -> None:
|
||||||
while True:
|
while True:
|
||||||
ready = self.lastmod_q
|
ready = self.lastmod_q2
|
||||||
|
self.lastmod_q2 = self.lastmod_q
|
||||||
self.lastmod_q = []
|
self.lastmod_q = []
|
||||||
|
|
||||||
# self.log("lmod: got {}".format(len(ready)))
|
time.sleep(1)
|
||||||
time.sleep(5)
|
|
||||||
for path, sz, times, sparse in ready:
|
for path, sz, times, sparse in ready:
|
||||||
self.log("lmod: setting times {} on {}".format(times, path))
|
self.log("lmod: setting times {} on {}".format(times, path))
|
||||||
try:
|
try:
|
||||||
@@ -2986,6 +3117,9 @@ class Up2k(object):
|
|||||||
if x["need"] and now - x["poke"] > self.snap_discard_interval
|
if x["need"] and now - x["poke"] > self.snap_discard_interval
|
||||||
]
|
]
|
||||||
|
|
||||||
|
if self.args.nw:
|
||||||
|
lost = []
|
||||||
|
else:
|
||||||
lost = [
|
lost = [
|
||||||
x
|
x
|
||||||
for x in reg.values()
|
for x in reg.values()
|
||||||
@@ -3017,7 +3151,7 @@ class Up2k(object):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if self.args.nw:
|
if self.args.nw or self.args.no_snap:
|
||||||
return
|
return
|
||||||
|
|
||||||
path = os.path.join(histpath, "up2k.snap")
|
path = os.path.join(histpath, "up2k.snap")
|
||||||
@@ -3033,7 +3167,8 @@ class Up2k(object):
|
|||||||
if etag == self.snap_prev.get(ptop):
|
if etag == self.snap_prev.get(ptop):
|
||||||
return
|
return
|
||||||
|
|
||||||
bos.makedirs(histpath)
|
if bos.makedirs(histpath):
|
||||||
|
hidedir(histpath)
|
||||||
|
|
||||||
path2 = "{}.{}".format(path, os.getpid())
|
path2 = "{}.{}".format(path, os.getpid())
|
||||||
body = {"droppable": self.droppable[ptop], "registry": reg}
|
body = {"droppable": self.droppable[ptop], "registry": reg}
|
||||||
@@ -3146,12 +3281,24 @@ class Up2k(object):
|
|||||||
if self.mth:
|
if self.mth:
|
||||||
self.mth.stop = True
|
self.mth.stop = True
|
||||||
|
|
||||||
|
# in case we're killed early
|
||||||
for x in list(self.spools):
|
for x in list(self.spools):
|
||||||
self._unspool(x)
|
self._unspool(x)
|
||||||
|
|
||||||
|
if not self.args.no_snap:
|
||||||
self.log("writing snapshot")
|
self.log("writing snapshot")
|
||||||
self.do_snapshot()
|
self.do_snapshot()
|
||||||
|
|
||||||
|
t0 = time.time()
|
||||||
|
while self.pp:
|
||||||
|
time.sleep(0.1)
|
||||||
|
if time.time() - t0 >= 1:
|
||||||
|
break
|
||||||
|
|
||||||
|
# if there is time
|
||||||
|
for x in list(self.spools):
|
||||||
|
self._unspool(x)
|
||||||
|
|
||||||
|
|
||||||
def up2k_chunksize(filesize: int) -> int:
|
def up2k_chunksize(filesize: int) -> int:
|
||||||
chunksize = 1024 * 1024
|
chunksize = 1024 * 1024
|
||||||
@@ -3159,7 +3306,7 @@ def up2k_chunksize(filesize: int) -> int:
|
|||||||
while True:
|
while True:
|
||||||
for mul in [1, 2]:
|
for mul in [1, 2]:
|
||||||
nchunks = math.ceil(filesize * 1.0 / chunksize)
|
nchunks = math.ceil(filesize * 1.0 / chunksize)
|
||||||
if nchunks <= 256 or chunksize >= 32 * 1024 * 1024:
|
if nchunks <= 256 or (chunksize >= 32 * 1024 * 1024 and nchunks <= 4096):
|
||||||
return chunksize
|
return chunksize
|
||||||
|
|
||||||
chunksize += stepsize
|
chunksize += stepsize
|
||||||
|
|||||||
@@ -3,8 +3,10 @@ from __future__ import print_function, unicode_literals
|
|||||||
|
|
||||||
import base64
|
import base64
|
||||||
import contextlib
|
import contextlib
|
||||||
|
import errno
|
||||||
import hashlib
|
import hashlib
|
||||||
import hmac
|
import hmac
|
||||||
|
import logging
|
||||||
import math
|
import math
|
||||||
import mimetypes
|
import mimetypes
|
||||||
import os
|
import os
|
||||||
@@ -22,13 +24,37 @@ import time
|
|||||||
import traceback
|
import traceback
|
||||||
from collections import Counter
|
from collections import Counter
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
from email.utils import formatdate
|
||||||
|
|
||||||
|
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network
|
||||||
from queue import Queue
|
from queue import Queue
|
||||||
|
|
||||||
from .__init__ import ANYWIN, MACOS, PY2, TYPE_CHECKING, VT100, WINDOWS
|
from .__init__ import ANYWIN, MACOS, PY2, TYPE_CHECKING, VT100, WINDOWS
|
||||||
from .__version__ import S_BUILD_DT, S_VERSION
|
from .__version__ import S_BUILD_DT, S_VERSION
|
||||||
from .stolen import surrogateescape
|
from .stolen import surrogateescape
|
||||||
|
|
||||||
|
|
||||||
|
def _ens(want: str) -> tuple[int, ...]:
|
||||||
|
ret: list[int] = []
|
||||||
|
for v in want.split():
|
||||||
|
try:
|
||||||
|
ret.append(getattr(errno, v))
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return tuple(ret)
|
||||||
|
|
||||||
|
|
||||||
|
# WSAECONNRESET - foribly closed by remote
|
||||||
|
# WSAENOTSOCK - no longer a socket
|
||||||
|
# EUNATCH - can't assign requested address (wifi down)
|
||||||
|
E_SCK = _ens("ENOTCONN EUNATCH EBADF WSAENOTSOCK WSAECONNRESET")
|
||||||
|
E_ADDR_NOT_AVAIL = _ens("EADDRNOTAVAIL WSAEADDRNOTAVAIL")
|
||||||
|
E_ADDR_IN_USE = _ens("EADDRINUSE WSAEADDRINUSE")
|
||||||
|
E_ACCESS = _ens("EACCES WSAEACCES")
|
||||||
|
E_UNREACH = _ens("EHOSTUNREACH WSAEHOSTUNREACH ENETUNREACH WSAENETUNREACH")
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import ctypes
|
import ctypes
|
||||||
import fcntl
|
import fcntl
|
||||||
@@ -36,11 +62,6 @@ try:
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
try:
|
|
||||||
from ipaddress import IPv6Address
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
HAVE_SQLITE3 = True
|
HAVE_SQLITE3 = True
|
||||||
import sqlite3 # pylint: disable=unused-import # typechk
|
import sqlite3 # pylint: disable=unused-import # typechk
|
||||||
@@ -53,7 +74,7 @@ try:
|
|||||||
except:
|
except:
|
||||||
HAVE_PSUTIL = False
|
HAVE_PSUTIL = False
|
||||||
|
|
||||||
try:
|
if True: # pylint: disable=using-constant-test
|
||||||
import types
|
import types
|
||||||
from collections.abc import Callable, Iterable
|
from collections.abc import Callable, Iterable
|
||||||
|
|
||||||
@@ -68,8 +89,6 @@ try:
|
|||||||
def __call__(self, msg: str, c: Union[int, str] = 0) -> None:
|
def __call__(self, msg: str, c: Union[int, str] = 0) -> None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
import magic
|
import magic
|
||||||
@@ -79,10 +98,9 @@ if TYPE_CHECKING:
|
|||||||
FAKE_MP = False
|
FAKE_MP = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if not FAKE_MP:
|
|
||||||
import multiprocessing as mp
|
import multiprocessing as mp
|
||||||
else:
|
|
||||||
import multiprocessing.dummy as mp # type: ignore
|
# import multiprocessing.dummy as mp
|
||||||
except ImportError:
|
except ImportError:
|
||||||
# support jython
|
# support jython
|
||||||
mp = None # type: ignore
|
mp = None # type: ignore
|
||||||
@@ -122,24 +140,29 @@ else:
|
|||||||
|
|
||||||
SYMTIME = sys.version_info > (3, 6) and os.utime in os.supports_follow_symlinks
|
SYMTIME = sys.version_info > (3, 6) and os.utime in os.supports_follow_symlinks
|
||||||
|
|
||||||
HTTP_TS_FMT = "%a, %d %b %Y %H:%M:%S GMT"
|
|
||||||
|
|
||||||
META_NOBOTS = '<meta name="robots" content="noindex, nofollow">'
|
META_NOBOTS = '<meta name="robots" content="noindex, nofollow">'
|
||||||
|
|
||||||
HTTPCODE = {
|
HTTPCODE = {
|
||||||
200: "OK",
|
200: "OK",
|
||||||
|
201: "Created",
|
||||||
204: "No Content",
|
204: "No Content",
|
||||||
206: "Partial Content",
|
206: "Partial Content",
|
||||||
|
207: "Multi-Status",
|
||||||
|
301: "Moved Permanently",
|
||||||
302: "Found",
|
302: "Found",
|
||||||
304: "Not Modified",
|
304: "Not Modified",
|
||||||
400: "Bad Request",
|
400: "Bad Request",
|
||||||
|
401: "Unauthorized",
|
||||||
403: "Forbidden",
|
403: "Forbidden",
|
||||||
404: "Not Found",
|
404: "Not Found",
|
||||||
405: "Method Not Allowed",
|
405: "Method Not Allowed",
|
||||||
|
409: "Conflict",
|
||||||
411: "Length Required",
|
411: "Length Required",
|
||||||
|
412: "Precondition Failed",
|
||||||
413: "Payload Too Large",
|
413: "Payload Too Large",
|
||||||
416: "Requested Range Not Satisfiable",
|
416: "Requested Range Not Satisfiable",
|
||||||
422: "Unprocessable Entity",
|
422: "Unprocessable Entity",
|
||||||
|
423: "Locked",
|
||||||
429: "Too Many Requests",
|
429: "Too Many Requests",
|
||||||
500: "Internal Server Error",
|
500: "Internal Server Error",
|
||||||
501: "Not Implemented",
|
501: "Not Implemented",
|
||||||
@@ -156,7 +179,27 @@ IMPLICATIONS = [
|
|||||||
["e2vu", "e2v"],
|
["e2vu", "e2v"],
|
||||||
["e2vp", "e2v"],
|
["e2vp", "e2v"],
|
||||||
["e2v", "e2d"],
|
["e2v", "e2d"],
|
||||||
|
["smbw", "smb"],
|
||||||
|
["smb1", "smb"],
|
||||||
|
["smbvvv", "smbvv"],
|
||||||
|
["smbvv", "smbv"],
|
||||||
|
["smbv", "smb"],
|
||||||
|
["zv", "zmv"],
|
||||||
|
["zv", "zsv"],
|
||||||
|
["z", "zm"],
|
||||||
|
["z", "zs"],
|
||||||
|
["zmvv", "zmv"],
|
||||||
|
["zm4", "zm"],
|
||||||
|
["zm6", "zm"],
|
||||||
|
["zmv", "zm"],
|
||||||
|
["zms", "zm"],
|
||||||
|
["zsv", "zs"],
|
||||||
]
|
]
|
||||||
|
if ANYWIN:
|
||||||
|
IMPLICATIONS.extend([["z", "zm4"]])
|
||||||
|
|
||||||
|
|
||||||
|
UNPLICATIONS = [["no_dav", "daw"]]
|
||||||
|
|
||||||
|
|
||||||
MIMES = {
|
MIMES = {
|
||||||
@@ -258,7 +301,7 @@ def py_desc() -> str:
|
|||||||
bitness = struct.calcsize("P") * 8
|
bitness = struct.calcsize("P") * 8
|
||||||
|
|
||||||
host_os = platform.system()
|
host_os = platform.system()
|
||||||
compiler = platform.python_compiler()
|
compiler = platform.python_compiler().split("http")[0]
|
||||||
|
|
||||||
m = re.search(r"([0-9]+\.[0-9\.]+)", platform.version())
|
m = re.search(r"([0-9]+\.[0-9\.]+)", platform.version())
|
||||||
os_ver = m.group(1) if m else ""
|
os_ver = m.group(1) if m else ""
|
||||||
@@ -311,6 +354,37 @@ _: Any = (mp, BytesIO, quote, unquote, SQLITE_VER, JINJA_VER, PYFTPD_VER)
|
|||||||
__all__ = ["mp", "BytesIO", "quote", "unquote", "SQLITE_VER", "JINJA_VER", "PYFTPD_VER"]
|
__all__ = ["mp", "BytesIO", "quote", "unquote", "SQLITE_VER", "JINJA_VER", "PYFTPD_VER"]
|
||||||
|
|
||||||
|
|
||||||
|
class Daemon(threading.Thread):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
target: Any,
|
||||||
|
name: Optional[str] = None,
|
||||||
|
a: Optional[Iterable[Any]] = None,
|
||||||
|
r: bool = True,
|
||||||
|
) -> None:
|
||||||
|
threading.Thread.__init__(self, target=target, name=name, args=a or ())
|
||||||
|
self.daemon = True
|
||||||
|
if r:
|
||||||
|
self.start()
|
||||||
|
|
||||||
|
|
||||||
|
class Netdev(object):
|
||||||
|
def __init__(self, ip: str, idx: int, name: str, desc: str):
|
||||||
|
self.ip = ip
|
||||||
|
self.idx = idx
|
||||||
|
self.name = name
|
||||||
|
self.desc = desc
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return "{}-{}{}".format(self.idx, self.name, self.desc)
|
||||||
|
|
||||||
|
def __lt__(self, rhs):
|
||||||
|
return str(self) < str(rhs)
|
||||||
|
|
||||||
|
def __eq__(self, rhs):
|
||||||
|
return str(self) == str(rhs)
|
||||||
|
|
||||||
|
|
||||||
class Cooldown(object):
|
class Cooldown(object):
|
||||||
def __init__(self, maxage: float) -> None:
|
def __init__(self, maxage: float) -> None:
|
||||||
self.maxage = maxage
|
self.maxage = maxage
|
||||||
@@ -337,6 +411,96 @@ class Cooldown(object):
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
|
class HLog(logging.Handler):
|
||||||
|
def __init__(self, log_func: "RootLogger") -> None:
|
||||||
|
logging.Handler.__init__(self)
|
||||||
|
self.log_func = log_func
|
||||||
|
self.ptn_ftp = re.compile(r"^([0-9a-f:\.]+:[0-9]{1,5})-\[")
|
||||||
|
self.ptn_smb_ign = re.compile(r"^(Callback added|Config file parsed)")
|
||||||
|
|
||||||
|
def __repr__(self) -> str:
|
||||||
|
level = logging.getLevelName(self.level)
|
||||||
|
return "<%s cpp(%s)>" % (self.__class__.__name__, level)
|
||||||
|
|
||||||
|
def flush(self) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def emit(self, record: logging.LogRecord) -> None:
|
||||||
|
msg = self.format(record)
|
||||||
|
lv = record.levelno
|
||||||
|
if lv < logging.INFO:
|
||||||
|
c = 6
|
||||||
|
elif lv < logging.WARNING:
|
||||||
|
c = 0
|
||||||
|
elif lv < logging.ERROR:
|
||||||
|
c = 3
|
||||||
|
else:
|
||||||
|
c = 1
|
||||||
|
|
||||||
|
if record.name.startswith("PIL") and lv < logging.WARNING:
|
||||||
|
return
|
||||||
|
elif record.name == "pyftpdlib":
|
||||||
|
m = self.ptn_ftp.match(msg)
|
||||||
|
if m:
|
||||||
|
ip = m.group(1)
|
||||||
|
msg = msg[len(ip) + 1 :]
|
||||||
|
if ip.startswith("::ffff:"):
|
||||||
|
record.name = ip[7:]
|
||||||
|
else:
|
||||||
|
record.name = ip
|
||||||
|
elif record.name.startswith("impacket"):
|
||||||
|
if self.ptn_smb_ign.match(msg):
|
||||||
|
return
|
||||||
|
|
||||||
|
self.log_func(record.name[-21:], msg, c)
|
||||||
|
|
||||||
|
|
||||||
|
class NetMap(object):
|
||||||
|
def __init__(self, ips: list[str], netdevs: dict[str, Netdev]) -> None:
|
||||||
|
if "::" in ips:
|
||||||
|
ips = [x for x in ips if x != "::"] + list(
|
||||||
|
[x.split("/")[0] for x in netdevs if ":" in x]
|
||||||
|
)
|
||||||
|
ips.append("0.0.0.0")
|
||||||
|
|
||||||
|
if "0.0.0.0" in ips:
|
||||||
|
ips = [x for x in ips if x != "0.0.0.0"] + list(
|
||||||
|
[x.split("/")[0] for x in netdevs if ":" not in x]
|
||||||
|
)
|
||||||
|
|
||||||
|
ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
|
||||||
|
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips]
|
||||||
|
|
||||||
|
self.cache: dict[str, str] = {}
|
||||||
|
self.b2sip: dict[bytes, str] = {}
|
||||||
|
self.b2net: dict[bytes, Union[IPv4Network, IPv6Network]] = {}
|
||||||
|
self.bip: list[bytes] = []
|
||||||
|
for ip in ips:
|
||||||
|
v6 = ":" in ip
|
||||||
|
fam = socket.AF_INET6 if v6 else socket.AF_INET
|
||||||
|
bip = socket.inet_pton(fam, ip.split("/")[0])
|
||||||
|
self.bip.append(bip)
|
||||||
|
self.b2sip[bip] = ip.split("/")[0]
|
||||||
|
self.b2net[bip] = (IPv6Network if v6 else IPv4Network)(ip, False)
|
||||||
|
|
||||||
|
self.bip.sort(reverse=True)
|
||||||
|
|
||||||
|
def map(self, ip: str) -> str:
|
||||||
|
try:
|
||||||
|
return self.cache[ip]
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
v6 = ":" in ip
|
||||||
|
ci = IPv6Address(ip) if v6 else IPv4Address(ip)
|
||||||
|
bip = next((x for x in self.bip if ci in self.b2net[x]), None)
|
||||||
|
ret = self.b2sip[bip] if bip else ""
|
||||||
|
if len(self.cache) > 9000:
|
||||||
|
self.cache = {}
|
||||||
|
self.cache[ip] = ret
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
class UnrecvEOF(OSError):
|
class UnrecvEOF(OSError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -357,7 +521,16 @@ class _Unrecv(object):
|
|||||||
self.buf = self.buf[nbytes:]
|
self.buf = self.buf[nbytes:]
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
ret = self.s.recv(nbytes)
|
ret = self.s.recv(nbytes)
|
||||||
|
break
|
||||||
|
except socket.timeout:
|
||||||
|
continue
|
||||||
|
except:
|
||||||
|
ret = b""
|
||||||
|
break
|
||||||
|
|
||||||
if not ret:
|
if not ret:
|
||||||
raise UnrecvEOF("client stopped sending data")
|
raise UnrecvEOF("client stopped sending data")
|
||||||
|
|
||||||
@@ -444,6 +617,27 @@ class _LUnrecv(object):
|
|||||||
Unrecv = _Unrecv
|
Unrecv = _Unrecv
|
||||||
|
|
||||||
|
|
||||||
|
class CachedSet(object):
|
||||||
|
def __init__(self, maxage: float) -> None:
|
||||||
|
self.c: dict[Any, float] = {}
|
||||||
|
self.maxage = maxage
|
||||||
|
self.oldest = 0.0
|
||||||
|
|
||||||
|
def add(self, v: Any) -> None:
|
||||||
|
self.c[v] = time.time()
|
||||||
|
|
||||||
|
def cln(self) -> None:
|
||||||
|
now = time.time()
|
||||||
|
if now - self.oldest < self.maxage:
|
||||||
|
return
|
||||||
|
|
||||||
|
c = self.c = {k: v for k, v in self.c.items() if now - v < self.maxage}
|
||||||
|
try:
|
||||||
|
self.oldest = c[min(c, key=c.get)]
|
||||||
|
except:
|
||||||
|
self.oldest = now
|
||||||
|
|
||||||
|
|
||||||
class FHC(object):
|
class FHC(object):
|
||||||
class CE(object):
|
class CE(object):
|
||||||
def __init__(self, fh: typing.BinaryIO) -> None:
|
def __init__(self, fh: typing.BinaryIO) -> None:
|
||||||
@@ -540,9 +734,7 @@ class MTHash(object):
|
|||||||
self.done_q: Queue[tuple[int, str, int, int]] = Queue()
|
self.done_q: Queue[tuple[int, str, int, int]] = Queue()
|
||||||
self.thrs = []
|
self.thrs = []
|
||||||
for n in range(cores):
|
for n in range(cores):
|
||||||
t = threading.Thread(target=self.worker, name="mth-" + str(n))
|
t = Daemon(self.worker, "mth-" + str(n))
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
self.thrs.append(t)
|
self.thrs.append(t)
|
||||||
|
|
||||||
def hash(
|
def hash(
|
||||||
@@ -643,6 +835,9 @@ class HMaccas(object):
|
|||||||
try:
|
try:
|
||||||
return self.cache[msg]
|
return self.cache[msg]
|
||||||
except:
|
except:
|
||||||
|
if len(self.cache) > 9000:
|
||||||
|
self.cache = {}
|
||||||
|
|
||||||
zb = hmac.new(self.key, msg, hashlib.sha512).digest()
|
zb = hmac.new(self.key, msg, hashlib.sha512).digest()
|
||||||
zs = base64.urlsafe_b64encode(zb)[: self.retlen].decode("utf-8")
|
zs = base64.urlsafe_b64encode(zb)[: self.retlen].decode("utf-8")
|
||||||
self.cache[msg] = zs
|
self.cache[msg] = zs
|
||||||
@@ -743,7 +938,7 @@ class Garda(object):
|
|||||||
if not self.lim:
|
if not self.lim:
|
||||||
return 0, ip
|
return 0, ip
|
||||||
|
|
||||||
if ":" in ip and not PY2:
|
if ":" in ip:
|
||||||
# assume /64 clients; drop 4 groups
|
# assume /64 clients; drop 4 groups
|
||||||
ip = IPv6Address(ip).exploded[:-20]
|
ip = IPv6Address(ip).exploded[:-20]
|
||||||
|
|
||||||
@@ -845,20 +1040,14 @@ def alltrace() -> str:
|
|||||||
else:
|
else:
|
||||||
rret += ret
|
rret += ret
|
||||||
|
|
||||||
return "\n".join(rret + bret)
|
return "\n".join(rret + bret) + "\n"
|
||||||
|
|
||||||
|
|
||||||
def start_stackmon(arg_str: str, nid: int) -> None:
|
def start_stackmon(arg_str: str, nid: int) -> None:
|
||||||
suffix = "-{}".format(nid) if nid else ""
|
suffix = "-{}".format(nid) if nid else ""
|
||||||
fp, f = arg_str.rsplit(",", 1)
|
fp, f = arg_str.rsplit(",", 1)
|
||||||
zi = int(f)
|
zi = int(f)
|
||||||
t = threading.Thread(
|
Daemon(stackmon, "stackmon" + suffix, (fp, zi, suffix))
|
||||||
target=stackmon,
|
|
||||||
args=(fp, zi, suffix),
|
|
||||||
name="stackmon" + suffix,
|
|
||||||
)
|
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
|
|
||||||
def stackmon(fp: str, ival: float, suffix: str) -> None:
|
def stackmon(fp: str, ival: float, suffix: str) -> None:
|
||||||
@@ -911,13 +1100,7 @@ def start_log_thrs(
|
|||||||
tname = "logthr-n{}-i{:x}".format(nid, os.getpid())
|
tname = "logthr-n{}-i{:x}".format(nid, os.getpid())
|
||||||
lname = tname[3:]
|
lname = tname[3:]
|
||||||
|
|
||||||
t = threading.Thread(
|
Daemon(log_thrs, tname, (logger, ival, lname))
|
||||||
target=log_thrs,
|
|
||||||
args=(logger, ival, lname),
|
|
||||||
name=tname,
|
|
||||||
)
|
|
||||||
t.daemon = True
|
|
||||||
t.start()
|
|
||||||
|
|
||||||
|
|
||||||
def log_thrs(log: Callable[[str, str, int], None], ival: float, name: str) -> None:
|
def log_thrs(log: Callable[[str, str, int], None], ival: float, name: str) -> None:
|
||||||
@@ -964,12 +1147,20 @@ def ren_open(
|
|||||||
fun = kwargs.pop("fun", open)
|
fun = kwargs.pop("fun", open)
|
||||||
fdir = kwargs.pop("fdir", None)
|
fdir = kwargs.pop("fdir", None)
|
||||||
suffix = kwargs.pop("suffix", None)
|
suffix = kwargs.pop("suffix", None)
|
||||||
|
overwrite = kwargs.pop("overwrite", None)
|
||||||
|
|
||||||
if fname == os.devnull:
|
if fname == os.devnull:
|
||||||
with fun(fname, *args, **kwargs) as f:
|
with fun(fname, *args, **kwargs) as f:
|
||||||
yield {"orz": (f, fname)}
|
yield {"orz": (f, fname)}
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if overwrite:
|
||||||
|
assert fdir
|
||||||
|
fpath = os.path.join(fdir, fname)
|
||||||
|
with fun(fsenc(fpath), *args, **kwargs) as f:
|
||||||
|
yield {"orz": (f, fname)}
|
||||||
|
return
|
||||||
|
|
||||||
if suffix:
|
if suffix:
|
||||||
ext = fname.split(".")[-1]
|
ext = fname.split(".")[-1]
|
||||||
if len(ext) < 7:
|
if len(ext) < 7:
|
||||||
@@ -1003,6 +1194,7 @@ def ren_open(
|
|||||||
|
|
||||||
with fun(fsenc(fpath), *args, **kwargs) as f:
|
with fun(fsenc(fpath), *args, **kwargs) as f:
|
||||||
if b64:
|
if b64:
|
||||||
|
assert fdir
|
||||||
fp2 = "fn-trunc.{}.txt".format(b64)
|
fp2 = "fn-trunc.{}.txt".format(b64)
|
||||||
fp2 = os.path.join(fdir, fp2)
|
fp2 = os.path.join(fdir, fp2)
|
||||||
with open(fsenc(fp2), "wb") as f2:
|
with open(fsenc(fp2), "wb") as f2:
|
||||||
@@ -1014,7 +1206,7 @@ def ren_open(
|
|||||||
except OSError as ex_:
|
except OSError as ex_:
|
||||||
ex = ex_
|
ex = ex_
|
||||||
|
|
||||||
if ex.errno == 22 and not asciified:
|
if ex.errno == errno.EINVAL and not asciified:
|
||||||
asciified = True
|
asciified = True
|
||||||
bname, fname = [
|
bname, fname = [
|
||||||
zs.encode("ascii", "replace").decode("ascii").replace("?", "_")
|
zs.encode("ascii", "replace").decode("ascii").replace("?", "_")
|
||||||
@@ -1022,11 +1214,14 @@ def ren_open(
|
|||||||
]
|
]
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if ex.errno not in [36, 63, 95] and (not WINDOWS or ex.errno != 22):
|
# ENOTSUP: zfs on ubuntu 20.04
|
||||||
|
if ex.errno not in (errno.ENAMETOOLONG, errno.ENOSR, errno.ENOTSUP) and (
|
||||||
|
not WINDOWS or ex.errno != errno.EINVAL
|
||||||
|
):
|
||||||
raise
|
raise
|
||||||
|
|
||||||
if not b64:
|
if not b64:
|
||||||
zs = (orig_name + "\n" + suffix).encode("utf-8", "replace")
|
zs = "{}\n{}".format(orig_name, suffix).encode("utf-8", "replace")
|
||||||
zs = hashlib.sha512(zs).digest()[:12]
|
zs = hashlib.sha512(zs).digest()[:12]
|
||||||
b64 = base64.urlsafe_b64encode(zs).decode("utf-8")
|
b64 = base64.urlsafe_b64encode(zs).decode("utf-8")
|
||||||
|
|
||||||
@@ -1115,7 +1310,7 @@ class MultipartParser(object):
|
|||||||
return field, None
|
return field, None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
is_webkit = self.headers["user-agent"].lower().find("applewebkit") >= 0
|
is_webkit = "applewebkit" in self.headers["user-agent"].lower()
|
||||||
except:
|
except:
|
||||||
is_webkit = False
|
is_webkit = False
|
||||||
|
|
||||||
@@ -1176,6 +1371,7 @@ class MultipartParser(object):
|
|||||||
buf = buf[d:]
|
buf = buf[d:]
|
||||||
|
|
||||||
# look for boundary near the end of the buffer
|
# look for boundary near the end of the buffer
|
||||||
|
n = 0
|
||||||
for n in range(1, len(buf) + 1):
|
for n in range(1, len(buf) + 1):
|
||||||
if not buf[-n:] in self.boundary:
|
if not buf[-n:] in self.boundary:
|
||||||
n -= 1
|
n -= 1
|
||||||
@@ -1280,8 +1476,12 @@ def get_boundary(headers: dict[str, str]) -> str:
|
|||||||
|
|
||||||
|
|
||||||
def read_header(sr: Unrecv) -> list[str]:
|
def read_header(sr: Unrecv) -> list[str]:
|
||||||
|
t0 = time.time()
|
||||||
ret = b""
|
ret = b""
|
||||||
while True:
|
while True:
|
||||||
|
if time.time() - t0 > 120:
|
||||||
|
return []
|
||||||
|
|
||||||
try:
|
try:
|
||||||
ret += sr.recv(1024)
|
ret += sr.recv(1024)
|
||||||
except:
|
except:
|
||||||
@@ -1327,8 +1527,24 @@ def gen_filekey_dbg(
|
|||||||
|
|
||||||
assert log_ptn
|
assert log_ptn
|
||||||
if log_ptn.search(fspath):
|
if log_ptn.search(fspath):
|
||||||
t = "fk({}) salt({}) size({}) inode({}) fspath({})"
|
try:
|
||||||
log(t.format(ret[:8], salt, fsize, inode, fspath))
|
import inspect
|
||||||
|
|
||||||
|
ctx = ",".join(inspect.stack()[n].function for n in range(2, 5))
|
||||||
|
except:
|
||||||
|
ctx = ""
|
||||||
|
|
||||||
|
p2 = "a"
|
||||||
|
try:
|
||||||
|
p2 = absreal(fspath)
|
||||||
|
if p2 != fspath:
|
||||||
|
raise Exception()
|
||||||
|
except:
|
||||||
|
t = "maybe wrong abspath for filekey;\norig: {}\nreal: {}"
|
||||||
|
log(t.format(fspath, p2), 1)
|
||||||
|
|
||||||
|
t = "fk({}) salt({}) size({}) inode({}) fspath({}) at({})"
|
||||||
|
log(t.format(ret[:8], salt, fsize, inode, fspath, ctx), 5)
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@@ -1336,8 +1552,7 @@ def gen_filekey_dbg(
|
|||||||
def gencookie(k: str, v: str, dur: Optional[int]) -> str:
|
def gencookie(k: str, v: str, dur: Optional[int]) -> str:
|
||||||
v = v.replace(";", "")
|
v = v.replace(";", "")
|
||||||
if dur:
|
if dur:
|
||||||
dt = datetime.utcfromtimestamp(time.time() + dur)
|
exp = formatdate(time.time() + dur, usegmt=True)
|
||||||
exp = dt.strftime("%a, %d %b %Y %H:%M:%S GMT")
|
|
||||||
else:
|
else:
|
||||||
exp = "Fri, 15 Aug 1997 01:00:00 GMT"
|
exp = "Fri, 15 Aug 1997 01:00:00 GMT"
|
||||||
|
|
||||||
@@ -1489,9 +1704,12 @@ def exclude_dotfiles(filepaths: list[str]) -> list[str]:
|
|||||||
return [x for x in filepaths if not x.split("/")[-1].startswith(".")]
|
return [x for x in filepaths if not x.split("/")[-1].startswith(".")]
|
||||||
|
|
||||||
|
|
||||||
def http_ts(ts: int) -> str:
|
def ipnorm(ip: str) -> str:
|
||||||
file_dt = datetime.utcfromtimestamp(ts)
|
if ":" in ip:
|
||||||
return file_dt.strftime(HTTP_TS_FMT)
|
# assume /64 clients; drop 4 groups
|
||||||
|
return IPv6Address(ip).exploded[:-20]
|
||||||
|
|
||||||
|
return ip
|
||||||
|
|
||||||
|
|
||||||
def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str:
|
def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str:
|
||||||
@@ -1516,17 +1734,21 @@ def html_bescape(s: bytes, quot: bool = False, crlf: bool = False) -> bytes:
|
|||||||
return s
|
return s
|
||||||
|
|
||||||
|
|
||||||
def quotep(txt: str) -> str:
|
def _quotep2(txt: str) -> str:
|
||||||
"""url quoter which deals with bytes correctly"""
|
"""url quoter which deals with bytes correctly"""
|
||||||
btxt = w8enc(txt)
|
btxt = w8enc(txt)
|
||||||
quot1 = quote(btxt, safe=b"/")
|
quot = quote(btxt, safe=b"/")
|
||||||
if not PY2:
|
return w8dec(quot.replace(b" ", b"+"))
|
||||||
quot2 = quot1.encode("ascii")
|
|
||||||
else:
|
|
||||||
quot2 = quot1
|
|
||||||
|
|
||||||
quot3 = quot2.replace(b" ", b"+")
|
|
||||||
return w8dec(quot3)
|
def _quotep3(txt: str) -> str:
|
||||||
|
"""url quoter which deals with bytes correctly"""
|
||||||
|
btxt = w8enc(txt)
|
||||||
|
quot = quote(btxt, safe=b"/").encode("utf-8")
|
||||||
|
return w8dec(quot.replace(b" ", b"+"))
|
||||||
|
|
||||||
|
|
||||||
|
quotep = _quotep3 if not PY2 else _quotep2
|
||||||
|
|
||||||
|
|
||||||
def unquotep(txt: str) -> str:
|
def unquotep(txt: str) -> str:
|
||||||
@@ -1545,25 +1767,36 @@ def vsplit(vpath: str) -> tuple[str, str]:
|
|||||||
|
|
||||||
|
|
||||||
def vjoin(rd: str, fn: str) -> str:
|
def vjoin(rd: str, fn: str) -> str:
|
||||||
return rd + "/" + fn if rd else fn
|
if rd and fn:
|
||||||
|
return rd + "/" + fn
|
||||||
|
else:
|
||||||
|
return rd or fn
|
||||||
|
|
||||||
|
|
||||||
def w8dec(txt: bytes) -> str:
|
def _w8dec2(txt: bytes) -> str:
|
||||||
"""decodes filesystem-bytes to wtf8"""
|
"""decodes filesystem-bytes to wtf8"""
|
||||||
if PY2:
|
|
||||||
return surrogateescape.decodefilename(txt)
|
return surrogateescape.decodefilename(txt)
|
||||||
|
|
||||||
|
|
||||||
|
def _w8enc2(txt: str) -> bytes:
|
||||||
|
"""encodes wtf8 to filesystem-bytes"""
|
||||||
|
return surrogateescape.encodefilename(txt)
|
||||||
|
|
||||||
|
|
||||||
|
def _w8dec3(txt: bytes) -> str:
|
||||||
|
"""decodes filesystem-bytes to wtf8"""
|
||||||
return txt.decode(FS_ENCODING, "surrogateescape")
|
return txt.decode(FS_ENCODING, "surrogateescape")
|
||||||
|
|
||||||
|
|
||||||
def w8enc(txt: str) -> bytes:
|
def _w8enc3(txt: str) -> bytes:
|
||||||
"""encodes wtf8 to filesystem-bytes"""
|
"""encodes wtf8 to filesystem-bytes"""
|
||||||
if PY2:
|
|
||||||
return surrogateescape.encodefilename(txt)
|
|
||||||
|
|
||||||
return txt.encode(FS_ENCODING, "surrogateescape")
|
return txt.encode(FS_ENCODING, "surrogateescape")
|
||||||
|
|
||||||
|
|
||||||
|
w8dec = _w8dec3 if not PY2 else _w8dec2
|
||||||
|
w8enc = _w8enc3 if not PY2 else _w8enc2
|
||||||
|
|
||||||
|
|
||||||
def w8b64dec(txt: str) -> str:
|
def w8b64dec(txt: str) -> str:
|
||||||
"""decodes base64(filesystem-bytes) to wtf8"""
|
"""decodes base64(filesystem-bytes) to wtf8"""
|
||||||
return w8dec(base64.urlsafe_b64decode(txt.encode("ascii")))
|
return w8dec(base64.urlsafe_b64decode(txt.encode("ascii")))
|
||||||
@@ -1574,17 +1807,20 @@ def w8b64enc(txt: str) -> str:
|
|||||||
return base64.urlsafe_b64encode(w8enc(txt)).decode("ascii")
|
return base64.urlsafe_b64encode(w8enc(txt)).decode("ascii")
|
||||||
|
|
||||||
|
|
||||||
if PY2 and WINDOWS:
|
if not PY2 or not WINDOWS:
|
||||||
# moonrunes become \x3f with bytestrings,
|
|
||||||
# losing mojibake support is worth
|
|
||||||
def _not_actually_mbcs(txt):
|
|
||||||
return txt
|
|
||||||
|
|
||||||
fsenc = _not_actually_mbcs
|
|
||||||
fsdec = _not_actually_mbcs
|
|
||||||
else:
|
|
||||||
fsenc = w8enc
|
fsenc = w8enc
|
||||||
fsdec = w8dec
|
fsdec = w8dec
|
||||||
|
else:
|
||||||
|
# moonrunes become \x3f with bytestrings,
|
||||||
|
# losing mojibake support is worth
|
||||||
|
def _not_actually_mbcs_enc(txt: str) -> bytes:
|
||||||
|
return txt
|
||||||
|
|
||||||
|
def _not_actually_mbcs_dec(txt: bytes) -> str:
|
||||||
|
return txt
|
||||||
|
|
||||||
|
fsenc = _not_actually_mbcs_enc
|
||||||
|
fsdec = _not_actually_mbcs_dec
|
||||||
|
|
||||||
|
|
||||||
def s3enc(mem_cur: "sqlite3.Cursor", rd: str, fn: str) -> tuple[str, str]:
|
def s3enc(mem_cur: "sqlite3.Cursor", rd: str, fn: str) -> tuple[str, str]:
|
||||||
@@ -1611,10 +1847,7 @@ def db_ex_chk(log: "NamedLogger", ex: Exception, db_path: str) -> bool:
|
|||||||
if str(ex) != "database is locked":
|
if str(ex) != "database is locked":
|
||||||
return False
|
return False
|
||||||
|
|
||||||
thr = threading.Thread(target=lsof, args=(log, db_path), name="dbex")
|
Daemon(lsof, "dbex", (log, db_path))
|
||||||
thr.daemon = True
|
|
||||||
thr.start()
|
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
@@ -1701,7 +1934,7 @@ def shut_socket(log: "NamedLogger", sck: socket.socket, timeout: int = 3) -> Non
|
|||||||
finally:
|
finally:
|
||||||
td = time.time() - t0
|
td = time.time() - t0
|
||||||
if td >= 1:
|
if td >= 1:
|
||||||
log("shut({}) in {:.3f} sec".format(fd, td), "1;30")
|
log("shut({}) in {:.3f} sec".format(fd, td), "90")
|
||||||
|
|
||||||
sck.close()
|
sck.close()
|
||||||
|
|
||||||
@@ -1858,9 +2091,9 @@ def sendfile_kern(
|
|||||||
n = os.sendfile(out_fd, in_fd, ofs, req)
|
n = os.sendfile(out_fd, in_fd, ofs, req)
|
||||||
stuck = 0
|
stuck = 0
|
||||||
except OSError as ex:
|
except OSError as ex:
|
||||||
|
# client stopped reading; do another select
|
||||||
d = time.time() - stuck
|
d = time.time() - stuck
|
||||||
log("sendfile stuck for {:.3f} sec: {!r}".format(d, ex))
|
if d < 3600 and ex.errno == errno.EWOULDBLOCK:
|
||||||
if d < 3600 and ex.errno == 11: # eagain
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
n = 0
|
n = 0
|
||||||
@@ -1887,6 +2120,7 @@ def statdir(
|
|||||||
if lstat and (PY2 or os.stat not in os.supports_follow_symlinks):
|
if lstat and (PY2 or os.stat not in os.supports_follow_symlinks):
|
||||||
scandir = False
|
scandir = False
|
||||||
|
|
||||||
|
src = "statdir"
|
||||||
try:
|
try:
|
||||||
btop = fsenc(top)
|
btop = fsenc(top)
|
||||||
if scandir and hasattr(os, "scandir"):
|
if scandir and hasattr(os, "scandir"):
|
||||||
@@ -2064,7 +2298,7 @@ def killtree(root: int) -> None:
|
|||||||
os.kill(pid, signal.SIGTERM)
|
os.kill(pid, signal.SIGTERM)
|
||||||
else:
|
else:
|
||||||
# windows gets minimal effort sorry
|
# windows gets minimal effort sorry
|
||||||
os.kill(pid, signal.SIGTERM)
|
os.kill(root, signal.SIGTERM)
|
||||||
return
|
return
|
||||||
|
|
||||||
for n in range(10):
|
for n in range(10):
|
||||||
@@ -2086,19 +2320,21 @@ def runcmd(
|
|||||||
kill = ka.pop("kill", "t") # [t]ree [m]ain [n]one
|
kill = ka.pop("kill", "t") # [t]ree [m]ain [n]one
|
||||||
capture = ka.pop("capture", 3) # 0=none 1=stdout 2=stderr 3=both
|
capture = ka.pop("capture", 3) # 0=none 1=stdout 2=stderr 3=both
|
||||||
|
|
||||||
sin = ka.pop("sin", None)
|
sin: Optional[bytes] = ka.pop("sin", None)
|
||||||
if sin:
|
if sin:
|
||||||
ka["stdin"] = sp.PIPE
|
ka["stdin"] = sp.PIPE
|
||||||
|
|
||||||
cout = sp.PIPE if capture in [1, 3] else None
|
cout = sp.PIPE if capture in [1, 3] else None
|
||||||
cerr = sp.PIPE if capture in [2, 3] else None
|
cerr = sp.PIPE if capture in [2, 3] else None
|
||||||
|
bout: bytes
|
||||||
|
berr: bytes
|
||||||
|
|
||||||
p = sp.Popen(argv, stdout=cout, stderr=cerr, **ka)
|
p = sp.Popen(argv, stdout=cout, stderr=cerr, **ka)
|
||||||
if not timeout or PY2:
|
if not timeout or PY2:
|
||||||
stdout, stderr = p.communicate(sin)
|
bout, berr = p.communicate(sin)
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
stdout, stderr = p.communicate(sin, timeout=timeout)
|
bout, berr = p.communicate(sin, timeout=timeout)
|
||||||
except sp.TimeoutExpired:
|
except sp.TimeoutExpired:
|
||||||
if kill == "n":
|
if kill == "n":
|
||||||
return -18, "", "" # SIGCONT; leave it be
|
return -18, "", "" # SIGCONT; leave it be
|
||||||
@@ -2108,15 +2344,15 @@ def runcmd(
|
|||||||
killtree(p.pid)
|
killtree(p.pid)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
stdout, stderr = p.communicate(timeout=1)
|
bout, berr = p.communicate(timeout=1)
|
||||||
except:
|
except:
|
||||||
stdout = b""
|
bout = b""
|
||||||
stderr = b""
|
berr = b""
|
||||||
|
|
||||||
stdout = stdout.decode("utf-8", "replace") if cout else b""
|
stdout = bout.decode("utf-8", "replace") if cout else ""
|
||||||
stderr = stderr.decode("utf-8", "replace") if cerr else b""
|
stderr = berr.decode("utf-8", "replace") if cerr else ""
|
||||||
|
|
||||||
rc = p.returncode
|
rc: int = p.returncode
|
||||||
if rc is None:
|
if rc is None:
|
||||||
rc = -14 # SIGALRM; failed to kill
|
rc = -14 # SIGALRM; failed to kill
|
||||||
|
|
||||||
@@ -2301,7 +2537,7 @@ def termsize() -> tuple[int, int]:
|
|||||||
def ioctl_GWINSZ(fd: int) -> Optional[tuple[int, int]]:
|
def ioctl_GWINSZ(fd: int) -> Optional[tuple[int, int]]:
|
||||||
try:
|
try:
|
||||||
cr = sunpack(b"hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, b"AAAA"))
|
cr = sunpack(b"hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, b"AAAA"))
|
||||||
return int(cr[1]), int(cr[0])
|
return cr[::-1]
|
||||||
except:
|
except:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@@ -2314,15 +2550,23 @@ def termsize() -> tuple[int, int]:
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if cr:
|
|
||||||
return cr
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return int(env["COLUMNS"]), int(env["LINES"])
|
return cr or (int(env["COLUMNS"]), int(env["LINES"]))
|
||||||
except:
|
except:
|
||||||
return 80, 25
|
return 80, 25
|
||||||
|
|
||||||
|
|
||||||
|
def hidedir(dp) -> None:
|
||||||
|
if ANYWIN:
|
||||||
|
try:
|
||||||
|
k32 = ctypes.WinDLL("kernel32")
|
||||||
|
attrs = k32.GetFileAttributesW(dp)
|
||||||
|
if attrs >= 0:
|
||||||
|
k32.SetFileAttributesW(dp, attrs | 2)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class Pebkac(Exception):
|
class Pebkac(Exception):
|
||||||
def __init__(self, code: int, msg: Optional[str] = None) -> None:
|
def __init__(self, code: int, msg: Optional[str] = None) -> None:
|
||||||
super(Pebkac, self).__init__(msg or HTTPCODE[code])
|
super(Pebkac, self).__init__(msg or HTTPCODE[code])
|
||||||
|
|||||||
0
copyparty/web/a/__init__.py
Normal file
0
copyparty/web/a/__init__.py
Normal file
1
copyparty/web/a/partyfuse.py
Symbolic link
1
copyparty/web/a/partyfuse.py
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../bin/partyfuse.py
|
||||||
1
copyparty/web/a/up2k.py
Symbolic link
1
copyparty/web/a/up2k.py
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../bin/up2k.py
|
||||||
1
copyparty/web/a/webdav-cfg.bat
Symbolic link
1
copyparty/web/a/webdav-cfg.bat
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../contrib/webdav-cfg.bat
|
||||||
@@ -28,7 +28,7 @@ window.baguetteBox = (function () {
|
|||||||
touch = {}, // start-pos
|
touch = {}, // start-pos
|
||||||
touchFlag = false, // busy
|
touchFlag = false, // busy
|
||||||
re_i = /.+\.(gif|jpe?g|png|webp)(\?|$)/i,
|
re_i = /.+\.(gif|jpe?g|png|webp)(\?|$)/i,
|
||||||
re_v = /.+\.(webm|mp4)(\?|$)/i,
|
re_v = /.+\.(webm|mkv|mp4)(\?|$)/i,
|
||||||
anims = ['slideIn', 'fadeIn', 'none'],
|
anims = ['slideIn', 'fadeIn', 'none'],
|
||||||
data = {}, // all galleries
|
data = {}, // all galleries
|
||||||
imagesElements = [],
|
imagesElements = [],
|
||||||
@@ -246,12 +246,24 @@ window.baguetteBox = (function () {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function keyDownHandler(e) {
|
function keyDownHandler(e) {
|
||||||
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing || modal.busy)
|
if (modal.busy)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (e.key == '?')
|
||||||
|
return halp();
|
||||||
|
|
||||||
|
if (anymod(e, true))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
var k = e.code + '', v = vid(), pos = -1;
|
var k = e.code + '', v = vid(), pos = -1;
|
||||||
|
|
||||||
if (k == "ArrowLeft" || k == "KeyJ")
|
if (k == "BracketLeft")
|
||||||
|
setloop(1);
|
||||||
|
else if (k == "BracketRight")
|
||||||
|
setloop(2);
|
||||||
|
else if (e.shiftKey)
|
||||||
|
return;
|
||||||
|
else if (k == "ArrowLeft" || k == "KeyJ")
|
||||||
showPreviousImage();
|
showPreviousImage();
|
||||||
else if (k == "ArrowRight" || k == "KeyL")
|
else if (k == "ArrowRight" || k == "KeyL")
|
||||||
showNextImage();
|
showNextImage();
|
||||||
@@ -289,10 +301,6 @@ window.baguetteBox = (function () {
|
|||||||
rotn(e.shiftKey ? -1 : 1);
|
rotn(e.shiftKey ? -1 : 1);
|
||||||
else if (k == "KeyY")
|
else if (k == "KeyY")
|
||||||
dlpic();
|
dlpic();
|
||||||
else if (k == "BracketLeft")
|
|
||||||
setloop(1);
|
|
||||||
else if (k == "BracketRight")
|
|
||||||
setloop(2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function anim() {
|
function anim() {
|
||||||
@@ -406,7 +414,7 @@ window.baguetteBox = (function () {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function keyUpHandler(e) {
|
function keyUpHandler(e) {
|
||||||
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing)
|
if (anymod(e))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
var k = e.code + '';
|
var k = e.code + '';
|
||||||
|
|||||||
@@ -857,6 +857,12 @@ html.y #path a:hover {
|
|||||||
color: var(--srv-3);
|
color: var(--srv-3);
|
||||||
border-bottom: 1px solid var(--srv-3b);
|
border-bottom: 1px solid var(--srv-3b);
|
||||||
}
|
}
|
||||||
|
#goh+span {
|
||||||
|
color: var(--bg-u5);
|
||||||
|
padding-left: .5em;
|
||||||
|
margin-left: .5em;
|
||||||
|
border-left: .2em solid var(--bg-u5);
|
||||||
|
}
|
||||||
#repl {
|
#repl {
|
||||||
padding: .33em;
|
padding: .33em;
|
||||||
}
|
}
|
||||||
@@ -1069,18 +1075,18 @@ html.y #widget.open {
|
|||||||
top: -.12em;
|
top: -.12em;
|
||||||
}
|
}
|
||||||
#wtico {
|
#wtico {
|
||||||
cursor: url(/.cpr/dd/4.png), pointer;
|
cursor: url(dd/4.png), pointer;
|
||||||
animation: cursor 500ms;
|
animation: cursor 500ms;
|
||||||
}
|
}
|
||||||
#wtico:hover {
|
#wtico:hover {
|
||||||
animation: cursor 500ms infinite;
|
animation: cursor 500ms infinite;
|
||||||
}
|
}
|
||||||
@keyframes cursor {
|
@keyframes cursor {
|
||||||
0% {cursor: url(/.cpr/dd/2.png), pointer}
|
0% {cursor: url(dd/2.png), pointer}
|
||||||
30% {cursor: url(/.cpr/dd/3.png), pointer}
|
30% {cursor: url(dd/3.png), pointer}
|
||||||
50% {cursor: url(/.cpr/dd/4.png), pointer}
|
50% {cursor: url(dd/4.png), pointer}
|
||||||
75% {cursor: url(/.cpr/dd/5.png), pointer}
|
75% {cursor: url(dd/5.png), pointer}
|
||||||
85% {cursor: url(/.cpr/dd/4.png), pointer}
|
85% {cursor: url(dd/4.png), pointer}
|
||||||
}
|
}
|
||||||
@keyframes spin {
|
@keyframes spin {
|
||||||
100% {transform: rotate(360deg)}
|
100% {transform: rotate(360deg)}
|
||||||
@@ -1095,7 +1101,6 @@ html.y #widget.open {
|
|||||||
#wtoggle {
|
#wtoggle {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
white-space: nowrap;
|
white-space: nowrap;
|
||||||
font-size: .8em;
|
|
||||||
top: -1em;
|
top: -1em;
|
||||||
right: 0;
|
right: 0;
|
||||||
height: 1em;
|
height: 1em;
|
||||||
@@ -1177,7 +1182,7 @@ html.y #widget.open {
|
|||||||
font-size: .4em;
|
font-size: .4em;
|
||||||
margin: -.3em .1em;
|
margin: -.3em .1em;
|
||||||
}
|
}
|
||||||
#wtoggle.sel #wzip #selzip {
|
#wtoggle.sel .l1 {
|
||||||
top: -.6em;
|
top: -.6em;
|
||||||
padding: .4em .3em;
|
padding: .4em .3em;
|
||||||
}
|
}
|
||||||
@@ -1221,6 +1226,40 @@ html.y #widget.open {
|
|||||||
width: calc(100% - 10.5em);
|
width: calc(100% - 10.5em);
|
||||||
background: rgba(0,0,0,0.2);
|
background: rgba(0,0,0,0.2);
|
||||||
}
|
}
|
||||||
|
#widget.cmp {
|
||||||
|
height: 1.6em;
|
||||||
|
bottom: -1.6em;
|
||||||
|
}
|
||||||
|
#widget.cmp.open {
|
||||||
|
bottom: 0;
|
||||||
|
}
|
||||||
|
#widget.cmp #wtoggle {
|
||||||
|
font-size: 1.2em;
|
||||||
|
}
|
||||||
|
#widget.cmp #wtgrid {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
#widget.cmp #pctl {
|
||||||
|
top: 0;
|
||||||
|
left: 0;
|
||||||
|
font-size: .75em;
|
||||||
|
}
|
||||||
|
#widget.cmp #pctl a {
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
#widget.cmp #barpos,
|
||||||
|
#widget.cmp #barbuf {
|
||||||
|
width: calc(100% - 11em);
|
||||||
|
border-radius: 0;
|
||||||
|
left: 5em;
|
||||||
|
top: 0;
|
||||||
|
}
|
||||||
|
#widget.cmp #pvol {
|
||||||
|
top: 0;
|
||||||
|
right: 0;
|
||||||
|
max-width: 5.8em;
|
||||||
|
border-radius: 0;
|
||||||
|
}
|
||||||
.opview {
|
.opview {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
@@ -1344,8 +1383,12 @@ input.eq_gain {
|
|||||||
padding-right: .2em;
|
padding-right: .2em;
|
||||||
text-align: right;
|
text-align: right;
|
||||||
}
|
}
|
||||||
|
#srch_form:not(.tags) #tsrch_tags,
|
||||||
|
#srch_form:not(.tags) #tsrch_adv {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
#op_search input {
|
#op_search input {
|
||||||
margin: 0;
|
margin: .1em 0 0 0;
|
||||||
}
|
}
|
||||||
#srch_q {
|
#srch_q {
|
||||||
white-space: pre;
|
white-space: pre;
|
||||||
@@ -1805,6 +1848,36 @@ a.btn,
|
|||||||
-ms-user-select: none;
|
-ms-user-select: none;
|
||||||
user-select: none;
|
user-select: none;
|
||||||
}
|
}
|
||||||
|
#hkhelp {
|
||||||
|
background: var(--bg);
|
||||||
|
}
|
||||||
|
#hkhelp table {
|
||||||
|
margin: 2em 2em 0 2em;
|
||||||
|
float: left;
|
||||||
|
}
|
||||||
|
#hkhelp th {
|
||||||
|
border-bottom: 1px solid var(--bg-u5);
|
||||||
|
background: var(--bg-u1);
|
||||||
|
font-weight: bold;
|
||||||
|
text-align: right;
|
||||||
|
}
|
||||||
|
#hkhelp tr+tr th {
|
||||||
|
border-top: 1.5em solid var(--bg);
|
||||||
|
}
|
||||||
|
#hkhelp td {
|
||||||
|
padding: .2em .3em;
|
||||||
|
}
|
||||||
|
#hkhelp td:first-child {
|
||||||
|
font-family: 'scp', monospace, monospace;
|
||||||
|
}
|
||||||
|
html.noscroll,
|
||||||
|
html.noscroll .sbar {
|
||||||
|
scrollbar-width: none;
|
||||||
|
}
|
||||||
|
html.noscroll::-webkit-scrollbar,
|
||||||
|
html.noscroll .sbar::-webkit-scrollbar {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,11 @@
|
|||||||
<meta charset="utf-8">
|
<meta charset="utf-8">
|
||||||
<title>{{ title }}</title>
|
<title>{{ title }}</title>
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
<meta name="viewport" content="width=device-width, initial-scale=0.8, minimum-scale=0.6">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
{{ html_head }}
|
{{ html_head }}
|
||||||
<link rel="stylesheet" media="screen" href="/.cpr/ui.css?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/ui.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" media="screen" href="/.cpr/browser.css?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/browser.css?_={{ ts }}">
|
||||||
{%- if css %}
|
{%- if css %}
|
||||||
<link rel="stylesheet" media="screen" href="{{ css }}?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ css }}?_={{ ts }}">
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
@@ -41,7 +42,7 @@
|
|||||||
<div id="op_mkdir" class="opview opbox act">
|
<div id="op_mkdir" class="opview opbox act">
|
||||||
<form method="post" enctype="multipart/form-data" accept-charset="utf-8" action="{{ url_suf }}">
|
<form method="post" enctype="multipart/form-data" accept-charset="utf-8" action="{{ url_suf }}">
|
||||||
<input type="hidden" name="act" value="mkdir" />
|
<input type="hidden" name="act" value="mkdir" />
|
||||||
📂<input type="text" name="name" class="i">
|
📂<input type="text" name="name" class="i" placeholder="awesome mix vol.1">
|
||||||
<input type="submit" value="make directory">
|
<input type="submit" value="make directory">
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
@@ -49,14 +50,14 @@
|
|||||||
<div id="op_new_md" class="opview opbox">
|
<div id="op_new_md" class="opview opbox">
|
||||||
<form method="post" enctype="multipart/form-data" accept-charset="utf-8" action="{{ url_suf }}">
|
<form method="post" enctype="multipart/form-data" accept-charset="utf-8" action="{{ url_suf }}">
|
||||||
<input type="hidden" name="act" value="new_md" />
|
<input type="hidden" name="act" value="new_md" />
|
||||||
📝<input type="text" name="name" class="i">
|
📝<input type="text" name="name" class="i" placeholder="weekend-plans">
|
||||||
<input type="submit" value="new markdown doc">
|
<input type="submit" value="new markdown doc">
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div id="op_msg" class="opview opbox act">
|
<div id="op_msg" class="opview opbox act">
|
||||||
<form method="post" enctype="application/x-www-form-urlencoded" accept-charset="utf-8" action="{{ url_suf }}">
|
<form method="post" enctype="application/x-www-form-urlencoded" accept-charset="utf-8" action="{{ url_suf }}">
|
||||||
📟<input type="text" name="msg" class="i">
|
📟<input type="text" name="msg" class="i" placeholder="lorem ipsum dolor sit amet">
|
||||||
<input type="submit" value="send msg to srv log">
|
<input type="submit" value="send msg to srv log">
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
@@ -70,7 +71,7 @@
|
|||||||
<h1 id="path">
|
<h1 id="path">
|
||||||
<a href="#" id="entree">🌲</a>
|
<a href="#" id="entree">🌲</a>
|
||||||
{%- for n in vpnodes %}
|
{%- for n in vpnodes %}
|
||||||
<a href="/{{ n[0] }}">{{ n[1] }}</a>
|
<a href="{{ r }}/{{ n[0] }}">{{ n[1] }}</a>
|
||||||
{%- endfor %}
|
{%- endfor %}
|
||||||
</h1>
|
</h1>
|
||||||
|
|
||||||
@@ -120,7 +121,7 @@
|
|||||||
|
|
||||||
<div id="epi" class="logue">{{ logues[1] }}</div>
|
<div id="epi" class="logue">{{ logues[1] }}</div>
|
||||||
|
|
||||||
<h2><a href="/?h" id="goh">control-panel</a></h2>
|
<h2><a href="{{ r }}/?h" id="goh">control-panel</a></h2>
|
||||||
|
|
||||||
<a href="#" id="repl">π</a>
|
<a href="#" id="repl">π</a>
|
||||||
|
|
||||||
@@ -133,7 +134,8 @@
|
|||||||
<div id="widget"></div>
|
<div id="widget"></div>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
var acct = "{{ acct }}",
|
var SR = {{ r|tojson }},
|
||||||
|
acct = "{{ acct }}",
|
||||||
perms = {{ perms }},
|
perms = {{ perms }},
|
||||||
themes = {{ themes }},
|
themes = {{ themes }},
|
||||||
dtheme = "{{ dtheme }}",
|
dtheme = "{{ dtheme }}",
|
||||||
@@ -159,10 +161,10 @@
|
|||||||
|
|
||||||
document.documentElement.className = localStorage.theme || dtheme;
|
document.documentElement.className = localStorage.theme || dtheme;
|
||||||
</script>
|
</script>
|
||||||
<script src="/.cpr/util.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/util.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/baguettebox.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/baguettebox.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/browser.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/browser.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/up2k.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/up2k.js?_={{ ts }}"></script>
|
||||||
{%- if js %}
|
{%- if js %}
|
||||||
<script src="{{ js }}?_={{ ts }}"></script>
|
<script src="{{ js }}?_={{ ts }}"></script>
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -57,7 +57,7 @@
|
|||||||
<div>{{ logues[1] }}</div><br />
|
<div>{{ logues[1] }}</div><br />
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
<h2><a href="/{{ url_suf }}{{ url_suf and '&' or '?' }}h">control-panel</a></h2>
|
<h2><a href="{{ r }}/{{ url_suf }}{{ url_suf and '&' or '?' }}h">control-panel</a></h2>
|
||||||
|
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
|||||||
0
copyparty/web/dd/__init__.py
Normal file
0
copyparty/web/dd/__init__.py
Normal file
0
copyparty/web/deps/__init__.py
Normal file
0
copyparty/web/deps/__init__.py
Normal file
@@ -4,6 +4,12 @@ html, body {
|
|||||||
font-family: sans-serif;
|
font-family: sans-serif;
|
||||||
line-height: 1.5em;
|
line-height: 1.5em;
|
||||||
}
|
}
|
||||||
|
html.y #helpbox a {
|
||||||
|
color: #079;
|
||||||
|
}
|
||||||
|
html.z #helpbox a {
|
||||||
|
color: #fc5;
|
||||||
|
}
|
||||||
#repl {
|
#repl {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
top: 0;
|
top: 0;
|
||||||
|
|||||||
@@ -3,11 +3,12 @@
|
|||||||
<title>📝 {{ title }}</title>
|
<title>📝 {{ title }}</title>
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=0.7">
|
<meta name="viewport" content="width=device-width, initial-scale=0.7">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
{{ html_head }}
|
{{ html_head }}
|
||||||
<link rel="stylesheet" href="/.cpr/ui.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/ui.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" href="/.cpr/md.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/md.css?_={{ ts }}">
|
||||||
{%- if edit %}
|
{%- if edit %}
|
||||||
<link rel="stylesheet" href="/.cpr/md2.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/md2.css?_={{ ts }}">
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
@@ -31,7 +32,7 @@
|
|||||||
{%- else %}
|
{%- else %}
|
||||||
<a href="{{ arg_base }}edit" tt="good: higher performance$Ngood: same document width as viewer$Nbad: assumes you know markdown">edit (basic)</a>
|
<a href="{{ arg_base }}edit" tt="good: higher performance$Ngood: same document width as viewer$Nbad: assumes you know markdown">edit (basic)</a>
|
||||||
<a href="{{ arg_base }}edit2" tt="not in-house so probably less buggy">edit (fancy)</a>
|
<a href="{{ arg_base }}edit2" tt="not in-house so probably less buggy">edit (fancy)</a>
|
||||||
<a href="{{ arg_base }}raw">view raw</a>
|
<a href="{{ arg_base }}">view raw</a>
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
</div>
|
</div>
|
||||||
<div id="toc"></div>
|
<div id="toc"></div>
|
||||||
@@ -127,7 +128,8 @@ write markdown (most html is 🙆 too)
|
|||||||
|
|
||||||
<script>
|
<script>
|
||||||
|
|
||||||
var last_modified = {{ lastmod }},
|
var SR = {{ r|tojson }},
|
||||||
|
last_modified = {{ lastmod }},
|
||||||
have_emp = {{ have_emp|tojson }},
|
have_emp = {{ have_emp|tojson }},
|
||||||
dfavico = "{{ favico }}";
|
dfavico = "{{ favico }}";
|
||||||
|
|
||||||
@@ -152,10 +154,10 @@ l.light = drk? 0:1;
|
|||||||
})();
|
})();
|
||||||
|
|
||||||
</script>
|
</script>
|
||||||
<script src="/.cpr/util.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/util.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/deps/marked.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/deps/marked.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/md.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/md.js?_={{ ts }}"></script>
|
||||||
{%- if edit %}
|
{%- if edit %}
|
||||||
<script src="/.cpr/md2.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/md2.js?_={{ ts }}"></script>
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
</body></html>
|
</body></html>
|
||||||
|
|||||||
@@ -1,12 +1,13 @@
|
|||||||
"use strict";
|
"use strict";
|
||||||
|
|
||||||
var dom_toc = ebi('toc');
|
var dom_toc = ebi('toc'),
|
||||||
var dom_wrap = ebi('mw');
|
dom_wrap = ebi('mw'),
|
||||||
var dom_hbar = ebi('mh');
|
dom_hbar = ebi('mh'),
|
||||||
var dom_nav = ebi('mn');
|
dom_nav = ebi('mn'),
|
||||||
var dom_pre = ebi('mp');
|
dom_pre = ebi('mp'),
|
||||||
var dom_src = ebi('mt');
|
dom_src = ebi('mt'),
|
||||||
var dom_navtgl = ebi('navtoggle');
|
dom_navtgl = ebi('navtoggle'),
|
||||||
|
hash0 = location.hash;
|
||||||
|
|
||||||
|
|
||||||
// chrome 49 needs this
|
// chrome 49 needs this
|
||||||
@@ -35,12 +36,12 @@ var dbg = function () { };
|
|||||||
|
|
||||||
// add navbar
|
// add navbar
|
||||||
(function () {
|
(function () {
|
||||||
var parts = get_evpath().split('/'), link = '', o;
|
var parts = (get_evpath().slice(0, -1).split('?')[0] + '?v').split('/'), link = '', o;
|
||||||
for (var a = 0, aa = parts.length - 2; a <= aa; a++) {
|
for (var a = 0, aa = parts.length - 1; a <= aa; a++) {
|
||||||
link += parts[a] + (a < aa ? '/' : '');
|
link += parts[a] + (a < aa ? '/' : '');
|
||||||
o = mknod('a');
|
o = mknod('a');
|
||||||
o.setAttribute('href', link);
|
o.setAttribute('href', link);
|
||||||
o.textContent = uricom_dec(parts[a]) || 'top';
|
o.textContent = uricom_dec(parts[a].split('?')[0]) || 'top';
|
||||||
dom_nav.appendChild(o);
|
dom_nav.appendChild(o);
|
||||||
}
|
}
|
||||||
})();
|
})();
|
||||||
@@ -256,7 +257,7 @@ function convert_markdown(md_text, dest_dom) {
|
|||||||
var html = dom_li.innerHTML;
|
var html = dom_li.innerHTML;
|
||||||
dom_li.innerHTML =
|
dom_li.innerHTML =
|
||||||
'<span class="todo_' + clas + '">' + char + '</span>' +
|
'<span class="todo_' + clas + '">' + char + '</span>' +
|
||||||
html.substr(html.indexOf('>') + 1);
|
html.slice(html.indexOf('>') + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
// separate <code> for each line in <pre>
|
// separate <code> for each line in <pre>
|
||||||
@@ -328,6 +329,15 @@ function convert_markdown(md_text, dest_dom) {
|
|||||||
catch (ex) {
|
catch (ex) {
|
||||||
md_plug_err(ex, ext[1]);
|
md_plug_err(ex, ext[1]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (hash0)
|
||||||
|
setTimeout(function () {
|
||||||
|
try {
|
||||||
|
QS(hash0).scrollIntoView();
|
||||||
|
hash0 = '';
|
||||||
|
}
|
||||||
|
catch (ex) { }
|
||||||
|
}, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -498,5 +508,5 @@ dom_navtgl.onclick = function () {
|
|||||||
if (sread('hidenav') == 1)
|
if (sread('hidenav') == 1)
|
||||||
dom_navtgl.onclick();
|
dom_navtgl.onclick();
|
||||||
|
|
||||||
if (window['tt'])
|
if (window.tt && tt.init)
|
||||||
tt.init();
|
tt.init();
|
||||||
|
|||||||
@@ -107,7 +107,8 @@ var draw_md = (function () {
|
|||||||
map_src = genmap(dom_ref, map_src);
|
map_src = genmap(dom_ref, map_src);
|
||||||
map_pre = genmap(dom_pre, map_pre);
|
map_pre = genmap(dom_pre, map_pre);
|
||||||
|
|
||||||
clmod(ebi('save'), 'disabled', src == server_md);
|
clmod(ebi('save'), 'disabled',
|
||||||
|
src.replace(/\r/g, "") == server_md.replace(/\r/g, ""));
|
||||||
|
|
||||||
var t1 = Date.now();
|
var t1 = Date.now();
|
||||||
delay = t1 - t0 > 100 ? 25 : 1;
|
delay = t1 - t0 > 100 ? 25 : 1;
|
||||||
@@ -230,7 +231,8 @@ redraw = (function () {
|
|||||||
// modification checker
|
// modification checker
|
||||||
function Modpoll() {
|
function Modpoll() {
|
||||||
var r = {
|
var r = {
|
||||||
skip_one: true,
|
initial: true,
|
||||||
|
skip_one: false,
|
||||||
disabled: false
|
disabled: false
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -253,7 +255,7 @@ function Modpoll() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
console.log('modpoll...');
|
console.log('modpoll...');
|
||||||
var url = (document.location + '').split('?')[0] + '?raw&_=' + Date.now();
|
var url = (document.location + '').split('?')[0] + '?_=' + Date.now();
|
||||||
var xhr = new XHR();
|
var xhr = new XHR();
|
||||||
xhr.open('GET', url, true);
|
xhr.open('GET', url, true);
|
||||||
xhr.responseType = 'text';
|
xhr.responseType = 'text';
|
||||||
@@ -275,8 +277,18 @@ function Modpoll() {
|
|||||||
if (!this.responseText)
|
if (!this.responseText)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
var server_ref = server_md.replace(/\r/g, '');
|
var new_md = this.responseText,
|
||||||
var server_now = this.responseText.replace(/\r/g, '');
|
server_ref = server_md.replace(/\r/g, ''),
|
||||||
|
server_now = new_md.replace(/\r/g, '');
|
||||||
|
|
||||||
|
// firefox bug: sometimes get stale text even if copyparty sent a 200
|
||||||
|
if (r.initial && server_ref != server_now)
|
||||||
|
return modal.confirm('Your browser decided to show an outdated copy of the document!\n\nDo you want to load the latest version from the server instead?', function () {
|
||||||
|
dom_src.value = server_md = new_md;
|
||||||
|
draw_md();
|
||||||
|
}, null);
|
||||||
|
|
||||||
|
r.initial = false;
|
||||||
|
|
||||||
if (server_ref != server_now) {
|
if (server_ref != server_now) {
|
||||||
console.log("modpoll diff |" + server_ref.length + "|, |" + server_now.length + "|");
|
console.log("modpoll diff |" + server_ref.length + "|, |" + server_now.length + "|");
|
||||||
@@ -296,6 +308,7 @@ function Modpoll() {
|
|||||||
console.log('modpoll eq');
|
console.log('modpoll eq');
|
||||||
};
|
};
|
||||||
|
|
||||||
|
setTimeout(r.periodic, 300);
|
||||||
if (md_opt.modpoll_freq > 0)
|
if (md_opt.modpoll_freq > 0)
|
||||||
setInterval(r.periodic, 1000 * md_opt.modpoll_freq);
|
setInterval(r.periodic, 1000 * md_opt.modpoll_freq);
|
||||||
|
|
||||||
@@ -389,7 +402,7 @@ function save_cb() {
|
|||||||
|
|
||||||
function run_savechk(lastmod, txt, btn, ntry) {
|
function run_savechk(lastmod, txt, btn, ntry) {
|
||||||
// download the saved doc from the server and compare
|
// download the saved doc from the server and compare
|
||||||
var url = (document.location + '').split('?')[0] + '?raw&_=' + Date.now();
|
var url = (document.location + '').split('?')[0] + '?_=' + Date.now();
|
||||||
var xhr = new XHR();
|
var xhr = new XHR();
|
||||||
xhr.open('GET', url, true);
|
xhr.open('GET', url, true);
|
||||||
xhr.responseType = 'text';
|
xhr.responseType = 'text';
|
||||||
|
|||||||
@@ -3,11 +3,12 @@
|
|||||||
<title>📝 {{ title }}</title>
|
<title>📝 {{ title }}</title>
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=0.7">
|
<meta name="viewport" content="width=device-width, initial-scale=0.7">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
{{ html_head }}
|
{{ html_head }}
|
||||||
<link rel="stylesheet" href="/.cpr/ui.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/ui.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" href="/.cpr/mde.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/mde.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" href="/.cpr/deps/mini-fa.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/deps/mini-fa.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" href="/.cpr/deps/easymde.css?_={{ ts }}">
|
<link rel="stylesheet" href="{{ r }}/.cpr/deps/easymde.css?_={{ ts }}">
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<div id="mw">
|
<div id="mw">
|
||||||
@@ -25,7 +26,8 @@
|
|||||||
<a href="#" id="repl">π</a>
|
<a href="#" id="repl">π</a>
|
||||||
<script>
|
<script>
|
||||||
|
|
||||||
var last_modified = {{ lastmod }},
|
var SR = {{ r|tojson }},
|
||||||
|
last_modified = {{ lastmod }},
|
||||||
have_emp = {{ have_emp|tojson }},
|
have_emp = {{ have_emp|tojson }},
|
||||||
dfavico = "{{ favico }}";
|
dfavico = "{{ favico }}";
|
||||||
|
|
||||||
@@ -47,8 +49,8 @@ l.light = drk? 0:1;
|
|||||||
})();
|
})();
|
||||||
|
|
||||||
</script>
|
</script>
|
||||||
<script src="/.cpr/util.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/util.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/deps/marked.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/deps/marked.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/deps/easymde.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/deps/easymde.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/mde.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/mde.js?_={{ ts }}"></script>
|
||||||
</body></html>
|
</body></html>
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ var dom_md = ebi('mt');
|
|||||||
|
|
||||||
(function () {
|
(function () {
|
||||||
var n = document.location + '';
|
var n = document.location + '';
|
||||||
n = n.substr(n.indexOf('//') + 2).split('?')[0].split('/');
|
n = (n.slice(n.indexOf('//') + 2).split('?')[0] + '?v').split('/');
|
||||||
n[0] = 'top';
|
n[0] = 'top';
|
||||||
var loc = [];
|
var loc = [];
|
||||||
var nav = [];
|
var nav = [];
|
||||||
@@ -15,7 +15,7 @@ var dom_md = ebi('mt');
|
|||||||
if (a > 0)
|
if (a > 0)
|
||||||
loc.push(n[a]);
|
loc.push(n[a]);
|
||||||
|
|
||||||
var dec = uricom_dec(n[a]).replace(/&/g, "&").replace(/</g, "<").replace(/>/g, ">");
|
var dec = uricom_dec(n[a].split('?')[0]).replace(/&/g, "&").replace(/</g, "<").replace(/>/g, ">");
|
||||||
|
|
||||||
nav.push('<a href="/' + loc.join('/') + '">' + dec + '</a>');
|
nav.push('<a href="/' + loc.join('/') + '">' + dec + '</a>');
|
||||||
}
|
}
|
||||||
@@ -166,7 +166,7 @@ function save_cb() {
|
|||||||
//alert('save OK -- wrote ' + r.size + ' bytes.\n\nsha512: ' + r.sha512);
|
//alert('save OK -- wrote ' + r.size + ' bytes.\n\nsha512: ' + r.sha512);
|
||||||
|
|
||||||
// download the saved doc from the server and compare
|
// download the saved doc from the server and compare
|
||||||
var url = (document.location + '').split('?')[0] + '?raw';
|
var url = (document.location + '').split('?')[0] + '?_=' + Date.now();
|
||||||
var xhr = new XHR();
|
var xhr = new XHR();
|
||||||
xhr.open('GET', url, true);
|
xhr.open('GET', url, true);
|
||||||
xhr.responseType = 'text';
|
xhr.responseType = 'text';
|
||||||
|
|||||||
@@ -6,8 +6,9 @@
|
|||||||
<title>{{ svcname }}</title>
|
<title>{{ svcname }}</title>
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
{{ html_head }}
|
{{ html_head }}
|
||||||
<link rel="stylesheet" media="screen" href="/.cpr/msg.css?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/msg.css?_={{ ts }}">
|
||||||
</head>
|
</head>
|
||||||
|
|
||||||
<body>
|
<body>
|
||||||
|
|||||||
@@ -10,10 +10,14 @@ html {
|
|||||||
padding: 0 1em 3em 1em;
|
padding: 0 1em 3em 1em;
|
||||||
line-height: 1.3em;
|
line-height: 1.3em;
|
||||||
}
|
}
|
||||||
|
#wrap.w {
|
||||||
|
max-width: 96%;
|
||||||
|
}
|
||||||
h1 {
|
h1 {
|
||||||
border-bottom: 1px solid #ccc;
|
border-bottom: 1px solid #ccc;
|
||||||
margin: 2em 0 .4em 0;
|
margin: 2em 0 .4em 0;
|
||||||
padding: 0 0 .2em 0;
|
padding: 0;
|
||||||
|
line-height: 1em;
|
||||||
font-weight: normal;
|
font-weight: normal;
|
||||||
}
|
}
|
||||||
li {
|
li {
|
||||||
@@ -23,23 +27,30 @@ a {
|
|||||||
color: #047;
|
color: #047;
|
||||||
background: #fff;
|
background: #fff;
|
||||||
text-decoration: none;
|
text-decoration: none;
|
||||||
|
white-space: nowrap;
|
||||||
border-bottom: 1px solid #8ab;
|
border-bottom: 1px solid #8ab;
|
||||||
border-radius: .2em;
|
border-radius: .2em;
|
||||||
padding: .2em .8em;
|
padding: .2em .6em;
|
||||||
|
margin: 0 .3em;
|
||||||
}
|
}
|
||||||
a+a {
|
td a {
|
||||||
margin-left: .5em;
|
margin: 0;
|
||||||
}
|
}
|
||||||
.refresh,
|
.af,
|
||||||
.logout {
|
.logout {
|
||||||
float: right;
|
float: right;
|
||||||
margin: -.2em 0 0 .5em;
|
margin: -.2em 0 0 .8em;
|
||||||
}
|
}
|
||||||
.logout,
|
.logout,
|
||||||
a.r {
|
a.r {
|
||||||
color: #c04;
|
color: #c04;
|
||||||
border-color: #c7a;
|
border-color: #c7a;
|
||||||
}
|
}
|
||||||
|
a.g {
|
||||||
|
color: #2b0;
|
||||||
|
border-color: #3a0;
|
||||||
|
box-shadow: 0 .3em 1em #4c0;
|
||||||
|
}
|
||||||
#repl {
|
#repl {
|
||||||
border: none;
|
border: none;
|
||||||
background: none;
|
background: none;
|
||||||
@@ -64,9 +75,15 @@ table {
|
|||||||
.num td:first-child {
|
.num td:first-child {
|
||||||
text-align: right;
|
text-align: right;
|
||||||
}
|
}
|
||||||
|
.cn {
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
.btns {
|
.btns {
|
||||||
margin: 1em 0;
|
margin: 1em 0;
|
||||||
}
|
}
|
||||||
|
.btns>a:first-child {
|
||||||
|
margin-left: 0;
|
||||||
|
}
|
||||||
#msg {
|
#msg {
|
||||||
margin: 3em 0;
|
margin: 3em 0;
|
||||||
}
|
}
|
||||||
@@ -83,6 +100,39 @@ blockquote {
|
|||||||
border-left: .3em solid rgba(128,128,128,0.5);
|
border-left: .3em solid rgba(128,128,128,0.5);
|
||||||
border-radius: 0 0 0 .25em;
|
border-radius: 0 0 0 .25em;
|
||||||
}
|
}
|
||||||
|
pre, code {
|
||||||
|
color: #480;
|
||||||
|
background: #fff;
|
||||||
|
font-family: 'scp', monospace, monospace;
|
||||||
|
border: 1px solid rgba(128,128,128,0.3);
|
||||||
|
border-radius: .2em;
|
||||||
|
padding: .15em .2em;
|
||||||
|
}
|
||||||
|
html.z pre,
|
||||||
|
html.z code {
|
||||||
|
color: #9e0;
|
||||||
|
background: #000;
|
||||||
|
background: rgba(0,16,0,0.2);
|
||||||
|
}
|
||||||
|
.os {
|
||||||
|
line-height: 1.5em;
|
||||||
|
}
|
||||||
|
.sph {
|
||||||
|
margin-top: 4em;
|
||||||
|
}
|
||||||
|
.sph code {
|
||||||
|
margin-left: .3em;
|
||||||
|
}
|
||||||
|
pre b,
|
||||||
|
code b {
|
||||||
|
color: #000;
|
||||||
|
font-weight: normal;
|
||||||
|
text-shadow: 0 0 .2em #0f0;
|
||||||
|
}
|
||||||
|
html.z pre b,
|
||||||
|
html.z code b {
|
||||||
|
color: #fff;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
html.z {
|
html.z {
|
||||||
@@ -102,6 +152,11 @@ html.z a.r {
|
|||||||
background: #804;
|
background: #804;
|
||||||
border-color: #c28;
|
border-color: #c28;
|
||||||
}
|
}
|
||||||
|
html.z a.g {
|
||||||
|
background: #470;
|
||||||
|
border-color: #af4;
|
||||||
|
box-shadow: 0 .3em 1em #7d0;
|
||||||
|
}
|
||||||
html.z input {
|
html.z input {
|
||||||
color: #fff;
|
color: #fff;
|
||||||
background: #626;
|
background: #626;
|
||||||
|
|||||||
@@ -6,19 +6,21 @@
|
|||||||
<title>{{ svcname }}</title>
|
<title>{{ svcname }}</title>
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
{{ html_head }}
|
{{ html_head }}
|
||||||
<link rel="stylesheet" media="screen" href="/.cpr/splash.css?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/splash.css?_={{ ts }}">
|
||||||
<link rel="stylesheet" media="screen" href="/.cpr/ui.css?_={{ ts }}">
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/ui.css?_={{ ts }}">
|
||||||
</head>
|
</head>
|
||||||
|
|
||||||
<body>
|
<body>
|
||||||
<div id="wrap">
|
<div id="wrap">
|
||||||
<a id="a" href="/?h" class="refresh">refresh</a>
|
<a id="a" href="{{ r }}/?h" class="af">refresh</a>
|
||||||
|
<a id="v" href="{{ r }}/?hc" class="af">connect</a>
|
||||||
|
|
||||||
{%- if this.uname == '*' %}
|
{%- if this.uname == '*' %}
|
||||||
<p id="b">howdy stranger <small>(you're not logged in)</small></p>
|
<p id="b">howdy stranger <small>(you're not logged in)</small></p>
|
||||||
{%- else %}
|
{%- else %}
|
||||||
<a id="c" href="/?pw=x" class="logout">logout</a>
|
<a id="c" href="{{ r }}/?pw=x" class="logout">logout</a>
|
||||||
<p><span id="m">welcome back,</span> <strong>{{ this.uname }}</strong></p>
|
<p><span id="m">welcome back,</span> <strong>{{ this.uname }}</strong></p>
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
@@ -51,8 +53,8 @@
|
|||||||
</table>
|
</table>
|
||||||
</td></tr></table>
|
</td></tr></table>
|
||||||
<div class="btns">
|
<div class="btns">
|
||||||
<a id="d" href="/?stack">dump stack</a>
|
<a id="d" href="{{ r }}/?stack">dump stack</a>
|
||||||
<a id="e" href="/?reload=cfg">reload cfg</a>
|
<a id="e" href="{{ r }}/?reload=cfg">reload cfg</a>
|
||||||
</div>
|
</div>
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
@@ -77,18 +79,18 @@
|
|||||||
<h1 id="cc">client config:</h1>
|
<h1 id="cc">client config:</h1>
|
||||||
<ul>
|
<ul>
|
||||||
{% if k304 %}
|
{% if k304 %}
|
||||||
<li><a id="h" href="/?k304=n">disable k304</a> (currently enabled)
|
<li><a id="h" href="{{ r }}/?k304=n">disable k304</a> (currently enabled)
|
||||||
{%- else %}
|
{%- else %}
|
||||||
<li><a id="i" href="/?k304=y" class="r">enable k304</a> (currently disabled)
|
<li><a id="i" href="{{ r }}/?k304=y" class="r">enable k304</a> (currently disabled)
|
||||||
{% endif %}
|
{% endif %}
|
||||||
<blockquote id="j">enabling this will disconnect your client on every HTTP 304, which can prevent some buggy proxies from getting stuck (suddenly not loading pages), <em>but</em> it will also make things slower in general</blockquote></li>
|
<blockquote id="j">enabling this will disconnect your client on every HTTP 304, which can prevent some buggy proxies from getting stuck (suddenly not loading pages), <em>but</em> it will also make things slower in general</blockquote></li>
|
||||||
|
|
||||||
<li><a id="k" href="/?reset" class="r" onclick="localStorage.clear();return true">reset client settings</a></li>
|
<li><a id="k" href="{{ r }}/?reset" class="r" onclick="localStorage.clear();return true">reset client settings</a></li>
|
||||||
</ul>
|
</ul>
|
||||||
|
|
||||||
<h1 id="l">login for more:</h1>
|
<h1 id="l">login for more:</h1>
|
||||||
<ul>
|
<ul>
|
||||||
<form method="post" enctype="multipart/form-data" action="/{{ qvpath }}">
|
<form method="post" enctype="multipart/form-data" action="{{ r }}/{{ qvpath }}">
|
||||||
<input type="hidden" name="act" value="login" />
|
<input type="hidden" name="act" value="login" />
|
||||||
<input type="password" name="cppwd" />
|
<input type="password" name="cppwd" />
|
||||||
<input type="submit" value="Login" />
|
<input type="submit" value="Login" />
|
||||||
@@ -98,13 +100,14 @@
|
|||||||
<a href="#" id="repl">π</a>
|
<a href="#" id="repl">π</a>
|
||||||
<script>
|
<script>
|
||||||
|
|
||||||
var lang="{{ lang }}",
|
var SR = {{ r|tojson }},
|
||||||
|
lang="{{ lang }}",
|
||||||
dfavico="{{ favico }}";
|
dfavico="{{ favico }}";
|
||||||
|
|
||||||
document.documentElement.className=localStorage.theme||"{{ this.args.theme }}";
|
document.documentElement.className=localStorage.theme||"{{ this.args.theme }}";
|
||||||
|
|
||||||
</script>
|
</script>
|
||||||
<script src="/.cpr/util.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/util.js?_={{ ts }}"></script>
|
||||||
<script src="/.cpr/splash.js?_={{ ts }}"></script>
|
<script src="{{ r }}/.cpr/splash.js?_={{ ts }}"></script>
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
|||||||
@@ -12,23 +12,26 @@ var Ls = {
|
|||||||
"cc1": "klient-konfigurasjon",
|
"cc1": "klient-konfigurasjon",
|
||||||
"h1": "skru av k304",
|
"h1": "skru av k304",
|
||||||
"i1": "skru på k304",
|
"i1": "skru på k304",
|
||||||
"j1": "k304 bryter tilkoplingen for hver HTTP 304. Dette hjelper visse mellomtjenere som kan sette seg fast / plutselig slutter å laste sider, men det reduserer også ytelsen betydelig",
|
"j1": "k304 bryter tilkoplingen for hver HTTP 304. Dette hjelper mot visse mellomtjenere som kan sette seg fast / plutselig slutter å laste sider, men det reduserer også ytelsen betydelig",
|
||||||
"k1": "nullstill innstillinger",
|
"k1": "nullstill innstillinger",
|
||||||
"l1": "logg inn:",
|
"l1": "logg inn:",
|
||||||
"m1": "velkommen tilbake,",
|
"m1": "velkommen tilbake,",
|
||||||
"n1": "404: filen finnes ikke ┐( ´ -`)┌",
|
"n1": "404: filen finnes ikke ┐( ´ -`)┌",
|
||||||
"o1": 'eller kanskje du ikke har tilgang? prøv å logge inn eller <a href="/?h">gå hjem</a>',
|
"o1": 'eller kanskje du ikke har tilgang? prøv å logge inn eller <a href="' + SR + '/?h">gå hjem</a>',
|
||||||
"p1": "403: tilgang nektet ~┻━┻",
|
"p1": "403: tilgang nektet ~┻━┻",
|
||||||
"q1": 'du må logge inn eller <a href="/?h">gå hjem</a>',
|
"q1": 'du må logge inn eller <a href="' + SR + '/?h">gå hjem</a>',
|
||||||
"r1": "gå hjem",
|
"r1": "gå hjem",
|
||||||
".s1": "kartlegg",
|
".s1": "kartlegg",
|
||||||
"t1": "handling",
|
"t1": "handling",
|
||||||
"u2": "tid siden noen sist skrev til serveren$N( opplastning / navneendring / ... )$N$N17d = 17 dager$N1h23 = 1 time 23 minutter$N4m56 = 4 minuter 56 sekunder",
|
"u2": "tid siden noen sist skrev til serveren$N( opplastning / navneendring / ... )$N$N17d = 17 dager$N1h23 = 1 time 23 minutter$N4m56 = 4 minuter 56 sekunder",
|
||||||
|
"v1": "koble til",
|
||||||
|
"v2": "bruk denne serveren som en lokal harddisk$N$NADVARSEL: kommer til å vise passordet ditt!"
|
||||||
},
|
},
|
||||||
"eng": {
|
"eng": {
|
||||||
"d2": "shows the state of all active threads",
|
"d2": "shows the state of all active threads",
|
||||||
"e2": "reload config files (accounts/volumes/volflags),$Nand rescan all e2ds volumes",
|
"e2": "reload config files (accounts/volumes/volflags),$Nand rescan all e2ds volumes",
|
||||||
"u2": "time since the last server write$N( upload / rename / ... )$N$N17d = 17 days$N1h23 = 1 hour 23 minutes$N4m56 = 4 minutes 56 seconds",
|
"u2": "time since the last server write$N( upload / rename / ... )$N$N17d = 17 days$N1h23 = 1 hour 23 minutes$N4m56 = 4 minutes 56 seconds",
|
||||||
|
"v2": "use this server as a local HDD$N$NWARNING: this will show your password!",
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
d = Ls[sread("lang") || lang];
|
d = Ls[sread("lang") || lang];
|
||||||
|
|||||||
201
copyparty/web/svcs.html
Normal file
201
copyparty/web/svcs.html
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>{{ args.doctitle }} @ {{ args.name }}</title>
|
||||||
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=0.8">
|
||||||
|
<meta name="theme-color" content="#333">
|
||||||
|
{{ html_head }}
|
||||||
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/splash.css?_={{ ts }}">
|
||||||
|
<link rel="stylesheet" media="screen" href="{{ r }}/.cpr/ui.css?_={{ ts }}">
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body>
|
||||||
|
<div id="wrap" class="w">
|
||||||
|
<div class="cn">
|
||||||
|
<p class="btns"><a href="{{ r }}/{{ vp }}">browse files</a> // <a href="{{ r }}/?h">control panel</a></p>
|
||||||
|
<p>or choose your OS for cooler alternatives:</p>
|
||||||
|
<div class="ossel">
|
||||||
|
<a id="swin" href="#">Windows</a>
|
||||||
|
<a id="slin" href="#">Linux</a>
|
||||||
|
<a id="smac" href="#">macOS</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<p class="sph">
|
||||||
|
make this server appear on your computer as a regular HDD!<br />
|
||||||
|
pick your favorite below (sorted by performance, best first) and lets 🎉<br />
|
||||||
|
<br />
|
||||||
|
<span class="os win lin mac">placeholders:</span>
|
||||||
|
<span class="os win">
|
||||||
|
{% if accs %}<code><b>{{ pw }}</b></code>=password, {% endif %}<code><b>W:</b></code>=mountpoint
|
||||||
|
</span>
|
||||||
|
<span class="os lin mac">
|
||||||
|
{% if accs %}<code><b>{{ pw }}</b></code>=password, {% endif %}<code><b>mp</b></code>=mountpoint
|
||||||
|
</span>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
{% if not args.no_dav %}
|
||||||
|
<h1>WebDAV</h1>
|
||||||
|
|
||||||
|
<div class="os win">
|
||||||
|
<p><em>note: rclone-FTP is a bit faster, so {% if args.ftp or args.ftps %}try that first{% else %}consider enabling FTP in server settings{% endif %}</em></p>
|
||||||
|
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
|
||||||
|
<pre>
|
||||||
|
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
|
||||||
|
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>W:</b>
|
||||||
|
</pre>
|
||||||
|
{% if s %}
|
||||||
|
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
<p>if you want to use the native WebDAV client in windows instead (slow and buggy), first run <a href="{{ r }}/.cpr/a/webdav-cfg.bat">webdav-cfg.bat</a> to remove the 47 MiB filesize limit (also fixes latency and password login), then connect:</p>
|
||||||
|
<pre>
|
||||||
|
net use <b>w:</b> http{{ s }}://{{ ep }}/{{ vp }}{% if accs %} k /user:<b>{{ pw }}</b>{% endif %}
|
||||||
|
</pre>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="os lin">
|
||||||
|
<pre>
|
||||||
|
yum install davfs2
|
||||||
|
{% if accs %}printf '%s\n' <b>{{ pw }}</b> k | {% endif %}mount -t davfs -ouid=1000 http{{ s }}://{{ ep }}/{{ vp }} <b>mp</b>
|
||||||
|
</pre>
|
||||||
|
<p>or you can use rclone instead, which is much slower but doesn't require root:</p>
|
||||||
|
<pre>
|
||||||
|
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
|
||||||
|
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>mp</b>
|
||||||
|
</pre>
|
||||||
|
{% if s %}
|
||||||
|
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
<p>or the emergency alternative (gnome/gui-only):</p>
|
||||||
|
<!-- gnome-bug: ignores vp -->
|
||||||
|
<pre>
|
||||||
|
{%- if accs %}
|
||||||
|
echo <b>{{ pw }}</b> | gio mount dav{{ s }}://k@{{ ep }}/{{ vp }}
|
||||||
|
{%- else %}
|
||||||
|
gio mount -a dav{{ s }}://{{ ep }}/{{ vp }}
|
||||||
|
{%- endif %}
|
||||||
|
</pre>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="os mac">
|
||||||
|
<pre>
|
||||||
|
osascript -e ' mount volume "http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }}" '
|
||||||
|
</pre>
|
||||||
|
<p>or you can open up a Finder, press command-K and paste this instead:</p>
|
||||||
|
<pre>
|
||||||
|
http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }}
|
||||||
|
</pre>
|
||||||
|
|
||||||
|
{% if s %}
|
||||||
|
<p><em>replace <code>https</code> with <code>http</code> if it doesn't work</em></p>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
{% if args.ftp or args.ftps %}
|
||||||
|
<h1>FTP</h1>
|
||||||
|
|
||||||
|
<div class="os win">
|
||||||
|
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
|
||||||
|
<pre>
|
||||||
|
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
|
||||||
|
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>W:</b>
|
||||||
|
</pre>
|
||||||
|
<p>if you want to use the native FTP client in windows instead (please dont), press <code>win+R</code> and run this command:</p>
|
||||||
|
<pre>
|
||||||
|
explorer {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}<b>{{ pw }}</b>:k@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }}
|
||||||
|
</pre>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="os lin">
|
||||||
|
<pre>
|
||||||
|
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
|
||||||
|
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>mp</b>
|
||||||
|
</pre>
|
||||||
|
<p>emergency alternative (gnome/gui-only):</p>
|
||||||
|
<!-- gnome-bug: ignores vp -->
|
||||||
|
<pre>
|
||||||
|
{%- if accs %}
|
||||||
|
echo <b>{{ pw }}</b> | gio mount ftp{{ "" if args.ftp else "s" }}://k@{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }}
|
||||||
|
{%- else %}
|
||||||
|
gio mount -a ftp{{ "" if args.ftp else "s" }}://{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }}
|
||||||
|
{%- endif %}
|
||||||
|
</pre>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="os mac">
|
||||||
|
<p>note: FTP is read-only on macos; please use WebDAV instead</p>
|
||||||
|
<pre>
|
||||||
|
open {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}k:<b>{{ pw }}</b>@{% else %}anonymous:@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }}
|
||||||
|
</pre>
|
||||||
|
</div>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<h1>partyfuse</h1>
|
||||||
|
<p>
|
||||||
|
<a href="{{ r }}/.cpr/a/partyfuse.py">partyfuse.py</a> -- fast, read-only,
|
||||||
|
<span class="os win">needs <a href="https://winfsp.dev/rel/">winfsp</a></span>
|
||||||
|
<span class="os lin">doesn't need root</span>
|
||||||
|
</p>
|
||||||
|
<pre>
|
||||||
|
partyfuse.py{% if accs %} -a <b>{{ pw }}</b>{% endif %} http{{ s }}://{{ ep }}/{{ vp }} <b><span class="os win">W:</span><span class="os lin mac">mp</span></b>
|
||||||
|
</pre>
|
||||||
|
{% if s %}
|
||||||
|
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>-td</code></em></p>
|
||||||
|
{% endif %}
|
||||||
|
<p>
|
||||||
|
you can use <a href="{{ r }}/.cpr/a/up2k.py">up2k.py</a> to upload (sometimes faster than web-browsers)
|
||||||
|
</p>
|
||||||
|
|
||||||
|
|
||||||
|
{% if args.smb %}
|
||||||
|
<h1>SMB / CIFS</h1>
|
||||||
|
<em><a href="https://github.com/SecureAuthCorp/impacket/issues/1433">bug:</a> max ~300 files in each folder</em>
|
||||||
|
|
||||||
|
<div class="os win">
|
||||||
|
<pre>
|
||||||
|
net use <b>w:</b> \\{{ host }}\a{% if accs %} k /user:<b>{{ pw }}</b>{% endif %}
|
||||||
|
</pre>
|
||||||
|
<!-- rclone fails due to copyparty-smb bugs -->
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="os lin">
|
||||||
|
<pre>
|
||||||
|
mount -t cifs -o{% if accs %}user=<b>{{ pw }}</b>,pass=k,{% endif %}vers={{ 1 if args.smb1 else 2 }}.0,port={{ args.smb_port }},uid=1000 //{{ host }}/a/ <b>mp</b>
|
||||||
|
</pre>
|
||||||
|
<!-- p>or the emergency alternative (gnome/gui-only):</p nevermind, only works through mdns -->
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<pre class="os mac">
|
||||||
|
open 'smb://<b>{{ pw }}</b>:k@{{ host }}/a'
|
||||||
|
</pre>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
<a href="#" id="repl">π</a>
|
||||||
|
<script>
|
||||||
|
|
||||||
|
var SR = {{ r|tojson }},
|
||||||
|
lang="{{ lang }}",
|
||||||
|
dfavico="{{ favico }}";
|
||||||
|
|
||||||
|
document.documentElement.className=localStorage.theme||"{{ args.theme }}";
|
||||||
|
|
||||||
|
</script>
|
||||||
|
<script src="{{ r }}/.cpr/util.js?_={{ ts }}"></script>
|
||||||
|
<script src="{{ r }}/.cpr/svcs.js?_={{ ts }}"></script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
42
copyparty/web/svcs.js
Normal file
42
copyparty/web/svcs.js
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
function QSA(x) {
|
||||||
|
return document.querySelectorAll(x);
|
||||||
|
}
|
||||||
|
var LINUX = /Linux/.test(navigator.userAgent),
|
||||||
|
MACOS = /[^a-z]mac ?os/i.test(navigator.userAgent),
|
||||||
|
WINDOWS = /Windows/.test(navigator.userAgent);
|
||||||
|
|
||||||
|
|
||||||
|
var oa = QSA('pre');
|
||||||
|
for (var a = 0; a < oa.length; a++) {
|
||||||
|
var html = oa[a].innerHTML,
|
||||||
|
nd = /^ +/.exec(html)[0].length,
|
||||||
|
rd = new RegExp('(^|\r?\n) {' + nd + '}', 'g');
|
||||||
|
|
||||||
|
oa[a].innerHTML = html.replace(rd, '$1').replace(/[ \r\n]+$/, '').replace(/\r?\n/g, '<br />');
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
oa = QSA('.ossel a');
|
||||||
|
for (var a = 0; a < oa.length; a++)
|
||||||
|
oa[a].onclick = esetos;
|
||||||
|
|
||||||
|
function esetos(e) {
|
||||||
|
ev(e);
|
||||||
|
setos(((e && e.target) || (window.event && window.event.srcElement)).id.slice(1));
|
||||||
|
}
|
||||||
|
|
||||||
|
function setos(os) {
|
||||||
|
var oa = QSA('.os');
|
||||||
|
for (var a = 0; a < oa.length; a++)
|
||||||
|
oa[a].style.display = 'none';
|
||||||
|
|
||||||
|
var oa = QSA('.' + os);
|
||||||
|
for (var a = 0; a < oa.length; a++)
|
||||||
|
oa[a].style.display = '';
|
||||||
|
|
||||||
|
oa = QSA('.ossel a');
|
||||||
|
for (var a = 0; a < oa.length; a++)
|
||||||
|
clmod(oa[a], 'g', oa[a].id.slice(1) == os);
|
||||||
|
}
|
||||||
|
|
||||||
|
setos(WINDOWS ? 'win' : LINUX ? 'lin' : MACOS ? 'mac' : 'idk');
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
@font-face {
|
@font-face {
|
||||||
font-family: 'scp';
|
font-family: 'scp';
|
||||||
font-display: swap;
|
font-display: swap;
|
||||||
src: local('Source Code Pro Regular'), local('SourceCodePro-Regular'), url(/.cpr/deps/scp.woff2) format('woff2');
|
src: local('Source Code Pro Regular'), local('SourceCodePro-Regular'), url(deps/scp.woff2) format('woff2');
|
||||||
}
|
}
|
||||||
html {
|
html {
|
||||||
touch-action: manipulation;
|
touch-action: manipulation;
|
||||||
@@ -202,6 +202,7 @@ html.y #tth {
|
|||||||
border: .4em solid var(--fg);
|
border: .4em solid var(--fg);
|
||||||
box-shadow: 0 2em 4em 1em var(--bg-max);
|
box-shadow: 0 2em 4em 1em var(--bg-max);
|
||||||
}
|
}
|
||||||
|
#hkhelp,
|
||||||
#modal {
|
#modal {
|
||||||
position: fixed;
|
position: fixed;
|
||||||
overflow: auto;
|
overflow: auto;
|
||||||
|
|||||||
@@ -779,7 +779,7 @@ function up2k_init(subtle) {
|
|||||||
|
|
||||||
setTimeout(function () {
|
setTimeout(function () {
|
||||||
if (window.WebAssembly && !hws.length)
|
if (window.WebAssembly && !hws.length)
|
||||||
fetch('/.cpr/w.hash.js' + CB);
|
fetch(SR + '/.cpr/w.hash.js' + CB);
|
||||||
}, 1000);
|
}, 1000);
|
||||||
|
|
||||||
function showmodal(msg) {
|
function showmodal(msg) {
|
||||||
@@ -809,7 +809,7 @@ function up2k_init(subtle) {
|
|||||||
m = L.u_https1 + ' <a href="' + (window.location + '').replace(':', 's:') + '">' + L.u_https2 + '</a> ' + L.u_https3;
|
m = L.u_https1 + ' <a href="' + (window.location + '').replace(':', 's:') + '">' + L.u_https2 + '</a> ' + L.u_https3;
|
||||||
|
|
||||||
showmodal('<h1>loading ' + fn + '</h1>');
|
showmodal('<h1>loading ' + fn + '</h1>');
|
||||||
import_js('/.cpr/deps/' + fn, unmodal);
|
import_js(SR + '/.cpr/deps/' + fn, unmodal);
|
||||||
|
|
||||||
if (HTTPS) {
|
if (HTTPS) {
|
||||||
// chrome<37 firefox<34 edge<12 opera<24 safari<7
|
// chrome<37 firefox<34 edge<12 opera<24 safari<7
|
||||||
@@ -865,7 +865,7 @@ function up2k_init(subtle) {
|
|||||||
bcfg_bind(uc, 'turbo', 'u2turbo', turbolvl > 1, draw_turbo);
|
bcfg_bind(uc, 'turbo', 'u2turbo', turbolvl > 1, draw_turbo);
|
||||||
bcfg_bind(uc, 'datechk', 'u2tdate', turbolvl < 3, null);
|
bcfg_bind(uc, 'datechk', 'u2tdate', turbolvl < 3, null);
|
||||||
bcfg_bind(uc, 'az', 'u2sort', u2sort.indexOf('n') + 1, set_u2sort);
|
bcfg_bind(uc, 'az', 'u2sort', u2sort.indexOf('n') + 1, set_u2sort);
|
||||||
bcfg_bind(uc, 'hashw', 'hashw', !!window.WebAssembly && (!subtle || !CHROME || MOBILE), set_hashw);
|
bcfg_bind(uc, 'hashw', 'hashw', !!window.WebAssembly && (!subtle || !CHROME || MOBILE || VCHROME >= 107), set_hashw);
|
||||||
bcfg_bind(uc, 'upnag', 'upnag', false, set_upnag);
|
bcfg_bind(uc, 'upnag', 'upnag', false, set_upnag);
|
||||||
bcfg_bind(uc, 'upsfx', 'upsfx', false);
|
bcfg_bind(uc, 'upsfx', 'upsfx', false);
|
||||||
|
|
||||||
@@ -904,6 +904,16 @@ function up2k_init(subtle) {
|
|||||||
"u": "",
|
"u": "",
|
||||||
"t": ""
|
"t": ""
|
||||||
},
|
},
|
||||||
|
"etaw": {
|
||||||
|
"h": [['', 0, 0, 0]],
|
||||||
|
"u": [['', 0, 0, 0]],
|
||||||
|
"t": [['', 0, 0, 0]]
|
||||||
|
},
|
||||||
|
"etac": {
|
||||||
|
"h": 0,
|
||||||
|
"u": 0,
|
||||||
|
"t": 0
|
||||||
|
},
|
||||||
"car": 0,
|
"car": 0,
|
||||||
"slow_io": null,
|
"slow_io": null,
|
||||||
"oserr": false,
|
"oserr": false,
|
||||||
@@ -928,7 +938,7 @@ function up2k_init(subtle) {
|
|||||||
r.st = st;
|
r.st = st;
|
||||||
r.uc = uc;
|
r.uc = uc;
|
||||||
|
|
||||||
if (!window.File || !File.prototype.slice || !window.FileReader || !window.FileList)
|
if (!window.File || !window.FileReader || !window.FileList || !File.prototype || !File.prototype.slice)
|
||||||
return un2k(L.u_ever);
|
return un2k(L.u_ever);
|
||||||
|
|
||||||
var flag = false;
|
var flag = false;
|
||||||
@@ -938,15 +948,19 @@ function up2k_init(subtle) {
|
|||||||
function nav() {
|
function nav() {
|
||||||
start_actx();
|
start_actx();
|
||||||
|
|
||||||
|
var uf = function () { ebi('file' + fdom_ctr).click(); },
|
||||||
|
ud = function () { ebi('dir' + fdom_ctr).click(); };
|
||||||
|
|
||||||
// too buggy on chrome <= 72
|
// too buggy on chrome <= 72
|
||||||
var m = / Chrome\/([0-9]+)\./.exec(navigator.userAgent);
|
var m = / Chrome\/([0-9]+)\./.exec(navigator.userAgent);
|
||||||
if (m && parseInt(m[1]) < 73)
|
if (m && parseInt(m[1]) < 73)
|
||||||
return ebi('file' + fdom_ctr).click();
|
return uf();
|
||||||
|
|
||||||
modal.confirm(L.u_nav_m,
|
// phones dont support folder upload
|
||||||
function () { ebi('file' + fdom_ctr).click(); },
|
if (MOBILE)
|
||||||
function () { ebi('dir' + fdom_ctr).click(); },
|
return uf();
|
||||||
null, L.u_nav_b);
|
|
||||||
|
modal.confirm(L.u_nav_m, uf, ud, null, L.u_nav_b);
|
||||||
}
|
}
|
||||||
ebi('u2btn').onclick = nav;
|
ebi('u2btn').onclick = nav;
|
||||||
|
|
||||||
@@ -1120,7 +1134,7 @@ function up2k_init(subtle) {
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
var wi = fobj.webkitGetAsEntry();
|
var wi = fobj.getAsEntry ? fobj.getAsEntry() : fobj.webkitGetAsEntry();
|
||||||
if (wi.isDirectory) {
|
if (wi.isDirectory) {
|
||||||
dirs.push(wi);
|
dirs.push(wi);
|
||||||
continue;
|
continue;
|
||||||
@@ -1202,7 +1216,7 @@ function up2k_init(subtle) {
|
|||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
var name = dn.fullPath;
|
var name = dn.fullPath;
|
||||||
if (name.indexOf('/') === 0)
|
if (name.startsWith('/'))
|
||||||
name = name.slice(1);
|
name = name.slice(1);
|
||||||
|
|
||||||
pf.push(name);
|
pf.push(name);
|
||||||
@@ -1225,7 +1239,16 @@ function up2k_init(subtle) {
|
|||||||
dirs.shift();
|
dirs.shift();
|
||||||
rd = null;
|
rd = null;
|
||||||
}
|
}
|
||||||
return read_dirs(rd, pf, dirs, good, nil, bad, spins);
|
read_dirs(rd, pf, dirs, good, nil, bad, spins);
|
||||||
|
}, function () {
|
||||||
|
var dn = dirs[0],
|
||||||
|
name = dn.fullPath;
|
||||||
|
|
||||||
|
if (name.startsWith('/'))
|
||||||
|
name = name.slice(1);
|
||||||
|
|
||||||
|
bad.push([dn, name + '/']);
|
||||||
|
read_dirs(null, pf, dirs.slice(1), good, nil, bad, spins);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1299,7 +1322,7 @@ function up2k_init(subtle) {
|
|||||||
|
|
||||||
if (window.WebAssembly && !hws.length) {
|
if (window.WebAssembly && !hws.length) {
|
||||||
for (var a = 0; a < Math.min(navigator.hardwareConcurrency || 4, 16); a++)
|
for (var a = 0; a < Math.min(navigator.hardwareConcurrency || 4, 16); a++)
|
||||||
hws.push(new Worker('/.cpr/w.hash.js' + CB));
|
hws.push(new Worker(SR + '/.cpr/w.hash.js' + CB));
|
||||||
|
|
||||||
console.log(hws.length + " hashers");
|
console.log(hws.length + " hashers");
|
||||||
}
|
}
|
||||||
@@ -1467,10 +1490,20 @@ function up2k_init(subtle) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (var a = 0; a < t.length; a++) {
|
for (var a = 0; a < t.length; a++) {
|
||||||
var rem = st.bytes.total - t[a][2],
|
var hid = t[a][0],
|
||||||
bps = t[a][1] / t[a][3],
|
|
||||||
hid = t[a][0],
|
|
||||||
eid = hid.slice(-1),
|
eid = hid.slice(-1),
|
||||||
|
etaw = st.etaw[eid];
|
||||||
|
|
||||||
|
if (st.etac[eid] > 100) { // num chunks
|
||||||
|
st.etac[eid] = 0;
|
||||||
|
etaw.push(jcp(t[a]));
|
||||||
|
if (etaw.length > 5)
|
||||||
|
etaw.shift();
|
||||||
|
}
|
||||||
|
|
||||||
|
var h = etaw[0],
|
||||||
|
rem = st.bytes.total - t[a][2],
|
||||||
|
bps = (t[a][1] - h[1]) / Math.max(0.1, t[a][3] - h[3]),
|
||||||
eta = Math.floor(rem / bps);
|
eta = Math.floor(rem / bps);
|
||||||
|
|
||||||
if (t[a][1] < 1024 || t[a][3] < 0.1) {
|
if (t[a][1] < 1024 || t[a][3] < 0.1) {
|
||||||
@@ -1495,7 +1528,7 @@ function up2k_init(subtle) {
|
|||||||
|
|
||||||
st.oserr = true;
|
st.oserr = true;
|
||||||
var msg = HTTPS ? L.u_emtleak3 : L.u_emtleak2.format((window.location + '').replace(':', 's:'));
|
var msg = HTTPS ? L.u_emtleak3 : L.u_emtleak2.format((window.location + '').replace(':', 's:'));
|
||||||
modal.alert(L.u_emtleak1 + msg + L.u_emtleak4 + (CHROME ? L.u_emtleakc : FIREFOX ? L.u_emtleakf : ''));
|
modal.alert(L.u_emtleak1 + msg + (CHROME ? L.u_emtleakc : FIREFOX ? L.u_emtleakf : ''));
|
||||||
}
|
}
|
||||||
|
|
||||||
/////
|
/////
|
||||||
@@ -1800,7 +1833,7 @@ function up2k_init(subtle) {
|
|||||||
while (true) {
|
while (true) {
|
||||||
for (var mul = 1; mul <= 2; mul++) {
|
for (var mul = 1; mul <= 2; mul++) {
|
||||||
var nchunks = Math.ceil(filesize / chunksize);
|
var nchunks = Math.ceil(filesize / chunksize);
|
||||||
if (nchunks <= 256 || chunksize >= 32 * 1024 * 1024)
|
if (nchunks <= 256 || (chunksize >= 32 * 1024 * 1024 && nchunks <= 4096))
|
||||||
return chunksize;
|
return chunksize;
|
||||||
|
|
||||||
chunksize += stepsize;
|
chunksize += stepsize;
|
||||||
@@ -1841,6 +1874,7 @@ function up2k_init(subtle) {
|
|||||||
cdr = Math.min(chunksize + car, t.size);
|
cdr = Math.min(chunksize + car, t.size);
|
||||||
|
|
||||||
st.bytes.hashed += cdr - car;
|
st.bytes.hashed += cdr - car;
|
||||||
|
st.etac.h++;
|
||||||
|
|
||||||
function orz(e) {
|
function orz(e) {
|
||||||
bpend--;
|
bpend--;
|
||||||
@@ -2088,7 +2122,7 @@ function up2k_init(subtle) {
|
|||||||
try { orz(e); } catch (ex) { vis_exh(ex + '', 'up2k.js', '', '', ex); }
|
try { orz(e); } catch (ex) { vis_exh(ex + '', 'up2k.js', '', '', ex); }
|
||||||
};
|
};
|
||||||
|
|
||||||
xhr.open('HEAD', t.purl + uricom_enc(t.name) + '?raw', true);
|
xhr.open('HEAD', t.purl + uricom_enc(t.name), true);
|
||||||
xhr.send();
|
xhr.send();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2284,8 +2318,8 @@ function up2k_init(subtle) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
var err_pend = rsp.indexOf('partial upload exists') + 1,
|
var err_pend = rsp.indexOf('partial upload exists at a different') + 1,
|
||||||
err_dupe = rsp.indexOf('file already exists') + 1;
|
err_dupe = rsp.indexOf('upload rejected, file already exists') + 1;
|
||||||
|
|
||||||
if (err_pend || err_dupe) {
|
if (err_pend || err_dupe) {
|
||||||
err = rsp;
|
err = rsp;
|
||||||
@@ -2390,6 +2424,8 @@ function up2k_init(subtle) {
|
|||||||
st.bytes.finished += cdr - car;
|
st.bytes.finished += cdr - car;
|
||||||
st.bytes.uploaded += cdr - car;
|
st.bytes.uploaded += cdr - car;
|
||||||
t.bytes_uploaded += cdr - car;
|
t.bytes_uploaded += cdr - car;
|
||||||
|
st.etac.u++;
|
||||||
|
st.etac.t++;
|
||||||
}
|
}
|
||||||
else if (txt.indexOf('already got that') + 1 ||
|
else if (txt.indexOf('already got that') + 1 ||
|
||||||
txt.indexOf('already being written') + 1) {
|
txt.indexOf('already being written') + 1) {
|
||||||
@@ -2497,7 +2533,7 @@ function up2k_init(subtle) {
|
|||||||
tt.att(QS('#u2conf'));
|
tt.att(QS('#u2conf'));
|
||||||
|
|
||||||
function bumpthread2(e) {
|
function bumpthread2(e) {
|
||||||
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing)
|
if (anymod(e))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (e.code == 'ArrowUp')
|
if (e.code == 'ArrowUp')
|
||||||
@@ -2571,6 +2607,7 @@ function up2k_init(subtle) {
|
|||||||
el.innerHTML = '<div>' + L.u_life_cfg + '</div><div>' + L.u_life_est + '</div><div id="undor"></div>';
|
el.innerHTML = '<div>' + L.u_life_cfg + '</div><div>' + L.u_life_est + '</div><div id="undor"></div>';
|
||||||
set_life(Math.min(lifetime, icfg_get('lifetime', lifetime)));
|
set_life(Math.min(lifetime, icfg_get('lifetime', lifetime)));
|
||||||
ebi('lifem').oninput = ebi('lifeh').oninput = mod_life;
|
ebi('lifem').oninput = ebi('lifeh').oninput = mod_life;
|
||||||
|
ebi('lifem').onkeydown = ebi('lifeh').onkeydown = kd_life;
|
||||||
tt.att(ebi('u2life'));
|
tt.att(ebi('u2life'));
|
||||||
}
|
}
|
||||||
draw_life();
|
draw_life();
|
||||||
@@ -2596,12 +2633,23 @@ function up2k_init(subtle) {
|
|||||||
set_life(v);
|
set_life(v);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function kd_life(e) {
|
||||||
|
var el = e.target,
|
||||||
|
d = e.code == 'ArrowUp' ? 1 : e.code == 'ArrowDown' ? -1 : 0;
|
||||||
|
|
||||||
|
if (anymod(e) || !d)
|
||||||
|
return;
|
||||||
|
|
||||||
|
el.value = parseInt(el.value) + d;
|
||||||
|
mod_life(e);
|
||||||
|
}
|
||||||
|
|
||||||
function set_life(v) {
|
function set_life(v) {
|
||||||
//ebi('lifes').value = v;
|
//ebi('lifes').value = v;
|
||||||
ebi('lifem').value = parseInt(v / 60);
|
ebi('lifem').value = parseInt(v / 60);
|
||||||
ebi('lifeh').value = parseInt(v / 3600);
|
ebi('lifeh').value = parseInt(v / 3600);
|
||||||
|
|
||||||
var undo = have_unpost - (v || lifetime);
|
var undo = have_unpost - (v ? lifetime - v : 0);
|
||||||
ebi('undor').innerHTML = undo <= 0 ?
|
ebi('undor').innerHTML = undo <= 0 ?
|
||||||
L.u_unp_ng : L.u_unp_ok.format(lhumantime(undo));
|
L.u_unp_ng : L.u_unp_ok.format(lhumantime(undo));
|
||||||
|
|
||||||
|
|||||||
@@ -1,21 +1,36 @@
|
|||||||
"use strict";
|
"use strict";
|
||||||
|
|
||||||
if (!window['console'])
|
if (!window.console || !console.log)
|
||||||
window['console'] = {
|
window.console = {
|
||||||
"log": function (msg) { }
|
"log": function (msg) { }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
var wah = '',
|
var wah = '',
|
||||||
|
L, tt, treectl, thegrid, up2k, asmCrypto, hashwasm, vbar, marked,
|
||||||
CB = '?_=' + Date.now(),
|
CB = '?_=' + Date.now(),
|
||||||
|
R = SR.slice(1),
|
||||||
|
RS = R ? "/" + R : "",
|
||||||
HALFMAX = 8192 * 8192 * 8192 * 8192,
|
HALFMAX = 8192 * 8192 * 8192 * 8192,
|
||||||
HTTPS = (window.location + '').indexOf('https:') === 0,
|
HTTPS = (window.location + '').indexOf('https:') === 0,
|
||||||
TOUCH = 'ontouchstart' in window,
|
TOUCH = 'ontouchstart' in window,
|
||||||
MOBILE = TOUCH,
|
MOBILE = TOUCH,
|
||||||
CHROME = !!window.chrome,
|
CHROME = !!window.chrome,
|
||||||
|
VCHROME = CHROME ? 1 : 0,
|
||||||
FIREFOX = ('netscape' in window) && / rv:/.test(navigator.userAgent),
|
FIREFOX = ('netscape' in window) && / rv:/.test(navigator.userAgent),
|
||||||
IPHONE = TOUCH && /iPhone|iPad|iPod/i.test(navigator.userAgent),
|
IPHONE = TOUCH && /iPhone|iPad|iPod/i.test(navigator.userAgent),
|
||||||
WINDOWS = navigator.platform ? navigator.platform == 'Win32' : /Windows/.test(navigator.userAgent);
|
LINUX = /Linux/.test(navigator.userAgent),
|
||||||
|
MACOS = /[^a-z]mac ?os/i.test(navigator.userAgent),
|
||||||
|
WINDOWS = /Windows/.test(navigator.userAgent);
|
||||||
|
|
||||||
|
if (!window.WebAssembly || !WebAssembly.Memory)
|
||||||
|
window.WebAssembly = false;
|
||||||
|
|
||||||
|
if (!window.Notification || !Notification.permission)
|
||||||
|
window.Notification = false;
|
||||||
|
|
||||||
|
if (!window.FormData)
|
||||||
|
window.FormData = false;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
CB = '?' + document.currentScript.src.split('?').pop();
|
CB = '?' + document.currentScript.src.split('?').pop();
|
||||||
@@ -26,8 +41,13 @@ try {
|
|||||||
if (navigator.userAgentData.platform == 'Windows')
|
if (navigator.userAgentData.platform == 'Windows')
|
||||||
WINDOWS = true;
|
WINDOWS = true;
|
||||||
|
|
||||||
if (navigator.userAgentData.brands.some(function (d) { return d.brand == 'Chromium' }))
|
CHROME = navigator.userAgentData.brands.find(function (d) { return d.brand == 'Chromium' });
|
||||||
CHROME = true;
|
if (CHROME)
|
||||||
|
VCHROME = CHROME.version;
|
||||||
|
else
|
||||||
|
VCHROME = 0;
|
||||||
|
|
||||||
|
CHROME = !!CHROME;
|
||||||
}
|
}
|
||||||
catch (ex) { }
|
catch (ex) { }
|
||||||
|
|
||||||
@@ -38,11 +58,15 @@ var ebi = document.getElementById.bind(document),
|
|||||||
XHR = XMLHttpRequest;
|
XHR = XMLHttpRequest;
|
||||||
|
|
||||||
|
|
||||||
function mknod(et, eid) {
|
function mknod(et, eid, html) {
|
||||||
var ret = document.createElement(et);
|
var ret = document.createElement(et);
|
||||||
|
|
||||||
if (eid)
|
if (eid)
|
||||||
ret.id = eid;
|
ret.id = eid;
|
||||||
|
|
||||||
|
if (html)
|
||||||
|
ret.innerHTML = html;
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,7 +157,7 @@ function vis_exh(msg, url, lineNo, columnNo, error) {
|
|||||||
window.onerror = undefined;
|
window.onerror = undefined;
|
||||||
var html = [
|
var html = [
|
||||||
'<h1>you hit a bug!</h1>',
|
'<h1>you hit a bug!</h1>',
|
||||||
'<p style="font-size:1.3em;margin:0">try to <a href="#" onclick="localStorage.clear();location.reload();">reset copyparty settings</a> if you are stuck here, or <a href="#" onclick="ignex();">ignore this</a> / <a href="#" onclick="ignex(true);">ignore all</a></p>',
|
'<p style="font-size:1.3em;margin:0">try to <a href="#" onclick="localStorage.clear();location.reload();">reset copyparty settings</a> if you are stuck here, or <a href="#" onclick="ignex();">ignore this</a> / <a href="#" onclick="ignex(true);">ignore all</a> / <a href="?b=u">basic</a></p>',
|
||||||
'<p style="color:#fff">please send me a screenshot arigathanks gozaimuch: <a href="<ghi>" target="_blank">github issue</a> or <code>ed#2644</code></p>',
|
'<p style="color:#fff">please send me a screenshot arigathanks gozaimuch: <a href="<ghi>" target="_blank">github issue</a> or <code>ed#2644</code></p>',
|
||||||
'<p class="b">' + esc(url + ' @' + lineNo + ':' + columnNo), '<br />' + esc(String(msg)).replace(/\n/g, '<br />') + '</p>',
|
'<p class="b">' + esc(url + ' @' + lineNo + ':' + columnNo), '<br />' + esc(String(msg)).replace(/\n/g, '<br />') + '</p>',
|
||||||
'<p><b>UA:</b> ' + esc(navigator.userAgent + '')
|
'<p><b>UA:</b> ' + esc(navigator.userAgent + '')
|
||||||
@@ -229,6 +253,11 @@ function ctrl(e) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function anymod(e, shift_ok) {
|
||||||
|
return e && (e.ctrlKey || e.altKey || e.metaKey || e.isComposing || (!shift_ok && e.shiftKey));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
function ev(e) {
|
function ev(e) {
|
||||||
e = e || window.event;
|
e = e || window.event;
|
||||||
if (!e)
|
if (!e)
|
||||||
@@ -381,13 +410,14 @@ function clgot(el, cls) {
|
|||||||
|
|
||||||
|
|
||||||
var ANIM = true;
|
var ANIM = true;
|
||||||
if (window.matchMedia) {
|
try {
|
||||||
var mq = window.matchMedia('(prefers-reduced-motion: reduce)');
|
var mq = window.matchMedia('(prefers-reduced-motion: reduce)');
|
||||||
mq.onchange = function () {
|
mq.onchange = function () {
|
||||||
ANIM = !mq.matches;
|
ANIM = !mq.matches;
|
||||||
};
|
};
|
||||||
ANIM = !mq.matches;
|
ANIM = !mq.matches;
|
||||||
}
|
}
|
||||||
|
catch (ex) { }
|
||||||
|
|
||||||
|
|
||||||
function yscroll() {
|
function yscroll() {
|
||||||
@@ -747,7 +777,7 @@ function lhumantime(v) {
|
|||||||
var t = shumantime(v, 1),
|
var t = shumantime(v, 1),
|
||||||
tp = t.replace(/([a-z])/g, " $1 ").split(/ /g).slice(0, -1);
|
tp = t.replace(/([a-z])/g, " $1 ").split(/ /g).slice(0, -1);
|
||||||
|
|
||||||
if (!window.L || tp.length < 2 || tp[1].indexOf('$') + 1)
|
if (!L || tp.length < 2 || tp[1].indexOf('$') + 1)
|
||||||
return t;
|
return t;
|
||||||
|
|
||||||
var ret = '';
|
var ret = '';
|
||||||
@@ -1251,8 +1281,8 @@ var modal = (function () {
|
|||||||
tok, tng, prim, sec, ok_cancel;
|
tok, tng, prim, sec, ok_cancel;
|
||||||
|
|
||||||
r.load = function () {
|
r.load = function () {
|
||||||
tok = (window.L && L.m_ok) || 'OK';
|
tok = (L && L.m_ok) || 'OK';
|
||||||
tng = (window.L && L.m_ng) || 'Cancel';
|
tng = (L && L.m_ng) || 'Cancel';
|
||||||
prim = '<a href="#" id="modal-ok">' + tok + '</a>';
|
prim = '<a href="#" id="modal-ok">' + tok + '</a>';
|
||||||
sec = '<a href="#" id="modal-ng">' + tng + '</a>';
|
sec = '<a href="#" id="modal-ng">' + tng + '</a>';
|
||||||
ok_cancel = WINDOWS ? prim + sec : sec + prim;
|
ok_cancel = WINDOWS ? prim + sec : sec + prim;
|
||||||
@@ -1606,13 +1636,32 @@ var favico = (function () {
|
|||||||
})();
|
})();
|
||||||
|
|
||||||
|
|
||||||
|
function cprop(name) {
|
||||||
|
return getComputedStyle(document.documentElement).getPropertyValue(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function bchrome() {
|
||||||
|
console.log(document.documentElement.className);
|
||||||
|
var v, o = QS('meta[name=theme-color]');
|
||||||
|
if (!o)
|
||||||
|
return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
v = cprop('--bg-u3');
|
||||||
|
}
|
||||||
|
catch (ex) { }
|
||||||
|
o.setAttribute('content', v ? v : document.documentElement.className.indexOf('y') + 1 ? '#eee' : '#333');
|
||||||
|
}
|
||||||
|
bchrome();
|
||||||
|
|
||||||
var cf_cha_t = 0;
|
var cf_cha_t = 0;
|
||||||
function xhrchk(xhr, prefix, e404, lvl, tag) {
|
function xhrchk(xhr, prefix, e404, lvl, tag) {
|
||||||
if (xhr.status < 400 && xhr.status >= 200)
|
if (xhr.status < 400 && xhr.status >= 200)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
if (xhr.status == 403)
|
if (xhr.status == 403)
|
||||||
return toast.err(0, prefix + (window.L && L.xhr403 || "403: access denied\n\ntry pressing F5, maybe you got logged out"), tag);
|
return toast.err(0, prefix + (L && L.xhr403 || "403: access denied\n\ntry pressing F5, maybe you got logged out"), tag);
|
||||||
|
|
||||||
if (xhr.status == 404)
|
if (xhr.status == 404)
|
||||||
return toast.err(0, prefix + e404, tag);
|
return toast.err(0, prefix + e404, tag);
|
||||||
@@ -1631,7 +1680,7 @@ function xhrchk(xhr, prefix, e404, lvl, tag) {
|
|||||||
|
|
||||||
qsr('#cf_frame');
|
qsr('#cf_frame');
|
||||||
var fr = mknod('iframe', 'cf_frame');
|
var fr = mknod('iframe', 'cf_frame');
|
||||||
fr.src = '/?cf_challenge';
|
fr.src = SR + '/?cf_challenge';
|
||||||
document.body.appendChild(fr);
|
document.body.appendChild(fr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ catch (ex) {
|
|||||||
}
|
}
|
||||||
function load_fb() {
|
function load_fb() {
|
||||||
subtle = null;
|
subtle = null;
|
||||||
importScripts('/.cpr/deps/sha512.hw.js');
|
importScripts('deps/sha512.hw.js');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,131 @@
|
|||||||
|
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
||||||
|
# 2022-1203-2048 `v1.5.1` babel
|
||||||
|
|
||||||
|
named after [that other thing](https://en.wikipedia.org/wiki/Tower_of_Babel), not [the song](https://soundcloud.com/kanaze/babel-dimension-0-remix)
|
||||||
|
* read-only demo server at https://a.ocv.me/pub/demo/
|
||||||
|
|
||||||
|
## new features
|
||||||
|
* new protocols!
|
||||||
|
* native IPv6 support, no longer requiring a reverse-proxy for that
|
||||||
|
* [webdav server](https://github.com/9001/copyparty#webdav-server) -- read/write-access to copyparty straight from windows explorer, macos finder, kde/gnome
|
||||||
|
* [smb/cifs server](https://github.com/9001/copyparty#smb-server) -- extremely buggy and unsafe, for when there is no other choice
|
||||||
|
* [zeroconf](https://github.com/9001/copyparty#zeroconf) -- copyparty announces itself on the LAN, showing up in various file managers
|
||||||
|
* [mdns](https://github.com/9001/copyparty#mdns) -- macos/kde/gnome + makes copyparty available at http://hostname.local/
|
||||||
|
* [ssdp](https://github.com/9001/copyparty#ssdp) -- windows
|
||||||
|
* commands to mount copyparty as a local disk are in the web-UI at control-panel --> `connect`
|
||||||
|
* detect buggy / malicious clients spamming the server with idle connections
|
||||||
|
* first tries to be nice with `Connection: close` (enough to fix windows-webdav)
|
||||||
|
* eventually bans the IP for `--loris` minutes (default: 1 hour)
|
||||||
|
* new arg `--xlink` for cross-volume detection of duplicate files on upload
|
||||||
|
* new arg `--no-snap` to disable upload tracking on restart
|
||||||
|
* will not create `.hist` folders unless required for thumbnails or markdown backups
|
||||||
|
* [config includes](https://github.com/9001/copyparty/blob/hovudstraum/docs/example2.conf) -- split your config across multiple config files
|
||||||
|
* ux improvements
|
||||||
|
* hotkey `?` shows a summary of all the hotkeys
|
||||||
|
* hotkey `Y` to download selected files
|
||||||
|
* position indicator when hovering over the audio scrubber
|
||||||
|
* textlabel on the volume slider
|
||||||
|
* placeholder values in textboxes
|
||||||
|
* options to hide scrollbars, compact media player, follow playing song
|
||||||
|
* phone-specific
|
||||||
|
* buttons for prev/next folder
|
||||||
|
* much better ui for hiding folder columns
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
* now possible to upload files larger than 697 GiB
|
||||||
|
* technically a [breaking change](https://github.com/9001/copyparty#breaking-changes) if you wrote your own up2k client
|
||||||
|
* please let me know if you did because that's awesome
|
||||||
|
* several macos issues due to hardcoded syscall numbers
|
||||||
|
* sfx: fix python 3.12 support (forbids nullbytes in source code)
|
||||||
|
* use ctypes to discover network config -- fixes grapheneos, non-english windows
|
||||||
|
* detect firefox showing stale markdown documents in the editor
|
||||||
|
* detect+ban password bruteforcing on ftp too
|
||||||
|
* http 206 failing on empty files
|
||||||
|
* incorrect header timestamps on non-english locales
|
||||||
|
* remind ftp clients that you cannot cd into an image file -- fixes kde dolphin
|
||||||
|
* ux fixes
|
||||||
|
* uploader survives running into inaccessible folders
|
||||||
|
* middleclick documents in the textviewer sidebar to open in a new tab
|
||||||
|
* playing really long audio files (1 week or more) would spinlock the browser
|
||||||
|
|
||||||
|
## other changes
|
||||||
|
* autodetect max number of clients based on OS limits
|
||||||
|
* `-nc` is probably no longer necessary when running behind a reverse-proxy
|
||||||
|
* allow/try playing mkv files in chrome
|
||||||
|
* markdown documents returned as plaintext unless `?v`
|
||||||
|
* only compress `-lo` logfiles if filename ends with `.xz`
|
||||||
|
* changed sfx compression from bz2 to gz
|
||||||
|
* startup is slightly faster
|
||||||
|
* better compatibility with embedded linux
|
||||||
|
* copyparty64.exe -- 64bit edition for [running inside WinPE](https://user-images.githubusercontent.com/241032/205454984-e6b550df-3c49-486d-9267-1614078dd0dd.png)
|
||||||
|
* which was an actual feature request, believe it or not!
|
||||||
|
* more attempts at avoiding the [firefox fd leak](https://bugzilla.mozilla.org/show_bug.cgi?id=1790500)
|
||||||
|
* if you are uploading many small files and the browser keeps crashing, use chrome instead
|
||||||
|
* or the commandline client, which is now available for download straight from copyparty
|
||||||
|
* control-panel --> `connect` --> `up2k.py`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
||||||
|
# 2022-1013-1937 `v1.4.6` wav2opus
|
||||||
|
|
||||||
|
* read-only demo server at https://a.ocv.me/pub/demo/
|
||||||
|
* latest gzip edition of the sfx: *This version*
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
* the option to transcode flac to opus while playing audio in the browser was supposed to transcode wav-files as well, instead of being extremely hazardous to mobile data plans (sorry)
|
||||||
|
* `--license` didn't work if copyparty was installed from `pip`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
||||||
|
# 2022-1009-0919 `v1.4.5` qr-code
|
||||||
|
|
||||||
|
* read-only demo server at https://a.ocv.me/pub/demo/
|
||||||
|
* latest gzip edition of the sfx: [v1.0.14](https://github.com/9001/copyparty/releases/tag/v1.0.14#:~:text=release-specific%20notes)
|
||||||
|
|
||||||
|
## new features
|
||||||
|
* display a server [qr-code](https://github.com/9001/copyparty#qr-code) [(screenshot)](https://user-images.githubusercontent.com/241032/194728533-6f00849b-c6ac-43c6-9359-83e454d11e00.png) on startup
|
||||||
|
* primarily for running copyparty on a phone and accessing it from another
|
||||||
|
* optionally specify a path or password with `--qrl lootbox/?pw=hunter2`
|
||||||
|
* uses the server's exteral ip (default route) unless `--qri` specifies a domain / ip-prefix
|
||||||
|
* classic cp437 `▄` `▀` for space efficiency; some misbehaving terminals / fonts need `--qrz 2`
|
||||||
|
* new permission `G` returns the filekey of uploaded files for users without read-access
|
||||||
|
* when combined with permission `w` and volflag `fk`, uploaded files will not be accessible unless the filekey is provided in the url, and `G` provides the filekey to the uploader unlike `g`
|
||||||
|
* filekeys are added to the unpost listing
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
* renaming / moving folders is now **at least 120x faster**
|
||||||
|
* and that's on nvme drives, so probably like 2000x on HDDs
|
||||||
|
* uploads to volumes with lifetimes could get instapurged depending on browser and browser settings
|
||||||
|
* ux fixes
|
||||||
|
* FINALLY fixed messageboxes appearing offscreen on phones (and some other layout issues)
|
||||||
|
* stop asking about folder-uploads on phones because they dont support it
|
||||||
|
* on android-firefox, default to truncating huge folders with the load-more button due to ff onscroll being buggy
|
||||||
|
* audioplayer looking funky if ffmpeg unavailable
|
||||||
|
* waveform-seekbar cache expiration (the thumbcleaner complaining about png files)
|
||||||
|
* ie11 panic when opening a folder which contains a file named `up2k`
|
||||||
|
* turns out `<a name=foo>` becomes `window.foo` unless that's already declared somewhere in js -- luckily other browsers "only" do that with IDs
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
||||||
|
# 2022-0926-2037 `v1.4.3` signal in the noise
|
||||||
|
|
||||||
|
* read-only demo server at https://a.ocv.me/pub/demo/
|
||||||
|
* latest gzip edition of the sfx: [v1.0.14](https://github.com/9001/copyparty/releases/tag/v1.0.14#:~:text=release-specific%20notes)
|
||||||
|
|
||||||
|
## new features
|
||||||
|
* `--bak-flips` saves a copy of corrupted / bitflipped up2k uploads
|
||||||
|
* comparing against a good copy can help pinpoint the culprit
|
||||||
|
* also see [tracking bitflips](https://github.com/9001/copyparty/blob/hovudstraum/docs/notes.sh#:~:text=tracking%20bitflips)
|
||||||
|
|
||||||
|
## bugfixes
|
||||||
|
* some edgecases where deleted files didn't get dropped from the db
|
||||||
|
* can reduce performance over time, hitting the filesystem more than necessary
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
|
||||||
# 2022-0925-1236 `v1.4.2` fuhgeddaboudit
|
# 2022-0925-1236 `v1.4.2` fuhgeddaboudit
|
||||||
|
|
||||||
|
|||||||
5
docs/copyparty.d/foo/another.conf
Normal file
5
docs/copyparty.d/foo/another.conf
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# this file gets included twice from ../some.conf,
|
||||||
|
# setting user permissions for a volume
|
||||||
|
rw usr1
|
||||||
|
r usr2
|
||||||
|
% sibling.conf
|
||||||
3
docs/copyparty.d/foo/sibling.conf
Normal file
3
docs/copyparty.d/foo/sibling.conf
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# and this config file gets included from ./another.conf,
|
||||||
|
# adding a final permission for each of the two volumes in ../some.conf
|
||||||
|
m usr1 usr2
|
||||||
26
docs/copyparty.d/some.conf
Normal file
26
docs/copyparty.d/some.conf
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# lets make two volumes with the same accounts/permissions for both;
|
||||||
|
# first declare the accounts just once:
|
||||||
|
u usr1:passw0rd
|
||||||
|
u usr2:letmein
|
||||||
|
|
||||||
|
# and listen on 127.0.0.1 only, port 2434
|
||||||
|
-i 127.0.0.1
|
||||||
|
-p 2434
|
||||||
|
|
||||||
|
# share /usr/share/games from the server filesystem
|
||||||
|
/usr/share/games
|
||||||
|
/vidya
|
||||||
|
# include config file with volume permissions
|
||||||
|
% foo/another.conf
|
||||||
|
|
||||||
|
# and share your ~/Music folder too
|
||||||
|
~/Music
|
||||||
|
/bangers
|
||||||
|
% foo/another.conf
|
||||||
|
|
||||||
|
# which should result in each of the volumes getting the following permissions:
|
||||||
|
# usr1 read/write/move
|
||||||
|
# usr2 read/move
|
||||||
|
#
|
||||||
|
# because another.conf sets the read/write permissions before it
|
||||||
|
# includes sibling.conf which adds the move permission
|
||||||
286
docs/devnotes.md
Normal file
286
docs/devnotes.md
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
## devnotes toc
|
||||||
|
|
||||||
|
* top
|
||||||
|
* [future plans](#future-plans) - some improvement ideas
|
||||||
|
* [design](#design)
|
||||||
|
* [up2k](#up2k) - quick outline of the up2k protocol
|
||||||
|
* [why chunk-hashes](#why-chunk-hashes) - a single sha512 would be better, right?
|
||||||
|
* [http api](#http-api)
|
||||||
|
* [read](#read)
|
||||||
|
* [write](#write)
|
||||||
|
* [admin](#admin)
|
||||||
|
* [general](#general)
|
||||||
|
* [assumptions](#assumptions)
|
||||||
|
* [mdns](#mdns)
|
||||||
|
* [sfx repack](#sfx-repack) - reduce the size of an sfx by removing features
|
||||||
|
* [building](#building)
|
||||||
|
* [dev env setup](#dev-env-setup)
|
||||||
|
* [just the sfx](#just-the-sfx)
|
||||||
|
* [complete release](#complete-release)
|
||||||
|
* [todo](#todo) - roughly sorted by priority
|
||||||
|
* [discarded ideas](#discarded-ideas)
|
||||||
|
|
||||||
|
|
||||||
|
# future plans
|
||||||
|
|
||||||
|
some improvement ideas
|
||||||
|
|
||||||
|
* the JS is a mess -- a preact rewrite would be nice
|
||||||
|
* preferably without build dependencies like webpack/babel/node.js, maybe a python thing to assemble js files into main.js
|
||||||
|
* good excuse to look at using virtual lists (browsers start to struggle when folders contain over 5000 files)
|
||||||
|
* the UX is a mess -- a proper design would be nice
|
||||||
|
* very organic (much like the python/js), everything was an afterthought
|
||||||
|
* true for both the layout and the visual flair
|
||||||
|
* something like the tron board-room ui (or most other hollywood ones, like ironman) would be :100:
|
||||||
|
* some of the python files are way too big
|
||||||
|
* `up2k.py` ended up doing all the file indexing / db management
|
||||||
|
* `httpcli.py` should be separated into modules in general
|
||||||
|
|
||||||
|
|
||||||
|
# design
|
||||||
|
|
||||||
|
## up2k
|
||||||
|
|
||||||
|
quick outline of the up2k protocol, see [uploading](https://github.com/9001/copyparty#uploading) for the web-client
|
||||||
|
* the up2k client splits a file into an "optimal" number of chunks
|
||||||
|
* 1 MiB each, unless that becomes more than 256 chunks
|
||||||
|
* tries 1.5M, 2M, 3, 4, 6, ... until <= 256 chunks or size >= 32M
|
||||||
|
* client posts the list of hashes, filename, size, last-modified
|
||||||
|
* server creates the `wark`, an identifier for this upload
|
||||||
|
* `sha512( salt + filesize + chunk_hashes )`
|
||||||
|
* and a sparse file is created for the chunks to drop into
|
||||||
|
* client uploads each chunk
|
||||||
|
* header entries for the chunk-hash and wark
|
||||||
|
* server writes chunks into place based on the hash
|
||||||
|
* client does another handshake with the hashlist; server replies with OK or a list of chunks to reupload
|
||||||
|
|
||||||
|
up2k has saved a few uploads from becoming corrupted in-transfer already;
|
||||||
|
* caught an android phone on wifi redhanded in wireshark with a bitflip, however bup with https would *probably* have noticed as well (thanks to tls also functioning as an integrity check)
|
||||||
|
* also stopped someone from uploading because their ram was bad
|
||||||
|
|
||||||
|
regarding the frequent server log message during uploads;
|
||||||
|
`6.0M 106M/s 2.77G 102.9M/s n948 thank 4/0/3/1 10042/7198 00:01:09`
|
||||||
|
* this chunk was `6 MiB`, uploaded at `106 MiB/s`
|
||||||
|
* on this http connection, `2.77 GiB` transferred, `102.9 MiB/s` average, `948` chunks handled
|
||||||
|
* client says `4` uploads OK, `0` failed, `3` busy, `1` queued, `10042 MiB` total size, `7198 MiB` and `00:01:09` left
|
||||||
|
|
||||||
|
## why chunk-hashes
|
||||||
|
|
||||||
|
a single sha512 would be better, right?
|
||||||
|
|
||||||
|
this was due to `crypto.subtle` [not yet](https://github.com/w3c/webcrypto/issues/73) providing a streaming api (or the option to seed the sha512 hasher with a starting hash)
|
||||||
|
|
||||||
|
as a result, the hashes are much less useful than they could have been (search the server by sha512, provide the sha512 in the response http headers, ...)
|
||||||
|
|
||||||
|
however it allows for hashing multiple chunks in parallel, greatly increasing upload speed from fast storage (NVMe, raid-0 and such)
|
||||||
|
|
||||||
|
* both the [browser uploader](https://github.com/9001/copyparty#uploading) and the [commandline one](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) does this now, allowing for fast uploading even from plaintext http
|
||||||
|
|
||||||
|
hashwasm would solve the streaming issue but reduces hashing speed for sha512 (xxh128 does 6 GiB/s), and it would make old browsers and [iphones](https://bugs.webkit.org/show_bug.cgi?id=228552) unsupported
|
||||||
|
|
||||||
|
* blake2 might be a better choice since xxh is non-cryptographic, but that gets ~15 MiB/s on slower androids
|
||||||
|
|
||||||
|
|
||||||
|
# http api
|
||||||
|
|
||||||
|
* table-column `params` = URL parameters; `?foo=bar&qux=...`
|
||||||
|
* table-column `body` = POST payload
|
||||||
|
* method `jPOST` = json post
|
||||||
|
* method `mPOST` = multipart post
|
||||||
|
* method `uPOST` = url-encoded post
|
||||||
|
* `FILE` = conventional HTTP file upload entry (rfc1867 et al, filename in `Content-Disposition`)
|
||||||
|
|
||||||
|
authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
|
||||||
|
|
||||||
|
## read
|
||||||
|
|
||||||
|
| method | params | result |
|
||||||
|
|--|--|--|
|
||||||
|
| GET | `?ls` | list files/folders at URL as JSON |
|
||||||
|
| GET | `?ls&dots` | list files/folders at URL as JSON, including dotfiles |
|
||||||
|
| GET | `?ls=t` | list files/folders at URL as plaintext |
|
||||||
|
| GET | `?ls=v` | list files/folders at URL, terminal-formatted |
|
||||||
|
| GET | `?b` | list files/folders at URL as simplified HTML |
|
||||||
|
| GET | `?tree=.` | list one level of subdirectories inside URL |
|
||||||
|
| GET | `?tree` | list one level of subdirectories for each level until URL |
|
||||||
|
| GET | `?tar` | download everything below URL as a tar file |
|
||||||
|
| GET | `?zip=utf-8` | download everything below URL as a zip file |
|
||||||
|
| GET | `?ups` | show recent uploads from your IP |
|
||||||
|
| GET | `?ups&filter=f` | ...where URL contains `f` |
|
||||||
|
| GET | `?mime=foo` | specify return mimetype `foo` |
|
||||||
|
| GET | `?v` | render markdown file at URL |
|
||||||
|
| GET | `?txt` | get file at URL as plaintext |
|
||||||
|
| GET | `?txt=iso-8859-1` | ...with specific charset |
|
||||||
|
| GET | `?th` | get image/video at URL as thumbnail |
|
||||||
|
| GET | `?th=opus` | convert audio file to 128kbps opus |
|
||||||
|
| GET | `?th=caf` | ...in the iOS-proprietary container |
|
||||||
|
|
||||||
|
| method | body | result |
|
||||||
|
|--|--|--|
|
||||||
|
| jPOST | `{"q":"foo"}` | do a server-wide search; see the `[🔎]` search tab `raw` field for syntax |
|
||||||
|
|
||||||
|
| method | params | body | result |
|
||||||
|
|--|--|--|--|
|
||||||
|
| jPOST | `?tar` | `["foo","bar"]` | download folders `foo` and `bar` inside URL as a tar file |
|
||||||
|
|
||||||
|
## write
|
||||||
|
|
||||||
|
| method | params | result |
|
||||||
|
|--|--|--|
|
||||||
|
| GET | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar |
|
||||||
|
|
||||||
|
| method | params | body | result |
|
||||||
|
|--|--|--|--|
|
||||||
|
| PUT | | (binary data) | upload into file at URL |
|
||||||
|
| PUT | `?gz` | (binary data) | compress with gzip and write into file at URL |
|
||||||
|
| PUT | `?xz` | (binary data) | compress with xz and write into file at URL |
|
||||||
|
| mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL |
|
||||||
|
| mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json |
|
||||||
|
| mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL |
|
||||||
|
| GET | `?delete` | | delete URL recursively |
|
||||||
|
| jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively |
|
||||||
|
| uPOST | | `msg=foo` | send message `foo` into server log |
|
||||||
|
| mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL |
|
||||||
|
|
||||||
|
upload modifiers:
|
||||||
|
|
||||||
|
| http-header | url-param | effect |
|
||||||
|
|--|--|--|
|
||||||
|
| `Accept: url` | `want=url` | return just the file URL |
|
||||||
|
| `Rand: 4` | `rand=4` | generate random filename with 4 characters |
|
||||||
|
| `Life: 30` | `life=30` | delete file after 30 seconds |
|
||||||
|
|
||||||
|
* `life` only has an effect if the volume has a lifetime, and the volume lifetime must be greater than the file's
|
||||||
|
|
||||||
|
* server behavior of `msg` can be reconfigured with `--urlform`
|
||||||
|
|
||||||
|
## admin
|
||||||
|
|
||||||
|
| method | params | result |
|
||||||
|
|--|--|--|
|
||||||
|
| GET | `?reload=cfg` | reload config files and rescan volumes |
|
||||||
|
| GET | `?scan` | initiate a rescan of the volume which provides URL |
|
||||||
|
| GET | `?stack` | show a stacktrace of all threads |
|
||||||
|
|
||||||
|
## general
|
||||||
|
|
||||||
|
| method | params | result |
|
||||||
|
|--|--|--|
|
||||||
|
| GET | `?pw=x` | logout |
|
||||||
|
|
||||||
|
|
||||||
|
# assumptions
|
||||||
|
|
||||||
|
## mdns
|
||||||
|
|
||||||
|
* outgoing replies will always fit in one packet
|
||||||
|
* if a client mentions any of our services, assume it's not missing any
|
||||||
|
* always answer with all services, even if the client only asked for a few
|
||||||
|
* not-impl: probe tiebreaking (too complicated)
|
||||||
|
* not-impl: unicast listen (assume avahi took it)
|
||||||
|
|
||||||
|
|
||||||
|
# sfx repack
|
||||||
|
|
||||||
|
reduce the size of an sfx by removing features
|
||||||
|
|
||||||
|
if you don't need all the features, you can repack the sfx and save a bunch of space; all you need is an sfx and a copy of this repo (nothing else to download or build, except if you're on windows then you need msys2 or WSL)
|
||||||
|
* `393k` size of original sfx.py as of v1.1.3
|
||||||
|
* `310k` after `./scripts/make-sfx.sh re no-cm`
|
||||||
|
* `269k` after `./scripts/make-sfx.sh re no-cm no-hl`
|
||||||
|
|
||||||
|
the features you can opt to drop are
|
||||||
|
* `cm`/easymde, the "fancy" markdown editor, saves ~82k
|
||||||
|
* `hl`, prism, the syntax hilighter, saves ~41k
|
||||||
|
* `fnt`, source-code-pro, the monospace font, saves ~9k
|
||||||
|
* `dd`, the custom mouse cursor for the media player tray tab, saves ~2k
|
||||||
|
|
||||||
|
for the `re`pack to work, first run one of the sfx'es once to unpack it
|
||||||
|
|
||||||
|
**note:** you can also just download and run [/scripts/copyparty-repack.sh](https://github.com/9001/copyparty/blob/hovudstraum/scripts/copyparty-repack.sh) -- this will grab the latest copyparty release from github and do a few repacks; works on linux/macos (and windows with msys2 or WSL)
|
||||||
|
|
||||||
|
|
||||||
|
# building
|
||||||
|
|
||||||
|
## dev env setup
|
||||||
|
|
||||||
|
you need python 3.9 or newer due to type hints
|
||||||
|
|
||||||
|
the rest is mostly optional; if you need a working env for vscode or similar
|
||||||
|
|
||||||
|
```sh
|
||||||
|
python3 -m venv .venv
|
||||||
|
. .venv/bin/activate
|
||||||
|
pip install jinja2 strip_hints # MANDATORY
|
||||||
|
pip install mutagen # audio metadata
|
||||||
|
pip install pyftpdlib # ftp server
|
||||||
|
pip install impacket # smb server -- disable Windows Defender if you REALLY need this on windows
|
||||||
|
pip install Pillow pyheif-pillow-opener pillow-avif-plugin # thumbnails
|
||||||
|
pip install black==21.12b0 click==8.0.2 bandit pylint flake8 isort mypy # vscode tooling
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## just the sfx
|
||||||
|
|
||||||
|
first grab the web-dependencies from a previous sfx (assuming you don't need to modify something in those):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
rm -rf copyparty/web/deps
|
||||||
|
curl -L https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py >x.py
|
||||||
|
python3 x.py --version
|
||||||
|
rm x.py
|
||||||
|
mv /tmp/pe-copyparty/copyparty/web/deps/ copyparty/web/deps/
|
||||||
|
```
|
||||||
|
|
||||||
|
then build the sfx using any of the following examples:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./scripts/make-sfx.sh # regular edition
|
||||||
|
./scripts/make-sfx.sh gz no-cm # gzip-compressed + no fancy markdown editor
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## complete release
|
||||||
|
|
||||||
|
also builds the sfx so skip the sfx section above
|
||||||
|
|
||||||
|
in the `scripts` folder:
|
||||||
|
|
||||||
|
* run `make -C deps-docker` to build all dependencies
|
||||||
|
* run `./rls.sh 1.2.3` which uploads to pypi + creates github release + sfx
|
||||||
|
|
||||||
|
|
||||||
|
# todo
|
||||||
|
|
||||||
|
roughly sorted by priority
|
||||||
|
|
||||||
|
* nothing! currently
|
||||||
|
|
||||||
|
|
||||||
|
## discarded ideas
|
||||||
|
|
||||||
|
* reduce up2k roundtrips
|
||||||
|
* start from a chunk index and just go
|
||||||
|
* terminate client on bad data
|
||||||
|
* not worth the effort, just throw enough conncetions at it
|
||||||
|
* single sha512 across all up2k chunks?
|
||||||
|
* crypto.subtle cannot into streaming, would have to use hashwasm, expensive
|
||||||
|
* separate sqlite table per tag
|
||||||
|
* performance fixed by skipping some indexes (`+mt.k`)
|
||||||
|
* audio fingerprinting
|
||||||
|
* only makes sense if there can be a wasm client and that doesn't exist yet (except for olaf which is agpl hence counts as not existing)
|
||||||
|
* `os.copy_file_range` for up2k cloning
|
||||||
|
* almost never hit this path anyways
|
||||||
|
* up2k partials ui
|
||||||
|
* feels like there isn't much point
|
||||||
|
* cache sha512 chunks on client
|
||||||
|
* too dangerous -- overtaken by turbo mode
|
||||||
|
* comment field
|
||||||
|
* nah
|
||||||
|
* look into android thumbnail cache file format
|
||||||
|
* absolutely not
|
||||||
|
* indexedDB for hashes, cfg enable/clear/sz, 2gb avail, ~9k for 1g, ~4k for 100m, 500k items before autoeviction
|
||||||
|
* blank hashlist when up-ok to skip handshake
|
||||||
|
* too many confusing side-effects
|
||||||
|
* hls framework for Someone Else to drop code into :^)
|
||||||
|
* probably not, too much stuff to consider -- seeking, start at offset, task stitching (probably np-hard), conditional passthru, rate-control (especially multi-consumer), session keepalive, cache mgmt...
|
||||||
13
docs/example2.conf
Normal file
13
docs/example2.conf
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# you can include additional config like this
|
||||||
|
# (the space after the % is important)
|
||||||
|
#
|
||||||
|
# since copyparty.d is a folder, it'll include all *.conf
|
||||||
|
# files inside (not recursively) in alphabetical order
|
||||||
|
# (not necessarily same as numerical/natural order)
|
||||||
|
#
|
||||||
|
# paths are relative from the location of each included file
|
||||||
|
# unless the path is absolute, for example % /etc/copyparty.d
|
||||||
|
#
|
||||||
|
# max include depth is 64
|
||||||
|
|
||||||
|
% copyparty.d
|
||||||
@@ -12,8 +12,20 @@ https://github.com/pallets/markupsafe/
|
|||||||
C: 2010 Pallets
|
C: 2010 Pallets
|
||||||
L: BSD 3-Clause
|
L: BSD 3-Clause
|
||||||
|
|
||||||
|
https://github.com/paulc/dnslib/
|
||||||
|
C: 2010-2017 Paul Chakravarti
|
||||||
|
L: BSD 2-Clause
|
||||||
|
|
||||||
|
https://github.com/pydron/ifaddr/
|
||||||
|
C: 2014 Stefan C. Mueller
|
||||||
|
L: BSD-2-Clause
|
||||||
|
|
||||||
https://github.com/giampaolo/pyftpdlib/
|
https://github.com/giampaolo/pyftpdlib/
|
||||||
C: 2007 Giampaolo Rodola'
|
C: 2007 Giampaolo Rodola
|
||||||
|
L: MIT
|
||||||
|
|
||||||
|
https://github.com/nayuki/QR-Code-generator/
|
||||||
|
C: Project Nayuki
|
||||||
L: MIT
|
L: MIT
|
||||||
|
|
||||||
https://github.com/python/cpython/blob/3.10/Lib/asyncore.py
|
https://github.com/python/cpython/blob/3.10/Lib/asyncore.py
|
||||||
|
|||||||
@@ -67,6 +67,7 @@ mkdir -p "${dirs[@]}"
|
|||||||
for dir in "${dirs[@]}"; do for fn in ふが "$(printf \\xed\\x93)" 'qw,er;ty%20as df?gh+jkl%zxc&vbn <qwe>"rty'"'"'uio&asd fgh'; do echo "$dir" > "$dir/$fn.html"; done; done
|
for dir in "${dirs[@]}"; do for fn in ふが "$(printf \\xed\\x93)" 'qw,er;ty%20as df?gh+jkl%zxc&vbn <qwe>"rty'"'"'uio&asd fgh'; do echo "$dir" > "$dir/$fn.html"; done; done
|
||||||
# qw er+ty%20ui%%20op<as>df&gh&jk#zx'cv"bn`m=qw*er^ty?ui@op,as.df-gh_jk
|
# qw er+ty%20ui%%20op<as>df&gh&jk#zx'cv"bn`m=qw*er^ty?ui@op,as.df-gh_jk
|
||||||
|
|
||||||
|
|
||||||
##
|
##
|
||||||
## upload mojibake
|
## upload mojibake
|
||||||
|
|
||||||
@@ -143,6 +144,17 @@ sqlite3 -readonly up2k.db.key-full 'select w, v from mt where k = "key" order by
|
|||||||
sqlite3 -readonly up2k.db.key-full 'select w, v from mt where k = "key" order by w' > k1; sqlite3 -readonly up2k.db 'select mt.w, mt.v, up.rd, up.fn from mt inner join up on mt.w = substr(up.w,1,16) where mt.k = "key" order by up.rd, up.fn' > k2; ok=0; ng=0; while IFS='|' read w k2 path; do k1="$(grep -E "^$w" k1 | sed -r 's/.*\|//')"; [ "$k1" = "$k2" ] && ok=$((ok+1)) || { ng=$((ng+1)); printf '%3s %3s %s\n' "$k1" "$k2" "$path"; }; done < <(cat k2); echo "match $ok diff $ng"
|
sqlite3 -readonly up2k.db.key-full 'select w, v from mt where k = "key" order by w' > k1; sqlite3 -readonly up2k.db 'select mt.w, mt.v, up.rd, up.fn from mt inner join up on mt.w = substr(up.w,1,16) where mt.k = "key" order by up.rd, up.fn' > k2; ok=0; ng=0; while IFS='|' read w k2 path; do k1="$(grep -E "^$w" k1 | sed -r 's/.*\|//')"; [ "$k1" = "$k2" ] && ok=$((ok+1)) || { ng=$((ng+1)); printf '%3s %3s %s\n' "$k1" "$k2" "$path"; }; done < <(cat k2); echo "match $ok diff $ng"
|
||||||
|
|
||||||
|
|
||||||
|
##
|
||||||
|
## scanning for exceptions
|
||||||
|
|
||||||
|
cd /dev/shm
|
||||||
|
journalctl -aS '720 hour ago' -t python3 -o with-unit --utc | cut -d\ -f2,6- > cpp.log
|
||||||
|
tac cpp.log | awk '/RuntimeError: generator ignored GeneratorExit/{n=1} n{n--;if(n==0)print} 1' | grep 'generator ignored GeneratorExit' -C7 | head -n 100
|
||||||
|
awk '/Exception ignored in: <generator object StreamZip.gen/{s=1;next} /could not create thumbnail/{s=3;next} s{s--;next} 1' <cpp.log | less -R
|
||||||
|
less-search:
|
||||||
|
>: |Exception|Traceback
|
||||||
|
|
||||||
|
|
||||||
##
|
##
|
||||||
## tracking bitflips
|
## tracking bitflips
|
||||||
|
|
||||||
@@ -168,6 +180,7 @@ printf ' %s [%s]\n' $h2 "$(grep -F $h2 <handshakes | head -n 1)"
|
|||||||
# BUT the clients will immediately re-handshake the upload with the same bitflipped hashes, so the uploaders have to refresh their browsers before you do that,
|
# BUT the clients will immediately re-handshake the upload with the same bitflipped hashes, so the uploaders have to refresh their browsers before you do that,
|
||||||
# so maybe just ask them to refresh and do nothing for 6 hours so the timeout kicks in, which deletes the placeholders/name-reservations and you can then manually delete the .PARTIALs at some point later
|
# so maybe just ask them to refresh and do nothing for 6 hours so the timeout kicks in, which deletes the placeholders/name-reservations and you can then manually delete the .PARTIALs at some point later
|
||||||
|
|
||||||
|
|
||||||
##
|
##
|
||||||
## media
|
## media
|
||||||
|
|
||||||
@@ -214,9 +227,6 @@ for d in /usr /var; do find $d -type f -size +30M 2>/dev/null; done | while IFS=
|
|||||||
brew install python@2
|
brew install python@2
|
||||||
pip install virtualenv
|
pip install virtualenv
|
||||||
|
|
||||||
# readme toc
|
|
||||||
cat README.md | awk 'function pr() { if (!h) {return}; if (/^ *[*!#|]/||!s) {printf "%s\n",h;h=0;return}; if (/.../) {printf "%s - %s\n",h,$0;h=0}; }; /^#/{s=1;pr()} /^#* *(install on android|dev env setup|just the sfx|complete release|optional gpl stuff)|`$/{s=0} /^#/{lv=length($1);sub(/[^ ]+ /,"");bab=$0;gsub(/ /,"-",bab); h=sprintf("%" ((lv-1)*4+1) "s [%s](#%s)", "*",$0,bab);next} !h{next} {sub(/ .*/,"");sub(/[:;,]$/,"")} {pr()}' > toc; grep -E '^## readme toc' -B1000 -A2 <README.md >p1; grep -E '^## quickstart' -B2 -A999999 <README.md >p2; (cat p1; grep quickstart -A1000 <toc; cat p2) >README.md; rm p1 p2 toc
|
|
||||||
|
|
||||||
# fix firefox phantom breakpoints,
|
# fix firefox phantom breakpoints,
|
||||||
# suggestions from bugtracker, doesnt work (debugger is not attachable)
|
# suggestions from bugtracker, doesnt work (debugger is not attachable)
|
||||||
devtools settings >> advanced >> enable browser chrome debugging + enable remote debugging
|
devtools settings >> advanced >> enable browser chrome debugging + enable remote debugging
|
||||||
|
|||||||
@@ -4,25 +4,32 @@ speed estimates with server and client on the same win10 machine:
|
|||||||
* `1070 MiB/s` with rclone as both server and client
|
* `1070 MiB/s` with rclone as both server and client
|
||||||
* `570 MiB/s` with rclone-client and `copyparty -ed -j16` as server
|
* `570 MiB/s` with rclone-client and `copyparty -ed -j16` as server
|
||||||
* `220 MiB/s` with rclone-client and `copyparty -ed` as server
|
* `220 MiB/s` with rclone-client and `copyparty -ed` as server
|
||||||
* `100 MiB/s` with [../bin/copyparty-fuse.py](../bin/copyparty-fuse.py) as client
|
* `100 MiB/s` with [../bin/partyfuse.py](../bin/partyfuse.py) as client
|
||||||
|
|
||||||
when server is on another machine (1gbit LAN),
|
when server is on another machine (1gbit LAN),
|
||||||
* `75 MiB/s` with [../bin/copyparty-fuse.py](../bin/copyparty-fuse.py) as client
|
* `75 MiB/s` with [../bin/partyfuse.py](../bin/partyfuse.py) as client
|
||||||
* `92 MiB/s` with rclone-client and `copyparty -ed` as server
|
* `92 MiB/s` with rclone-client and `copyparty -ed` as server
|
||||||
* `103 MiB/s` (connection max) with `copyparty -ed -j16` and all the others
|
* `103 MiB/s` (connection max) with `copyparty -ed -j16` and all the others
|
||||||
|
|
||||||
|
|
||||||
# creating the config file
|
# creating the config file
|
||||||
|
|
||||||
if you want to use password auth, add `headers = Cookie,cppwd=fgsfds` below
|
replace `hunter2` with your password, or remove the `hunter2` lines if you allow anonymous access
|
||||||
|
|
||||||
|
|
||||||
### on windows clients:
|
### on windows clients:
|
||||||
```
|
```
|
||||||
(
|
(
|
||||||
echo [cpp]
|
echo [cpp-rw]
|
||||||
|
echo type = webdav
|
||||||
|
echo vendor = other
|
||||||
|
echo url = http://127.0.0.1:3923/
|
||||||
|
echo headers = Cookie,cppwd=hunter2
|
||||||
|
echo(
|
||||||
|
echo [cpp-ro]
|
||||||
echo type = http
|
echo type = http
|
||||||
echo url = http://127.0.0.1:3923/
|
echo url = http://127.0.0.1:3923/
|
||||||
|
echo headers = Cookie,cppwd=hunter2
|
||||||
) > %userprofile%\.config\rclone\rclone.conf
|
) > %userprofile%\.config\rclone\rclone.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -32,16 +39,26 @@ also install the windows dependencies: [winfsp](https://github.com/billziss-gh/w
|
|||||||
### on unix clients:
|
### on unix clients:
|
||||||
```
|
```
|
||||||
cat > ~/.config/rclone/rclone.conf <<'EOF'
|
cat > ~/.config/rclone/rclone.conf <<'EOF'
|
||||||
[cpp]
|
[cpp-rw]
|
||||||
|
type = webdav
|
||||||
|
vendor = other
|
||||||
|
url = http://127.0.0.1:3923/
|
||||||
|
headers = Cookie,cppwd=hunter2
|
||||||
|
|
||||||
|
[cpp-ro]
|
||||||
type = http
|
type = http
|
||||||
url = http://127.0.0.1:3923/
|
url = http://127.0.0.1:3923/
|
||||||
|
headers = Cookie,cppwd=hunter2
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
# mounting the copyparty server locally
|
# mounting the copyparty server locally
|
||||||
|
|
||||||
|
connect to `cpp-rw:` for read-write, or `cpp-ro:` for read-only (twice as fast):
|
||||||
|
|
||||||
```
|
```
|
||||||
rclone.exe mount --vfs-cache-max-age 5s --attr-timeout 5s --dir-cache-time 5s cpp: Z:
|
rclone.exe mount --vfs-cache-mode writes --vfs-cache-max-age 5s --attr-timeout 5s --dir-cache-time 5s cpp-rw: W:
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
@@ -51,12 +68,5 @@ feels out of place but is too good not to mention
|
|||||||
|
|
||||||
```
|
```
|
||||||
rclone.exe serve http --read-only .
|
rclone.exe serve http --read-only .
|
||||||
|
rclone.exe serve webdav .
|
||||||
```
|
```
|
||||||
|
|
||||||
* `webdav` gives write-access but `http` is twice as fast
|
|
||||||
* `ftp` is buggy, avoid
|
|
||||||
|
|
||||||
|
|
||||||
# bugs
|
|
||||||
|
|
||||||
* rclone-client throws an exception if you try to read an empty file (should return zero bytes)
|
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ set -e
|
|||||||
# 23663 copyparty-extras/up2k.py
|
# 23663 copyparty-extras/up2k.py
|
||||||
# `- standalone utility to upload or search for files
|
# `- standalone utility to upload or search for files
|
||||||
#
|
#
|
||||||
# 32280 copyparty-extras/copyparty-fuse.py
|
# 32280 copyparty-extras/partyfuse.py
|
||||||
# `- standalone to mount a URL as a local read-only filesystem
|
# `- standalone to mount a URL as a local read-only filesystem
|
||||||
#
|
#
|
||||||
# 270004 copyparty
|
# 270004 copyparty
|
||||||
@@ -119,7 +119,7 @@ chmod 755 \
|
|||||||
|
|
||||||
# extract the sfx
|
# extract the sfx
|
||||||
( cd copyparty-extras/sfx-full/
|
( cd copyparty-extras/sfx-full/
|
||||||
./copyparty-sfx.py -h
|
./copyparty-sfx.py --version
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -148,7 +148,7 @@ repack sfx-lite "re no-dd no-cm no-hl gz"
|
|||||||
# delete extracted source code
|
# delete extracted source code
|
||||||
( cd copyparty-extras/
|
( cd copyparty-extras/
|
||||||
mv copyparty-*/bin/up2k.py .
|
mv copyparty-*/bin/up2k.py .
|
||||||
mv copyparty-*/bin/copyparty-fuse.py .
|
mv copyparty-*/bin/partyfuse.py .
|
||||||
cp -pv sfx-lite/copyparty-sfx.py ../copyparty
|
cp -pv sfx-lite/copyparty-sfx.py ../copyparty
|
||||||
rm -rf copyparty-{0..9}*.*.*{0..9}
|
rm -rf copyparty-{0..9}*.*.*{0..9}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
FROM alpine:3
|
# TODO easymde embeds codemirror on 3.17 due to new npm probably
|
||||||
|
FROM alpine:3.16
|
||||||
WORKDIR /z
|
WORKDIR /z
|
||||||
ENV ver_asmcrypto=5b994303a9d3e27e0915f72a10b6c2c51535a4dc \
|
ENV ver_asmcrypto=c72492f4a66e17a0e5dd8ad7874de354f3ccdaa5 \
|
||||||
ver_hashwasm=4.9.0 \
|
ver_hashwasm=4.9.0 \
|
||||||
ver_marked=4.0.18 \
|
ver_marked=4.2.3 \
|
||||||
ver_mde=2.18.0 \
|
ver_mde=2.18.0 \
|
||||||
ver_codemirror=5.65.9 \
|
ver_codemirror=5.65.10 \
|
||||||
ver_fontawesome=5.13.0 \
|
ver_fontawesome=5.13.0 \
|
||||||
ver_zopfli=1.0.3
|
ver_zopfli=1.0.3
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
diff --git a/src/Lexer.js b/src/Lexer.js
|
diff --git a/src/Lexer.js b/src/Lexer.js
|
||||||
adds linetracking to marked.js v4.0.17;
|
adds linetracking to marked.js v4.2.3;
|
||||||
add data-ln="%d" to most tags, %d is the source markdown line
|
add data-ln="%d" to most tags, %d is the source markdown line
|
||||||
--- a/src/Lexer.js
|
--- a/src/Lexer.js
|
||||||
+++ b/src/Lexer.js
|
+++ b/src/Lexer.js
|
||||||
@@ -123,20 +123,20 @@ add data-ln="%d" to most tags, %d is the source markdown line
|
|||||||
+ this.ln++;
|
+ this.ln++;
|
||||||
lastToken = tokens[tokens.length - 1];
|
lastToken = tokens[tokens.length - 1];
|
||||||
if (lastToken && lastToken.type === 'text') {
|
if (lastToken && lastToken.type === 'text') {
|
||||||
@@ -365,4 +396,5 @@ export class Lexer {
|
@@ -367,4 +398,5 @@ export class Lexer {
|
||||||
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
|
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
|
||||||
src = src.substring(token.raw.length);
|
src = src.substring(token.raw.length);
|
||||||
+ this.ln = token.ln || this.ln;
|
+ this.ln = token.ln || this.ln;
|
||||||
tokens.push(token);
|
tokens.push(token);
|
||||||
return true;
|
return true;
|
||||||
@@ -430,4 +462,6 @@ export class Lexer {
|
@@ -432,4 +464,6 @@ export class Lexer {
|
||||||
if (token = this.tokenizer.br(src)) {
|
if (token = this.tokenizer.br(src)) {
|
||||||
src = src.substring(token.raw.length);
|
src = src.substring(token.raw.length);
|
||||||
+ // no need to reset (no more blockTokens anyways)
|
+ // no need to reset (no more blockTokens anyways)
|
||||||
+ token.ln = this.ln++;
|
+ token.ln = this.ln++;
|
||||||
tokens.push(token);
|
tokens.push(token);
|
||||||
continue;
|
continue;
|
||||||
@@ -472,4 +506,5 @@ export class Lexer {
|
@@ -474,4 +508,5 @@ export class Lexer {
|
||||||
if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
|
if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
|
||||||
src = src.substring(token.raw.length);
|
src = src.substring(token.raw.length);
|
||||||
+ this.ln = token.ln || this.ln;
|
+ this.ln = token.ln || this.ln;
|
||||||
@@ -234,7 +234,7 @@ index 7c36a75..aa1a53a 100644
|
|||||||
- return '<pre><code class="'
|
- return '<pre><code class="'
|
||||||
+ return '<pre' + this.ln + '><code class="'
|
+ return '<pre' + this.ln + '><code class="'
|
||||||
+ this.options.langPrefix
|
+ this.options.langPrefix
|
||||||
+ escape(lang, true)
|
+ escape(lang)
|
||||||
@@ -43,5 +49,5 @@ export class Renderer {
|
@@ -43,5 +49,5 @@ export class Renderer {
|
||||||
*/
|
*/
|
||||||
blockquote(quote) {
|
blockquote(quote) {
|
||||||
@@ -293,7 +293,7 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
index e8a69b6..2cc772b 100644
|
index e8a69b6..2cc772b 100644
|
||||||
--- a/src/Tokenizer.js
|
--- a/src/Tokenizer.js
|
||||||
+++ b/src/Tokenizer.js
|
+++ b/src/Tokenizer.js
|
||||||
@@ -302,4 +302,7 @@ export class Tokenizer {
|
@@ -312,4 +312,7 @@ export class Tokenizer {
|
||||||
const l = list.items.length;
|
const l = list.items.length;
|
||||||
|
|
||||||
+ // each nested list gets +1 ahead; this hack makes every listgroup -1 but atleast it doesn't get infinitely bad
|
+ // each nested list gets +1 ahead; this hack makes every listgroup -1 but atleast it doesn't get infinitely bad
|
||||||
|
|||||||
@@ -1,35 +1,35 @@
|
|||||||
diff --git a/src/Lexer.js b/src/Lexer.js
|
diff --git a/src/Lexer.js b/src/Lexer.js
|
||||||
--- a/src/Lexer.js
|
--- a/src/Lexer.js
|
||||||
+++ b/src/Lexer.js
|
+++ b/src/Lexer.js
|
||||||
@@ -6,5 +6,5 @@ import { repeatString } from './helpers.js';
|
@@ -7,5 +7,5 @@ import { repeatString } from './helpers.js';
|
||||||
/**
|
|
||||||
* smartypants text replacement
|
* smartypants text replacement
|
||||||
|
* @param {string} text
|
||||||
- */
|
- */
|
||||||
+ *
|
+ *
|
||||||
function smartypants(text) {
|
function smartypants(text) {
|
||||||
return text
|
return text
|
||||||
@@ -27,5 +27,5 @@ function smartypants(text) {
|
@@ -29,5 +29,5 @@ function smartypants(text) {
|
||||||
/**
|
|
||||||
* mangle email addresses
|
* mangle email addresses
|
||||||
|
* @param {string} text
|
||||||
- */
|
- */
|
||||||
+ *
|
+ *
|
||||||
function mangle(text) {
|
function mangle(text) {
|
||||||
let out = '',
|
let out = '',
|
||||||
@@ -466,5 +466,5 @@ export class Lexer {
|
@@ -478,5 +478,5 @@ export class Lexer {
|
||||||
|
|
||||||
// autolink
|
// autolink
|
||||||
- if (token = this.tokenizer.autolink(src, mangle)) {
|
- if (token = this.tokenizer.autolink(src, mangle)) {
|
||||||
+ if (token = this.tokenizer.autolink(src)) {
|
+ if (token = this.tokenizer.autolink(src)) {
|
||||||
src = src.substring(token.raw.length);
|
src = src.substring(token.raw.length);
|
||||||
tokens.push(token);
|
tokens.push(token);
|
||||||
@@ -473,5 +473,5 @@ export class Lexer {
|
@@ -485,5 +485,5 @@ export class Lexer {
|
||||||
|
|
||||||
// url (gfm)
|
// url (gfm)
|
||||||
- if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
|
- if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
|
||||||
+ if (!this.state.inLink && (token = this.tokenizer.url(src))) {
|
+ if (!this.state.inLink && (token = this.tokenizer.url(src))) {
|
||||||
src = src.substring(token.raw.length);
|
src = src.substring(token.raw.length);
|
||||||
tokens.push(token);
|
tokens.push(token);
|
||||||
@@ -494,5 +494,5 @@ export class Lexer {
|
@@ -506,5 +506,5 @@ export class Lexer {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
- if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
|
- if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
|
||||||
@@ -39,15 +39,15 @@ diff --git a/src/Lexer.js b/src/Lexer.js
|
|||||||
diff --git a/src/Renderer.js b/src/Renderer.js
|
diff --git a/src/Renderer.js b/src/Renderer.js
|
||||||
--- a/src/Renderer.js
|
--- a/src/Renderer.js
|
||||||
+++ b/src/Renderer.js
|
+++ b/src/Renderer.js
|
||||||
@@ -142,5 +142,5 @@ export class Renderer {
|
@@ -173,5 +173,5 @@ export class Renderer {
|
||||||
|
*/
|
||||||
link(href, title, text) {
|
link(href, title, text) {
|
||||||
- href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
|
- href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
|
||||||
+ href = cleanUrl(this.options.baseUrl, href);
|
+ href = cleanUrl(this.options.baseUrl, href);
|
||||||
if (href === null) {
|
if (href === null) {
|
||||||
return text;
|
return text;
|
||||||
@@ -155,5 +155,5 @@ export class Renderer {
|
@@ -191,5 +191,5 @@ export class Renderer {
|
||||||
|
*/
|
||||||
image(href, title, text) {
|
image(href, title, text) {
|
||||||
- href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
|
- href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
|
||||||
+ href = cleanUrl(this.options.baseUrl, href);
|
+ href = cleanUrl(this.options.baseUrl, href);
|
||||||
@@ -56,7 +56,7 @@ diff --git a/src/Renderer.js b/src/Renderer.js
|
|||||||
diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
||||||
--- a/src/Tokenizer.js
|
--- a/src/Tokenizer.js
|
||||||
+++ b/src/Tokenizer.js
|
+++ b/src/Tokenizer.js
|
||||||
@@ -320,14 +320,7 @@ export class Tokenizer {
|
@@ -352,14 +352,7 @@ export class Tokenizer {
|
||||||
type: 'html',
|
type: 'html',
|
||||||
raw: cap[0],
|
raw: cap[0],
|
||||||
- pre: !this.options.sanitizer
|
- pre: !this.options.sanitizer
|
||||||
@@ -65,14 +65,14 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
text: cap[0]
|
text: cap[0]
|
||||||
};
|
};
|
||||||
- if (this.options.sanitize) {
|
- if (this.options.sanitize) {
|
||||||
|
- const text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
|
||||||
- token.type = 'paragraph';
|
- token.type = 'paragraph';
|
||||||
- token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
|
- token.text = text;
|
||||||
- token.tokens = [];
|
- token.tokens = this.lexer.inline(text);
|
||||||
- this.lexer.inline(token.text, token.tokens);
|
|
||||||
- }
|
- }
|
||||||
return token;
|
return token;
|
||||||
}
|
}
|
||||||
@@ -476,15 +469,9 @@ export class Tokenizer {
|
@@ -502,15 +495,9 @@ export class Tokenizer {
|
||||||
|
|
||||||
return {
|
return {
|
||||||
- type: this.options.sanitize
|
- type: this.options.sanitize
|
||||||
@@ -90,7 +90,7 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
+ text: cap[0]
|
+ text: cap[0]
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -671,10 +658,10 @@ export class Tokenizer {
|
@@ -699,10 +686,10 @@ export class Tokenizer {
|
||||||
}
|
}
|
||||||
|
|
||||||
- autolink(src, mangle) {
|
- autolink(src, mangle) {
|
||||||
@@ -103,7 +103,7 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
+ text = escape(cap[1]);
|
+ text = escape(cap[1]);
|
||||||
href = 'mailto:' + text;
|
href = 'mailto:' + text;
|
||||||
} else {
|
} else {
|
||||||
@@ -699,10 +686,10 @@ export class Tokenizer {
|
@@ -727,10 +714,10 @@ export class Tokenizer {
|
||||||
}
|
}
|
||||||
|
|
||||||
- url(src, mangle) {
|
- url(src, mangle) {
|
||||||
@@ -116,7 +116,7 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
+ text = escape(cap[0]);
|
+ text = escape(cap[0]);
|
||||||
href = 'mailto:' + text;
|
href = 'mailto:' + text;
|
||||||
} else {
|
} else {
|
||||||
@@ -736,12 +723,12 @@ export class Tokenizer {
|
@@ -764,12 +751,12 @@ export class Tokenizer {
|
||||||
}
|
}
|
||||||
|
|
||||||
- inlineText(src, smartypants) {
|
- inlineText(src, smartypants) {
|
||||||
@@ -135,7 +135,7 @@ diff --git a/src/Tokenizer.js b/src/Tokenizer.js
|
|||||||
diff --git a/src/defaults.js b/src/defaults.js
|
diff --git a/src/defaults.js b/src/defaults.js
|
||||||
--- a/src/defaults.js
|
--- a/src/defaults.js
|
||||||
+++ b/src/defaults.js
|
+++ b/src/defaults.js
|
||||||
@@ -9,12 +9,8 @@ export function getDefaults() {
|
@@ -10,11 +10,7 @@ export function getDefaults() {
|
||||||
highlight: null,
|
highlight: null,
|
||||||
langPrefix: 'language-',
|
langPrefix: 'language-',
|
||||||
- mangle: true,
|
- mangle: true,
|
||||||
@@ -144,16 +144,15 @@ diff --git a/src/defaults.js b/src/defaults.js
|
|||||||
- sanitize: false,
|
- sanitize: false,
|
||||||
- sanitizer: null,
|
- sanitizer: null,
|
||||||
silent: false,
|
silent: false,
|
||||||
smartLists: false,
|
|
||||||
- smartypants: false,
|
- smartypants: false,
|
||||||
tokenizer: null,
|
tokenizer: null,
|
||||||
walkTokens: null,
|
walkTokens: null,
|
||||||
diff --git a/src/helpers.js b/src/helpers.js
|
diff --git a/src/helpers.js b/src/helpers.js
|
||||||
--- a/src/helpers.js
|
--- a/src/helpers.js
|
||||||
+++ b/src/helpers.js
|
+++ b/src/helpers.js
|
||||||
@@ -64,18 +64,5 @@ export function edit(regex, opt) {
|
@@ -78,18 +78,5 @@ const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
|
||||||
const nonWordAndColonTest = /[^\w:]/g;
|
* @param {string} href
|
||||||
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
|
*/
|
||||||
-export function cleanUrl(sanitize, base, href) {
|
-export function cleanUrl(sanitize, base, href) {
|
||||||
- if (sanitize) {
|
- if (sanitize) {
|
||||||
- let prot;
|
- let prot;
|
||||||
@@ -171,7 +170,7 @@ diff --git a/src/helpers.js b/src/helpers.js
|
|||||||
+export function cleanUrl(base, href) {
|
+export function cleanUrl(base, href) {
|
||||||
if (base && !originIndependentUrl.test(href)) {
|
if (base && !originIndependentUrl.test(href)) {
|
||||||
href = resolveUrl(base, href);
|
href = resolveUrl(base, href);
|
||||||
@@ -227,10 +214,4 @@ export function findClosingBracket(str, b) {
|
@@ -250,10 +237,4 @@ export function findClosingBracket(str, b) {
|
||||||
}
|
}
|
||||||
|
|
||||||
-export function checkSanitizeDeprecation(opt) {
|
-export function checkSanitizeDeprecation(opt) {
|
||||||
@@ -181,7 +180,7 @@ diff --git a/src/helpers.js b/src/helpers.js
|
|||||||
-}
|
-}
|
||||||
-
|
-
|
||||||
// copied from https://stackoverflow.com/a/5450113/806777
|
// copied from https://stackoverflow.com/a/5450113/806777
|
||||||
export function repeatString(pattern, count) {
|
/**
|
||||||
diff --git a/src/marked.js b/src/marked.js
|
diff --git a/src/marked.js b/src/marked.js
|
||||||
--- a/src/marked.js
|
--- a/src/marked.js
|
||||||
+++ b/src/marked.js
|
+++ b/src/marked.js
|
||||||
@@ -197,13 +196,13 @@ diff --git a/src/marked.js b/src/marked.js
|
|||||||
- checkSanitizeDeprecation(opt);
|
- checkSanitizeDeprecation(opt);
|
||||||
|
|
||||||
if (callback) {
|
if (callback) {
|
||||||
@@ -302,5 +300,4 @@ marked.parseInline = function(src, opt) {
|
@@ -318,5 +316,4 @@ marked.parseInline = function(src, opt) {
|
||||||
|
|
||||||
opt = merge({}, marked.defaults, opt || {});
|
opt = merge({}, marked.defaults, opt || {});
|
||||||
- checkSanitizeDeprecation(opt);
|
- checkSanitizeDeprecation(opt);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
@@ -311,5 +308,5 @@ marked.parseInline = function(src, opt) {
|
@@ -327,5 +324,5 @@ marked.parseInline = function(src, opt) {
|
||||||
return Parser.parseInline(tokens, opt);
|
return Parser.parseInline(tokens, opt);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
- e.message += '\nPlease report this to https://github.com/markedjs/marked.';
|
- e.message += '\nPlease report this to https://github.com/markedjs/marked.';
|
||||||
@@ -213,42 +212,24 @@ diff --git a/src/marked.js b/src/marked.js
|
|||||||
diff --git a/test/bench.js b/test/bench.js
|
diff --git a/test/bench.js b/test/bench.js
|
||||||
--- a/test/bench.js
|
--- a/test/bench.js
|
||||||
+++ b/test/bench.js
|
+++ b/test/bench.js
|
||||||
@@ -37,5 +37,4 @@ export async function runBench(options) {
|
@@ -39,5 +39,4 @@ export async function runBench(options) {
|
||||||
breaks: false,
|
breaks: false,
|
||||||
pedantic: false,
|
pedantic: false,
|
||||||
- sanitize: false,
|
- sanitize: false
|
||||||
smartLists: false
|
|
||||||
});
|
});
|
||||||
@@ -49,5 +48,4 @@ export async function runBench(options) {
|
if (options.marked) {
|
||||||
|
@@ -50,5 +49,4 @@ export async function runBench(options) {
|
||||||
breaks: false,
|
breaks: false,
|
||||||
pedantic: false,
|
pedantic: false,
|
||||||
- sanitize: false,
|
- sanitize: false
|
||||||
smartLists: false
|
|
||||||
});
|
|
||||||
@@ -62,5 +60,4 @@ export async function runBench(options) {
|
|
||||||
breaks: false,
|
|
||||||
pedantic: false,
|
|
||||||
- sanitize: false,
|
|
||||||
smartLists: false
|
|
||||||
});
|
|
||||||
@@ -74,5 +71,4 @@ export async function runBench(options) {
|
|
||||||
breaks: false,
|
|
||||||
pedantic: false,
|
|
||||||
- sanitize: false,
|
|
||||||
smartLists: false
|
|
||||||
});
|
|
||||||
@@ -87,5 +83,4 @@ export async function runBench(options) {
|
|
||||||
breaks: false,
|
|
||||||
pedantic: true,
|
|
||||||
- sanitize: false,
|
|
||||||
smartLists: false
|
|
||||||
});
|
|
||||||
@@ -99,5 +94,4 @@ export async function runBench(options) {
|
|
||||||
breaks: false,
|
|
||||||
pedantic: true,
|
|
||||||
- sanitize: false,
|
|
||||||
smartLists: false
|
|
||||||
});
|
});
|
||||||
|
if (options.marked) {
|
||||||
|
@@ -61,5 +59,4 @@ export async function runBench(options) {
|
||||||
|
// breaks: false,
|
||||||
|
// pedantic: false,
|
||||||
|
- // sanitize: false
|
||||||
|
// });
|
||||||
|
// if (options.marked) {
|
||||||
diff --git a/test/specs/run-spec.js b/test/specs/run-spec.js
|
diff --git a/test/specs/run-spec.js b/test/specs/run-spec.js
|
||||||
--- a/test/specs/run-spec.js
|
--- a/test/specs/run-spec.js
|
||||||
+++ b/test/specs/run-spec.js
|
+++ b/test/specs/run-spec.js
|
||||||
@@ -269,70 +250,70 @@ diff --git a/test/specs/run-spec.js b/test/specs/run-spec.js
|
|||||||
diff --git a/test/unit/Lexer-spec.js b/test/unit/Lexer-spec.js
|
diff --git a/test/unit/Lexer-spec.js b/test/unit/Lexer-spec.js
|
||||||
--- a/test/unit/Lexer-spec.js
|
--- a/test/unit/Lexer-spec.js
|
||||||
+++ b/test/unit/Lexer-spec.js
|
+++ b/test/unit/Lexer-spec.js
|
||||||
@@ -635,5 +635,5 @@ paragraph
|
@@ -712,5 +712,5 @@ paragraph
|
||||||
});
|
});
|
||||||
|
|
||||||
- it('sanitize', () => {
|
- it('sanitize', () => {
|
||||||
+ /*it('sanitize', () => {
|
+ /*it('sanitize', () => {
|
||||||
expectTokens({
|
expectTokens({
|
||||||
md: '<div>html</div>',
|
md: '<div>html</div>',
|
||||||
@@ -653,5 +653,5 @@ paragraph
|
@@ -730,5 +730,5 @@ paragraph
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
- });
|
- });
|
||||||
+ });*/
|
+ });*/
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -698,5 +698,5 @@ paragraph
|
@@ -810,5 +810,5 @@ paragraph
|
||||||
});
|
});
|
||||||
|
|
||||||
- it('html sanitize', () => {
|
- it('html sanitize', () => {
|
||||||
+ /*it('html sanitize', () => {
|
+ /*it('html sanitize', () => {
|
||||||
expectInlineTokens({
|
expectInlineTokens({
|
||||||
md: '<div>html</div>',
|
md: '<div>html</div>',
|
||||||
@@ -706,5 +706,5 @@ paragraph
|
@@ -818,5 +818,5 @@ paragraph
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
- });
|
- });
|
||||||
+ });*/
|
+ });*/
|
||||||
|
|
||||||
it('link', () => {
|
it('link', () => {
|
||||||
@@ -1017,5 +1017,5 @@ paragraph
|
@@ -1129,5 +1129,5 @@ paragraph
|
||||||
});
|
});
|
||||||
|
|
||||||
- it('autolink mangle email', () => {
|
- it('autolink mangle email', () => {
|
||||||
+ /*it('autolink mangle email', () => {
|
+ /*it('autolink mangle email', () => {
|
||||||
expectInlineTokens({
|
expectInlineTokens({
|
||||||
md: '<test@example.com>',
|
md: '<test@example.com>',
|
||||||
@@ -1037,5 +1037,5 @@ paragraph
|
@@ -1149,5 +1149,5 @@ paragraph
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
- });
|
- });
|
||||||
+ });*/
|
+ });*/
|
||||||
|
|
||||||
it('url', () => {
|
it('url', () => {
|
||||||
@@ -1074,5 +1074,5 @@ paragraph
|
@@ -1186,5 +1186,5 @@ paragraph
|
||||||
});
|
});
|
||||||
|
|
||||||
- it('url mangle email', () => {
|
- it('url mangle email', () => {
|
||||||
+ /*it('url mangle email', () => {
|
+ /*it('url mangle email', () => {
|
||||||
expectInlineTokens({
|
expectInlineTokens({
|
||||||
md: 'test@example.com',
|
md: 'test@example.com',
|
||||||
@@ -1094,5 +1094,5 @@ paragraph
|
@@ -1206,5 +1206,5 @@ paragraph
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
- });
|
- });
|
||||||
+ });*/
|
+ });*/
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -1110,5 +1110,5 @@ paragraph
|
@@ -1222,5 +1222,5 @@ paragraph
|
||||||
});
|
});
|
||||||
|
|
||||||
- describe('smartypants', () => {
|
- describe('smartypants', () => {
|
||||||
+ /*describe('smartypants', () => {
|
+ /*describe('smartypants', () => {
|
||||||
it('single quotes', () => {
|
it('single quotes', () => {
|
||||||
expectInlineTokens({
|
expectInlineTokens({
|
||||||
@@ -1180,5 +1180,5 @@ paragraph
|
@@ -1292,5 +1292,5 @@ paragraph
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
- });
|
- });
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ font-family: 'fa';
|
|||||||
font-style: normal;
|
font-style: normal;
|
||||||
font-weight: 400;
|
font-weight: 400;
|
||||||
font-display: block;
|
font-display: block;
|
||||||
src: url("/.cpr/deps/mini-fa.woff") format("woff");
|
src: url("mini-fa.woff") format("woff");
|
||||||
}
|
}
|
||||||
|
|
||||||
.fa,
|
.fa,
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user