Compare commits

...

70 Commits

Author SHA1 Message Date
ed
c2ace91e52 v1.6.0 2023-01-29 02:55:44 +00:00
ed
0bac87c36f make loss of hotkeys more obvious 2023-01-29 01:40:02 +00:00
ed
e650d05939 shovel across most of the env too 2023-01-29 01:19:53 +00:00
ed
85a96e4446 add custom text selection colors because chrome is broken on fedora 2023-01-29 01:03:10 +00:00
ed
2569005139 support sandboxed markdown plugins 2023-01-29 00:57:08 +00:00
ed
c50cb66aef sandboxed other-origin iframes dont cache css 2023-01-28 23:40:25 +00:00
ed
d4c5fca15b sandbox readme.md / prologue / epilogue 2023-01-28 21:24:40 +00:00
ed
75cea4f684 misc 2023-01-28 13:35:49 +00:00
ed
68c6794d33 rewrite other symlinks after the actual move;
fixes volumes where symlinking is disabled
2023-01-28 01:14:29 +00:00
ed
82f98dd54d delete/move is now POST 2023-01-28 01:02:50 +00:00
ed
741d781c18 add cors controls + improve preflight + pw header 2023-01-28 00:59:04 +00:00
ed
0be1e43451 mention mtp in the hooks readme 2023-01-28 00:07:50 +00:00
ed
5366bf22bb describe detected network changes 2023-01-27 23:56:54 +00:00
ed
bcd91b1809 add eventhook examples 2023-01-27 23:55:57 +00:00
ed
9bd5738e6f shorter fallback hostname 2023-01-27 22:19:25 +00:00
ed
bab4aa4c0a mkdir fix 2023-01-27 22:16:10 +00:00
ed
e965b9b9e2 mkdir missing volumes on startup 2023-01-27 21:52:28 +00:00
ed
31101427d3 support downloading blockdev contents 2023-01-27 21:09:57 +00:00
ed
a083dc36ba dont get confused by dangling symlinks at target 2023-01-27 20:27:00 +00:00
ed
9b7b9262aa promote dedup control to volflags 2023-01-25 21:46:15 +00:00
ed
660011fa6e md-editor: make hotkey ^e more global 2023-01-25 20:58:28 +00:00
ed
ead31b6823 add eventhook sanchecks 2023-01-25 20:51:02 +00:00
ed
4310580cd4 separate http/https logins (breaks ie4 / win3.11 login) 2023-01-24 21:23:57 +00:00
ed
b005acbfda enable text selection between breadcrumbs + update vs 2023-01-23 22:44:29 +00:00
ed
460709e6f3 upgrade wget downloader to use event hooks 2023-01-22 23:45:11 +00:00
ed
a8768d05a9 add comparison to similar software 2023-01-22 23:39:19 +00:00
ed
f8e3e87a52 add event hooks 2023-01-22 23:35:31 +00:00
ed
70f1642d0d allow tar/zip download of hidden folders 2023-01-21 20:56:44 +00:00
ed
3fc7561da4 macos 2023-01-21 10:36:31 +00:00
ed
9065226c3d oh great its in lts too 2023-01-21 10:19:04 +00:00
ed
b7e321fa47 cleanup 2023-01-19 22:26:49 +00:00
ed
664665b86b fix some location-rproxy bugs 2023-01-19 22:26:24 +00:00
ed
f4f362b7a4 add --freebind 2023-01-18 21:55:36 +00:00
ed
577d23f460 zeroconf: detect network change and reannounce 2023-01-18 21:27:27 +00:00
ed
504e168486 compensate avg.speed for single-chunk uploads 2023-01-18 19:53:19 +00:00
ed
f2f9640371 workaround firefox layout bug:
three-line toasts get a scrollbar even if it doesn't need one
and the width is not adjusted correctly when that happens
2023-01-18 19:45:04 +00:00
ed
ee46f832b1 u2cli: add option -ns for slow terminals 2023-01-17 23:29:51 +00:00
ed
b0e755d410 give curl colored (yet sortable) plaintext listings 2023-01-17 23:22:43 +00:00
ed
cfd24604d5 ux tweaks 2023-01-17 23:21:31 +00:00
ed
264894e595 add cursed usecases 2023-01-16 21:46:11 +00:00
ed
5bb9f56247 linux 6.1 fixed the 6.0 bugs; remove workarounds 2023-01-16 20:44:57 +00:00
ed
18942ed066 location-based rproxy fixes 2023-01-16 20:09:45 +00:00
ed
85321a6f31 stale tree is better than no tree 2023-01-15 20:54:03 +00:00
ed
baf641396d add optional powered-by footnode 2023-01-15 20:52:38 +00:00
ed
17c91e7014 override bogus mimetypes 2023-01-14 15:10:32 +00:00
ed
010770684d workaround another linux kernel bug 2023-01-14 08:16:15 +00:00
ed
b4c503657b ignore loss of stdout 2023-01-14 07:35:44 +00:00
ed
71bd306268 fix unpost filters with slashes 2023-01-13 17:56:32 +00:00
ed
dd7fab1352 u2cli: properly retry failed handshakes 2023-01-13 07:17:41 +00:00
ed
dacca18863 v1.5.6 2023-01-12 05:15:30 +00:00
ed
53d92cc0a6 faster upload of small files on high-latency nets 2023-01-12 02:53:22 +00:00
ed
434823f6f0 ui: allow changing num.threads in search-only 2023-01-11 16:14:02 +00:00
ed
2cb1f50370 fix dualstack on lo 2023-01-11 16:10:07 +00:00
ed
03f53f6392 gallery: fix js error on digit-keypress viewing pics 2023-01-11 16:08:15 +00:00
ed
a70ecd7af0 v1.5.5 2022-12-30 07:54:34 +00:00
ed
8b81e58205 mdns fixes 2022-12-30 07:47:53 +00:00
ed
4500c04edf v1.5.4 2022-12-29 04:44:15 +00:00
ed
6222ddd720 fix ssdp on dualstack 2022-12-22 16:50:46 +00:00
ed
8a7135cf41 support fat32 time precision, avoiding rescans
posted from warzaw airport otw to japan
2022-12-20 22:19:32 +01:00
ed
b4c7282956 password from file 2022-12-20 13:28:48 +00:00
ed
8491a40a04 Create SECURITY.md 2022-12-19 21:18:27 +00:00
ed
343d38b693 extend image-viewer with modern formats 2022-12-15 22:38:33 +00:00
ed
6cf53d7364 try next thumbnailer if one fails;
libvips assumes imagemagick was built with avif
2022-12-15 22:34:51 +00:00
ed
b070d44de7 libvips logging + raise codec errors 2022-12-15 22:22:04 +00:00
ed
79aa40fdea cosmetic fixes 2022-12-14 23:12:51 +00:00
ed
dcaff2785f v1.5.3 2022-12-13 19:56:34 +00:00
ed
497f5b4307 add hotkey to enable download mode 2022-12-13 19:50:20 +00:00
ed
be32ad0da6 add sfx tester 2022-12-13 19:05:10 +00:00
ed
8ee2bf810b stop battleplan from indirectly crashing the browser 2022-12-13 18:58:16 +00:00
ed
28232656a9 folder-sync optimizations 2022-12-13 18:56:40 +00:00
52 changed files with 2281 additions and 370 deletions

1
.vscode/launch.json vendored
View File

@@ -8,6 +8,7 @@
"module": "copyparty", "module": "copyparty",
"console": "integratedTerminal", "console": "integratedTerminal",
"cwd": "${workspaceFolder}", "cwd": "${workspaceFolder}",
"justMyCode": false,
"args": [ "args": [
//"-nw", //"-nw",
"-ed", "-ed",

View File

@@ -52,9 +52,11 @@
"--disable=missing-module-docstring", "--disable=missing-module-docstring",
"--disable=missing-class-docstring", "--disable=missing-class-docstring",
"--disable=missing-function-docstring", "--disable=missing-function-docstring",
"--disable=import-outside-toplevel",
"--disable=wrong-import-position", "--disable=wrong-import-position",
"--disable=raise-missing-from", "--disable=raise-missing-from",
"--disable=bare-except", "--disable=bare-except",
"--disable=broad-except",
"--disable=invalid-name", "--disable=invalid-name",
"--disable=line-too-long", "--disable=line-too-long",
"--disable=consider-using-f-string" "--disable=consider-using-f-string"
@@ -64,6 +66,7 @@
"editor.formatOnSave": true, "editor.formatOnSave": true,
"[html]": { "[html]": {
"editor.formatOnSave": false, "editor.formatOnSave": false,
"editor.autoIndent": "keep",
}, },
"[css]": { "[css]": {
"editor.formatOnSave": false, "editor.formatOnSave": false,

View File

@@ -1,6 +1,6 @@
# ⇆🎉 copyparty # ⇆🎉 copyparty
* http file sharing hub (py2/py3) [(on PyPI)](https://pypi.org/project/copyparty/) * portable file sharing hub (py2/py3) [(on PyPI)](https://pypi.org/project/copyparty/)
* MIT-Licensed, 2019-05-26, ed @ irc.rizon.net * MIT-Licensed, 2019-05-26, ed @ irc.rizon.net
@@ -75,7 +75,8 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
* [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else * [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else
* [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload * [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload
* [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags * [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags
* [upload events](#upload-events) - trigger a script/program on each upload * [event hooks](#event-hooks) - trigger a program on uploads, renames etc ([examples](./bin/hooks/))
* [upload events](#upload-events) - the older, more powerful approach ([examples](./bin/mtag/))
* [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed * [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed
* [themes](#themes) * [themes](#themes)
* [complete examples](#complete-examples) * [complete examples](#complete-examples)
@@ -87,6 +88,7 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
* [client-side](#client-side) - when uploading files * [client-side](#client-side) - when uploading files
* [security](#security) - some notes on hardening * [security](#security) - some notes on hardening
* [gotchas](#gotchas) - behavior that might be unexpected * [gotchas](#gotchas) - behavior that might be unexpected
* [cors](#cors) - cross-site request config
* [recovering from crashes](#recovering-from-crashes) * [recovering from crashes](#recovering-from-crashes)
* [client crashes](#client-crashes) * [client crashes](#client-crashes)
* [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads * [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads
@@ -163,6 +165,7 @@ recommended additional steps on debian which enable audio metadata and thumbnai
* upload * upload
* ☑ basic: plain multipart, ie6 support * ☑ basic: plain multipart, ie6 support
* ☑ [up2k](#uploading): js, resumable, multithreaded * ☑ [up2k](#uploading): js, resumable, multithreaded
* unaffected by cloudflare's max-upload-size (100 MiB)
* ☑ stash: simple PUT filedropper * ☑ stash: simple PUT filedropper
* ☑ [unpost](#unpost): undo/delete accidental uploads * ☑ [unpost](#unpost): undo/delete accidental uploads
* ☑ [self-destruct](#self-destruct) (specified server-side or client-side) * ☑ [self-destruct](#self-destruct) (specified server-side or client-side)
@@ -205,8 +208,7 @@ project goals / philosophy
* inverse linux philosophy -- do all the things, and do an *okay* job * inverse linux philosophy -- do all the things, and do an *okay* job
* quick drop-in service to get a lot of features in a pinch * quick drop-in service to get a lot of features in a pinch
* there are probably [better alternatives](https://github.com/awesome-selfhosted/awesome-selfhosted) if you have specific/long-term needs * check [the alternatives](./docs/versus.md)
* but the resumable multithreaded uploads are p slick ngl
* run anywhere, support everything * run anywhere, support everything
* as many web-browsers and python versions as possible * as many web-browsers and python versions as possible
* every browser should at least be able to browse, download, upload files * every browser should at least be able to browse, download, upload files
@@ -230,7 +232,7 @@ browser-specific:
* Android-Chrome: increase "parallel uploads" for higher speed (android bug) * Android-Chrome: increase "parallel uploads" for higher speed (android bug)
* Android-Firefox: takes a while to select files (their fix for ☝️) * Android-Firefox: takes a while to select files (their fix for ☝️)
* Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now* * Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now*
* Desktop-Firefox: may stop you from deleting files you've uploaded until you visit `about:memory` and click `Minimize memory usage` * Desktop-Firefox: [may stop you from unplugging USB flashdrives](https://bugzilla.mozilla.org/show_bug.cgi?id=1792598) until you visit `about:memory` and click `Minimize memory usage`
server-os-specific: server-os-specific:
* RHEL8 / Rocky8: you can run copyparty using `/usr/libexec/platform-python` * RHEL8 / Rocky8: you can run copyparty using `/usr/libexec/platform-python`
@@ -248,23 +250,15 @@ server-os-specific:
* Windows: if the `up2k.db` (filesystem index) is on a samba-share or network disk, you'll get unpredictable behavior if the share is disconnected for a bit * Windows: if the `up2k.db` (filesystem index) is on a samba-share or network disk, you'll get unpredictable behavior if the share is disconnected for a bit
* use `--hist` or the `hist` volflag (`-v [...]:c,hist=/tmp/foo`) to place the db on a local disk instead * use `--hist` or the `hist` volflag (`-v [...]:c,hist=/tmp/foo`) to place the db on a local disk instead
* all volumes must exist / be available on startup; up2k (mtp especially) gets funky otherwise * all volumes must exist / be available on startup; up2k (mtp especially) gets funky otherwise
* [the database can get stuck](https://github.com/9001/copyparty/issues/10)
* has only happened once but that is once too many
* luckily not dangerous for file integrity and doesn't really stop uploads or anything like that
* but would really appreciate some logs if anyone ever runs into it again
* probably more, pls let me know * probably more, pls let me know
## not my bugs ## not my bugs
* [Chrome issue 1317069](https://bugs.chromium.org/p/chromium/issues/detail?id=1317069) -- if you try to upload a folder which contains symlinks by dragging it into the browser, the symlinked files will not get uploaded * [Chrome issue 1317069](https://bugs.chromium.org/p/chromium/issues/detail?id=1317069) -- if you try to upload a folder which contains symlinks by dragging it into the browser, the symlinked files will not get uploaded
* [Chrome issue 1354816](https://bugs.chromium.org/p/chromium/issues/detail?id=1354816) -- chrome may eat all RAM uploading over plaintext http with `mt` enabled * [Chrome issue 1352210](https://bugs.chromium.org/p/chromium/issues/detail?id=1352210) -- plaintext http may be faster at filehashing than https (but also extremely CPU-intensive)
* more amusingly, [Chrome issue 1354800](https://bugs.chromium.org/p/chromium/issues/detail?id=1354800) -- chrome may eat all RAM uploading in general (altho you probably won't run into this one) * [Firefox issue 1790500](https://bugzilla.mozilla.org/show_bug.cgi?id=1790500) -- entire browser can crash after uploading ~4000 small files
* [Chrome issue 1352210](https://bugs.chromium.org/p/chromium/issues/detail?id=1352210) -- plaintext http may be faster at filehashing than https (but also extremely CPU-intensive and likely to run into the above gc bugs)
* [Firefox issue 1790500](https://bugzilla.mozilla.org/show_bug.cgi?id=1790500) -- sometimes forgets to close filedescriptors during upload so the browser can crash after ~4000 files
* iPhones: the volume control doesn't work because [apple doesn't want it to](https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/Using_HTML5_Audio_Video/Device-SpecificConsiderations/Device-SpecificConsiderations.html#//apple_ref/doc/uid/TP40009523-CH5-SW11) * iPhones: the volume control doesn't work because [apple doesn't want it to](https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/Using_HTML5_Audio_Video/Device-SpecificConsiderations/Device-SpecificConsiderations.html#//apple_ref/doc/uid/TP40009523-CH5-SW11)
* *future workaround:* enable the equalizer, make it all-zero, and set a negative boost to reduce the volume * *future workaround:* enable the equalizer, make it all-zero, and set a negative boost to reduce the volume
@@ -287,6 +281,9 @@ server-os-specific:
upgrade notes upgrade notes
* `1.6.0` (2023-01-29):
* http-api: delete/move is now `POST` instead of `GET`
* everything other than `GET` and `HEAD` must pass [cors validation](#cors)
* `1.5.0` (2022-12-03): [new chunksize formula](https://github.com/9001/copyparty/commit/54e1c8d261df) for files larger than 128 GiB * `1.5.0` (2022-12-03): [new chunksize formula](https://github.com/9001/copyparty/commit/54e1c8d261df) for files larger than 128 GiB
* **users:** upgrade to the latest [cli uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) if you use that * **users:** upgrade to the latest [cli uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) if you use that
* **devs:** update third-party up2k clients (if those even exist) * **devs:** update third-party up2k clients (if those even exist)
@@ -301,7 +298,7 @@ upgrade notes
* you can also do this with linux filesystem permissions; `chmod 111 music` will make it possible to access files and folders inside the `music` folder but not list the immediate contents -- also works with other software, not just copyparty * you can also do this with linux filesystem permissions; `chmod 111 music` will make it possible to access files and folders inside the `music` folder but not list the immediate contents -- also works with other software, not just copyparty
* can I make copyparty download a file to my server if I give it a URL? * can I make copyparty download a file to my server if I give it a URL?
* not really, but there is a [terrible hack](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/wget.py) which makes it possible * yes, using [hooks](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py)
# accounts and volumes # accounts and volumes
@@ -932,6 +929,8 @@ some examples,
## other flags ## other flags
* `:c,magic` enables filetype detection for nameless uploads, same as `--magic` * `:c,magic` enables filetype detection for nameless uploads, same as `--magic`
* needs https://pypi.org/project/python-magic/ `python3 -m pip install --user -U python-magic`
* on windows grab this instead `python3 -m pip install --user -U python-magic-bin`
## database location ## database location
@@ -1000,9 +999,18 @@ copyparty can invoke external programs to collect additional metadata for files
if something doesn't work, try `--mtag-v` for verbose error messages if something doesn't work, try `--mtag-v` for verbose error messages
## upload events ## event hooks
trigger a script/program on each upload like so: trigger a program on uploads, renames etc ([examples](./bin/hooks/))
you can set hooks before and/or after an event happens, and currently you can hook uploads, moves/renames, and deletes
there's a bunch of flags and stuff, see `--help-hooks`
### upload events
the older, more powerful approach ([examples](./bin/mtag/)):
``` ```
-v /mnt/inc:inc:w:c,mte=+x1:c,mtp=x1=ad,kn,/usr/bin/notify-send -v /mnt/inc:inc:w:c,mte=+x1:c,mtp=x1=ad,kn,/usr/bin/notify-send
@@ -1012,11 +1020,12 @@ so filesystem location `/mnt/inc` shared at `/inc`, write-only for everyone, app
that'll run the command `notify-send` with the path to the uploaded file as the first and only argument (so on linux it'll show a notification on-screen) that'll run the command `notify-send` with the path to the uploaded file as the first and only argument (so on linux it'll show a notification on-screen)
note that it will only trigger on new unique files, not dupes note that this is way more complicated than the new [event hooks](#event-hooks) but this approach has the following advantages:
* non-blocking and multithreaded; doesn't hold other uploads back
* you get access to tags from FFmpeg and other mtp parsers
* only trigger on new unique files, not dupes
and it will occupy the parsing threads, so fork anything expensive (or set `kn` to have copyparty fork it for you) -- otoh if you want to intentionally queue/singlethread you can combine it with `--mtag-mt 1` note that it will occupy the parsing threads, so fork anything expensive (or set `kn` to have copyparty fork it for you) -- otoh if you want to intentionally queue/singlethread you can combine it with `--mtag-mt 1`
if this becomes popular maybe there should be a less janky way to do it actually
## hiding from google ## hiding from google
@@ -1142,11 +1151,11 @@ interact with copyparty using non-browser clients
* curl/wget: upload some files (post=file, chunk=stdin) * curl/wget: upload some files (post=file, chunk=stdin)
* `post(){ curl -F act=bput -F f=@"$1" http://127.0.0.1:3923/?pw=wark;}` * `post(){ curl -F act=bput -F f=@"$1" http://127.0.0.1:3923/?pw=wark;}`
`post movie.mkv` `post movie.mkv`
* `post(){ curl -b cppwd=wark -H rand:8 -T "$1" http://127.0.0.1:3923/;}` * `post(){ curl -H pw:wark -H rand:8 -T "$1" http://127.0.0.1:3923/;}`
`post movie.mkv` `post movie.mkv`
* `post(){ wget --header='Cookie: cppwd=wark' --post-file="$1" -O- http://127.0.0.1:3923/?raw;}` * `post(){ wget --header='pw: wark' --post-file="$1" -O- http://127.0.0.1:3923/?raw;}`
`post movie.mkv` `post movie.mkv`
* `chunk(){ curl -b cppwd=wark -T- http://127.0.0.1:3923/;}` * `chunk(){ curl -H pw:wark -T- http://127.0.0.1:3923/;}`
`chunk <movie.mkv` `chunk <movie.mkv`
* bash: when curl and wget is not available or too boring * bash: when curl and wget is not available or too boring
@@ -1170,7 +1179,7 @@ copyparty returns a truncated sha512sum of your PUT/POST as base64; you can gene
b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|tr '+/' '-_'|head -c44;} b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|tr '+/' '-_'|head -c44;}
b512 <movie.mkv b512 <movie.mkv
you can provide passwords using cookie `cppwd=hunter2`, as a url-param `?pw=hunter2`, or with basic-authentication (either as the username or password) you can provide passwords using header `PW: hunter2`, cookie `cppwd=hunter2`, url-param `?pw=hunter2`, or with basic-authentication (either as the username or password)
NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename
@@ -1206,7 +1215,7 @@ below are some tweaks roughly ordered by usefulness:
* `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger) * `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger)
* `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example: * `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example:
* huge amount of short-lived connections * huge amount of short-lived connections
* really heavy traffic (downloads/uploads) * simultaneous downloads and uploads saturating a 20gbps connection
...however it adds an overhead to internal communication so it might be a net loss, see if it works 4 u ...however it adds an overhead to internal communication so it might be a net loss, see if it works 4 u
@@ -1231,6 +1240,11 @@ when uploading files,
some notes on hardening some notes on hardening
* set `--rproxy 0` if your copyparty is directly facing the internet (not through a reverse-proxy)
* cors doesn't work right otherwise
safety profiles:
* option `-s` is a shortcut to set the following options: * option `-s` is a shortcut to set the following options:
* `--no-thumb` disables thumbnails and audio transcoding to stop copyparty from running `FFmpeg`/`Pillow`/`VIPS` on uploaded files, which is a [good idea](https://www.cvedetails.com/vulnerability-list.php?vendor_id=3611) if anonymous upload is enabled * `--no-thumb` disables thumbnails and audio transcoding to stop copyparty from running `FFmpeg`/`Pillow`/`VIPS` on uploaded files, which is a [good idea](https://www.cvedetails.com/vulnerability-list.php?vendor_id=3611) if anonymous upload is enabled
* `--no-mtag-ff` uses `mutagen` to grab music tags instead of `FFmpeg`, which is safer and faster but less accurate * `--no-mtag-ff` uses `mutagen` to grab music tags instead of `FFmpeg`, which is safer and faster but less accurate
@@ -1238,7 +1252,6 @@ some notes on hardening
* `--no-robots` and `--force-js` makes life harder for crawlers, see [hiding from google](#hiding-from-google) * `--no-robots` and `--force-js` makes life harder for crawlers, see [hiding from google](#hiding-from-google)
* option `-ss` is a shortcut for the above plus: * option `-ss` is a shortcut for the above plus:
* `--no-logues` and `--no-readme` disables support for readme's and prologues / epilogues in directory listings, which otherwise lets people upload arbitrary `<script>` tags
* `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support * `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support
* `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance * `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance
* however note if you edit one file it will also affect the other copies * however note if you edit one file it will also affect the other copies
@@ -1249,6 +1262,7 @@ some notes on hardening
* option `-sss` is a shortcut for the above plus: * option `-sss` is a shortcut for the above plus:
* `--no-dav` disables webdav support * `--no-dav` disables webdav support
* `--no-logues` and `--no-readme` disables support for readme's and prologues / epilogues in directory listings, which otherwise lets people upload arbitrary (but sandboxed) `<script>` tags
* `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk * `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk
* `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks * `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks
@@ -1264,6 +1278,22 @@ other misc notes:
behavior that might be unexpected behavior that might be unexpected
* users without read-access to a folder can still see the `.prologue.html` / `.epilogue.html` / `README.md` contents, for the purpose of showing a description on how to use the uploader for example * users without read-access to a folder can still see the `.prologue.html` / `.epilogue.html` / `README.md` contents, for the purpose of showing a description on how to use the uploader for example
* users can submit `<script>`s which autorun for other visitors in a few ways;
* uploading a `README.md` -- avoid with `--no-readme`
* renaming `some.html` to `.epilogue.html` -- avoid with either `--no-logues` or `--no-dot-ren`
* the directory-listing embed is sandboxed (so any malicious scripts can't do any damage) but the markdown editor is not
## cors
cross-site request config
by default, except for `GET` and `HEAD` operations, all requests must either:
* not contain an `Origin` header at all
* or have an `Origin` matching the server domain
* or the header `PW` with your password as value
cors can be configured with `--acao` and `--acam`, or the protections entirely disabled with `--allow-csrf`
# recovering from crashes # recovering from crashes

9
SECURITY.md Normal file
View File

@@ -0,0 +1,9 @@
# Security Policy
if you hit something extra juicy pls let me know on either of the following
* email -- `copyparty@ocv.ze` except `ze` should be `me`
* [mastodon dm](https://layer8.space/@tripflag) -- `@tripflag@layer8.space`
* [github private vulnerability report](https://github.com/9001/copyparty/security/advisories/new), wow that form is complicated
* [twitter dm](https://twitter.com/tripflag) (if im somehow not banned yet)
no bug bounties sorry! all i can offer is greetz in the release notes

19
bin/hooks/README.md Normal file
View File

@@ -0,0 +1,19 @@
standalone programs which are executed by copyparty when an event happens (upload, file rename, delete, ...)
these programs either take zero arguments, or a filepath (the affected file), or a json message with filepath + additional info
> **note:** in addition to event hooks (the stuff described here), copyparty has another api to run your programs/scripts while providing way more information such as audio tags / video codecs / etc and optionally daisychaining data between scripts in a processing pipeline; if that's what you want then see [mtp plugins](../mtag/) instead
# after upload
* [notify.py](notify.py) shows a desktop notification
* [discord-announce.py](discord-announce.py) announces new uploads on discord using webhooks
* [reject-mimetype.py](reject-mimetype.py) rejects uploads unless the mimetype is acceptable
# before upload
* [reject-extension.py](reject-extension.py) rejects uploads if they match a list of file extensions
# on message
* [wget.py](wget.py) lets you download files by POSTing URLs to copyparty

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
import sys
import json
import requests
from copyparty.util import humansize, quotep
_ = r"""
announces a new upload on discord
example usage as global config:
--xau f,t5,j,bin/hooks/discord-announce.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xau=f,t5,j,bin/hooks/discord-announce.py
parameters explained,
f = fork; don't wait for it to finish
t5 = timeout if it's still running after 5 sec
j = provide upload information as json; not just the filename
replace "xau" with "xbu" to announce Before upload starts instead of After completion
# how to discord:
first create the webhook url; https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
then use this to design your message: https://discohook.org/
"""
def main():
    """Announce a finished copyparty upload in a discord channel via webhook."""
    WEBHOOK = "https://discord.com/api/webhooks/1234/base64"

    # upload metadata is passed by copyparty as a json blob in argv[1]
    info = json.loads(sys.argv[1])
    vpath = info["vp"]
    fname = vpath.rsplit("/", 1)[-1]
    url = f"https://{info['host']}/{quotep(vpath)}"

    # build the discord embed; every field is rendered inline
    embed = {
        "title": fname,
        "url": url,
        "description": url.rsplit("/", 1)[0],
        "color": 0x449900,
        "fields": [
            {"name": "Size", "value": humansize(info["sz"]), "inline": True},
            {"name": "User", "value": info["user"], "inline": True},
            {"name": "IP", "value": info["ip"], "inline": True},
        ],
    }

    resp = requests.post(WEBHOOK, json={"embeds": [embed]})
    print(f"discord: {resp}\n", end="")


if __name__ == "__main__":
    main()

30
bin/hooks/notify.py Normal file
View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
import sys
from plyer import notification
_ = r"""
show os notification on upload; works on windows, linux, macos
depdencies:
dependencies: python3 -m pip install --user -U plyer
example usage as global config:
--xau f,bin/hooks/notify.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xau=f,bin/hooks/notify.py
parameters explained,
xau = execute after upload
f = fork so it doesn't block uploads
"""
def main():
    """Pop a desktop notification showing the path of the uploaded file."""
    # argv[1] is the filesystem path copyparty hands to the hook
    notification.notify(
        title="new file uploaded",
        message=sys.argv[1],
        timeout=10,
    )


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
import sys
_ = r"""
reject file uploads by file extension
example usage as global config:
--xbu c,bin/hooks/reject-extension.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xbu=c,bin/hooks/reject-extension.py
parameters explained,
xbu = execute before upload
c = check result, reject upload if error
"""
def main():
bad = "exe scr com pif bat ps1 jar msi"
ext = sys.argv[1].split(".")[-1]
sys.exit(1 if ext in bad.split() else 0)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,39 @@
#!/usr/bin/env python3
import sys
import magic
_ = r"""
reject file uploads by mimetype
dependencies (linux, macos):
python3 -m pip install --user -U python-magic
dependencies (windows):
python3 -m pip install --user -U python-magic-bin
example usage as global config:
--xau c,bin/hooks/reject-mimetype.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xau=c,bin/hooks/reject-mimetype.py
parameters explained,
xau = execute after upload
c = check result, reject upload if error
"""
def main():
    """Exit nonzero (reject the upload) unless argv[1] is a jpeg or png."""
    allowed = ("image/jpeg", "image/png")

    # sniff the actual file contents, not the extension
    detected = magic.from_file(sys.argv[1], mime=True)
    print(detected)

    sys.exit(0 if detected in allowed else 1)


if __name__ == "__main__":
    main()

54
bin/hooks/wget.py Normal file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
import os
import sys
import json
import subprocess as sp
_ = r"""
use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the
message/pager function on the website)
example usage as global config:
--xm f,j,t3600,bin/hooks/wget.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xm=f,j,t3600,bin/hooks/wget.py
parameters explained,
f = fork so it doesn't block uploads
j = provide message information as json; not just the text
c3 = mute all output
t3600 = timeout and kill download after 1 hour
"""
def main():
    """Download the URL carried in the xm message-event json (argv[1]).

    Creates an empty "-- DOWNLOADING <name>" marker file while the
    transfer runs so progress is visible in the folder listing, and
    a "-- FAILED TO DOWNLOAD <name>" marker if wget exits non-zero.
    """
    inf = json.loads(sys.argv[1])
    url = inf["txt"]
    if "://" not in url:
        url = "https://" + url

    # "ap" is the volume's filesystem abspath; download into it
    os.chdir(inf["ap"])

    name = url.split("?")[0].split("/")[-1]
    tfn = "-- DOWNLOADING " + name
    print(f"{tfn}\n", end="")
    open(tfn, "wb").close()

    cmd = ["wget", "--trust-server-names", "-nv", "--", url]

    try:
        sp.check_call(cmd)
    except Exception:
        # wget failed (or isn't installed); leave a failure marker behind
        t = "-- FAILED TO DOWNLOAD " + name
        print(f"{t}\n", end="")
        open(t, "wb").close()

    os.unlink(tfn)


if __name__ == "__main__":
    main()

View File

@@ -1,5 +1,9 @@
standalone programs which take an audio file as argument standalone programs which take an audio file as argument
you may want to forget about all this fancy complicated stuff and just use [event hooks](../hooks/) instead (which doesn't need `-e2ts` or ffmpeg)
----
**NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen` **NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen`
some of these rely on libraries which are not MIT-compatible some of these rely on libraries which are not MIT-compatible
@@ -17,6 +21,7 @@ these do not have any problematic dependencies at all:
* [cksum.py](./cksum.py) computes various checksums * [cksum.py](./cksum.py) computes various checksums
* [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser) * [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser)
* [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty * [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty
* also available as an [event hook](../hooks/wget.py)
# dependencies # dependencies

View File

@@ -1,6 +1,11 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
DEPRECATED -- replaced by event hooks;
https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py
---
use copyparty as a file downloader by POSTing URLs as use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the application/x-www-form-urlencoded (for example using the
message/pager function on the website) message/pager function on the website)

View File

@@ -997,7 +997,7 @@ def main():
ap.add_argument( ap.add_argument(
"-cf", metavar="NUM_BLOCKS", type=int, default=nf, help="file cache" "-cf", metavar="NUM_BLOCKS", type=int, default=nf, help="file cache"
) )
ap.add_argument("-a", metavar="PASSWORD", help="password") ap.add_argument("-a", metavar="PASSWORD", help="password or $filepath")
ap.add_argument("-d", action="store_true", help="enable debug") ap.add_argument("-d", action="store_true", help="enable debug")
ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify") ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify")
ap.add_argument("-td", action="store_true", help="disable certificate check") ap.add_argument("-td", action="store_true", help="disable certificate check")

View File

@@ -3,7 +3,7 @@ from __future__ import print_function, unicode_literals
""" """
up2k.py: upload to copyparty up2k.py: upload to copyparty
2022-12-12, v1.0, ed <irc.rizon.net>, MIT-Licensed 2023-01-13, v1.2, ed <irc.rizon.net>, MIT-Licensed
https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py
- dependencies: requests - dependencies: requests
@@ -506,25 +506,31 @@ def handshake(ar, file, search):
url += quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8", "replace") url += quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8", "replace")
while True: while True:
sc = 600
txt = ""
try: try:
r = req_ses.post(url, headers=headers, json=req) r = req_ses.post(url, headers=headers, json=req)
break sc = r.status_code
txt = r.text
if sc < 400:
break
raise Exception("http {0}: {1}".format(sc, txt))
except Exception as ex: except Exception as ex:
em = str(ex).split("SSLError(")[-1] em = str(ex).split("SSLError(")[-1].split("\nURL: ")[0].strip()
if sc == 422 or "<pre>partial upload exists at a different" in txt:
file.recheck = True
return [], False
elif sc == 409 or "<pre>upload rejected, file already exists" in txt:
return [], False
elif "<pre>you don't have " in txt:
raise
eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em)) eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em))
time.sleep(1) time.sleep(1)
sc = r.status_code
if sc >= 400:
txt = r.text
if sc == 422 or "<pre>partial upload exists at a different" in txt:
file.recheck = True
return [], False
elif sc == 409 or "<pre>upload rejected, file already exists" in txt:
return [], False
raise Exception("http {0}: {1}".format(sc, txt))
try: try:
r = r.json() r = r.json()
except: except:
@@ -575,9 +581,9 @@ class Ctl(object):
(hashing, handshakes, uploads) (hashing, handshakes, uploads)
""" """
def __init__(self, ar): def _scan(self):
ar = self.ar
eprint("\nscanning {0} locations\n".format(len(ar.files))) eprint("\nscanning {0} locations\n".format(len(ar.files)))
self.ar = ar
nfiles = 0 nfiles = 0
nbytes = 0 nbytes = 0
err = [] err = []
@@ -606,8 +612,15 @@ class Ctl(object):
return return
eprint("found {0} files, {1}\n\n".format(nfiles, humansize(nbytes))) eprint("found {0} files, {1}\n\n".format(nfiles, humansize(nbytes)))
self.nfiles = nfiles return nfiles, nbytes
self.nbytes = nbytes
def __init__(self, ar, stats=None):
self.ar = ar
self.stats = stats or self._scan()
if not self.stats:
return
self.nfiles, self.nbytes = self.stats
if ar.td: if ar.td:
requests.packages.urllib3.disable_warnings() requests.packages.urllib3.disable_warnings()
@@ -694,7 +707,7 @@ class Ctl(object):
handshake(self.ar, file, search) handshake(self.ar, file, search)
def _fancy(self): def _fancy(self):
if VT100: if VT100 and not self.ar.ns:
atexit.register(self.cleanup_vt100) atexit.register(self.cleanup_vt100)
ss.scroll_region(3) ss.scroll_region(3)
@@ -718,7 +731,7 @@ class Ctl(object):
else: else:
idles = 0 idles = 0
if VT100: if VT100 and not self.ar.ns:
maxlen = ss.w - len(str(self.nfiles)) - 14 maxlen = ss.w - len(str(self.nfiles)) - 14
txt = "\033[s\033[{0}H".format(ss.g) txt = "\033[s\033[{0}H".format(ss.g)
for y, k, st, f in [ for y, k, st, f in [
@@ -758,7 +771,7 @@ class Ctl(object):
eta = str(datetime.timedelta(seconds=int(eta))) eta = str(datetime.timedelta(seconds=int(eta)))
sleft = humansize(self.nbytes - self.up_b) sleft = humansize(self.nbytes - self.up_b)
nleft = self.nfiles - self.up_f nleft = self.nfiles - self.up_f
tail = "\033[K\033[u" if VT100 else "\r" tail = "\033[K\033[u" if VT100 and not self.ar.ns else "\r"
t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft) t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft)
eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail)) eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail))
@@ -797,6 +810,9 @@ class Ctl(object):
zb = self.ar.url.encode("utf-8") zb = self.ar.url.encode("utf-8")
zb += quotep(rd.replace(b"\\", b"/")) zb += quotep(rd.replace(b"\\", b"/"))
r = req_ses.get(zb + b"?ls&dots", headers=headers) r = req_ses.get(zb + b"?ls&dots", headers=headers)
if not r:
raise Exception("HTTP {}".format(r.status_code))
j = r.json() j = r.json()
for f in j["dirs"] + j["files"]: for f in j["dirs"] + j["files"]:
rfn = f["href"].split("?")[0].rstrip("/") rfn = f["href"].split("?")[0].rstrip("/")
@@ -984,10 +1000,10 @@ source file/folder selection uses rsync syntax, meaning that:
ap.add_argument("url", type=unicode, help="server url, including destination folder") ap.add_argument("url", type=unicode, help="server url, including destination folder")
ap.add_argument("files", type=unicode, nargs="+", help="files and/or folders to process") ap.add_argument("files", type=unicode, nargs="+", help="files and/or folders to process")
ap.add_argument("-v", action="store_true", help="verbose") ap.add_argument("-v", action="store_true", help="verbose")
ap.add_argument("-a", metavar="PASSWORD", help="password") ap.add_argument("-a", metavar="PASSWORD", help="password or $filepath")
ap.add_argument("-s", action="store_true", help="file-search (disables upload)") ap.add_argument("-s", action="store_true", help="file-search (disables upload)")
ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible") ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible")
ap = app.add_argument_group("compatibility") ap = app.add_argument_group("compatibility")
ap.add_argument("--cls", action="store_true", help="clear screen before start") ap.add_argument("--cls", action="store_true", help="clear screen before start")
ap.add_argument("--ws", action="store_true", help="copyparty is running on windows; wait before deleting files after uploading") ap.add_argument("--ws", action="store_true", help="copyparty is running on windows; wait before deleting files after uploading")
@@ -1001,6 +1017,7 @@ source file/folder selection uses rsync syntax, meaning that:
ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections") ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections")
ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing") ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading") ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
ap.add_argument("-ns", action="store_true", help="no status panel (for slow consoles)")
ap.add_argument("--safe", action="store_true", help="use simple fallback approach") ap.add_argument("--safe", action="store_true", help="use simple fallback approach")
ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)") ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)")
@@ -1031,20 +1048,26 @@ source file/folder selection uses rsync syntax, meaning that:
if "://" not in ar.url: if "://" not in ar.url:
ar.url = "http://" + ar.url ar.url = "http://" + ar.url
if ar.a and ar.a.startswith("$"):
fn = ar.a[1:]
print("reading password from file [{}]".format(fn))
with open(fn, "rb") as f:
ar.a = f.read().decode("utf-8").strip()
if ar.cls: if ar.cls:
print("\x1b\x5b\x48\x1b\x5b\x32\x4a\x1b\x5b\x33\x4a", end="") print("\x1b\x5b\x48\x1b\x5b\x32\x4a\x1b\x5b\x33\x4a", end="")
ctl = Ctl(ar) ctl = Ctl(ar)
if ar.dr and not ar.drd: if ar.dr and not ar.drd:
# run another pass for the deletes print("\npass 2/2: delete")
if getattr(ctl, "up_br") and ar.ws: if getattr(ctl, "up_br") and ar.ws:
# wait for up2k to mtime if there was uploads # wait for up2k to mtime if there was uploads
time.sleep(4) time.sleep(4)
ar.drd = True ar.drd = True
ar.z = True ar.z = True
Ctl(ar) Ctl(ar, ctl.stats)
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -29,11 +29,11 @@ however if your copyparty is behind a reverse-proxy, you may want to use [`share
* disables thumbnails and folder-type detection in windows explorer * disables thumbnails and folder-type detection in windows explorer
* makes it way faster (especially for slow/networked locations (such as partyfuse)) * makes it way faster (especially for slow/networked locations (such as partyfuse))
### [`webdav-basicauth.reg`](webdav-basicauth.reg) ### [`webdav-cfg.reg`](webdav-cfg.bat)
* enables webdav basic-auth over plaintext http; takes effect after a reboot OR after running `webdav-unlimit.bat` * improves the native webdav support in windows;
* removes the 47.6 MiB filesize limit when downloading from webdav
### [`webdav-unlimit.bat`](webdav-unlimit.bat) * optionally enables webdav basic-auth over plaintext http
* removes the 47.6 MiB filesize limit when downloading from webdav * optionally helps disable wpad, removing the 10sec latency
### [`cfssl.sh`](cfssl.sh) ### [`cfssl.sh`](cfssl.sh)
* creates CA and server certificates using cfssl * creates CA and server certificates using cfssl

View File

@@ -14,5 +14,5 @@ name="$SVCNAME"
command_background=true command_background=true
pidfile="/var/run/$SVCNAME.pid" pidfile="/var/run/$SVCNAME.pid"
command="/usr/bin/python /usr/local/bin/copyparty-sfx.py" command="/usr/bin/python3 /usr/local/bin/copyparty-sfx.py"
command_args="-q -v /mnt::rw" command_args="-q -v /mnt::rw"

View File

@@ -1,6 +1,10 @@
<!-- <!--
save this as .epilogue.html inside a write-only folder to declutter the UI, makes it look like save this as .epilogue.html inside a write-only folder to declutter the UI, makes it look like
https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png
only works if you disable the prologue/epilogue sandbox with --no-sb-lg
which should probably be combined with --no-dot-ren to prevent damage
(`no_sb_lg` can also be set per-volume with volflags)
--> -->
<style> <style>

View File

@@ -1,7 +1,7 @@
@echo off @echo off
rem removes the 47.6 MiB filesize limit when downloading from webdav rem removes the 47.6 MiB filesize limit when downloading from webdav
rem + optionally allows/enables password-auth over plaintext http rem + optionally allows/enables password-auth over plaintext http
rem + optionally helps disable wpad rem + optionally helps disable wpad, removing the 10sec latency
setlocal enabledelayedexpansion setlocal enabledelayedexpansion

View File

@@ -229,9 +229,10 @@ def get_srvname() -> str:
ret = f.read().decode("utf-8", "replace").strip() ret = f.read().decode("utf-8", "replace").strip()
except: except:
ret = "" ret = ""
while len(ret) < 7: namelen = 5
while len(ret) < namelen:
ret += base64.b32encode(os.urandom(4))[:7].decode("utf-8").lower() ret += base64.b32encode(os.urandom(4))[:7].decode("utf-8").lower()
ret = re.sub("[234567=]", "", ret)[:7] ret = re.sub("[234567=]", "", ret)[:namelen]
with open(fp, "wb") as f: with open(fp, "wb") as f:
f.write(ret.encode("utf-8") + b"\n") f.write(ret.encode("utf-8") + b"\n")
@@ -253,7 +254,7 @@ def ensure_locale() -> None:
except: except:
continue continue
t = "setlocale {} failed,\n sorting and dates will be funky" t = "setlocale {} failed,\n sorting and dates might get funky\n"
warn(t.format(safe)) warn(t.format(safe))
@@ -498,6 +499,9 @@ def get_sects():
\033[0muploads, general: \033[0muploads, general:
\033[36mnodupe\033[35m rejects existing files (instead of symlinking them) \033[36mnodupe\033[35m rejects existing files (instead of symlinking them)
\033[36mhardlink\033[35m does dedup with hardlinks instead of symlinks
\033[36mneversymlink\033[35m disables symlink fallback; full copy instead
\033[36mcopydupes\033[35m disables dedup, always saves full copies of dupes
\033[36mnosub\033[35m forces all uploads into the top folder of the vfs \033[36mnosub\033[35m forces all uploads into the top folder of the vfs
\033[36mmagic$\033[35m enables filetype detection for nameless uploads \033[36mmagic$\033[35m enables filetype detection for nameless uploads
\033[36mgz\033[35m allows server-side gzip of uploads with ?gz (also c,xz) \033[36mgz\033[35m allows server-side gzip of uploads with ?gz (also c,xz)
@@ -555,6 +559,51 @@ def get_sects():
\033[0m""" \033[0m"""
), ),
], ],
[
"hooks",
"execute commands before/after various events",
dedent(
"""
execute a command (a program or script) before or after various events;
\033[36mxbu\033[35m executes CMD before a file upload starts
\033[36mxau\033[35m executes CMD after a file upload finishes
\033[36mxbr\033[35m executes CMD before a file rename/move
\033[36mxar\033[35m executes CMD after a file rename/move
\033[36mxbd\033[35m executes CMD before a file delete
\033[36mxad\033[35m executes CMD after a file delete
\033[36mxm\033[35m executes CMD on message
\033[0m
can be defined as --args or volflags; for example \033[36m
--xau notify-send
-v .::r:c,xau=notify-send
\033[0m
commands specified as --args are appended to volflags;
each --arg and volflag can be specified multiple times,
each command will execute in order unless one returns non-zero
optionally prefix the command with comma-sep. flags similar to -mtp:
\033[36mf\033[35m forks the process, doesn't wait for completion
\033[36mc\033[35m checks return code, blocks the action if non-zero
\033[36mj\033[35m provides json with info as 1st arg instead of filepath
\033[36mwN\033[35m waits N sec after command has been started before continuing
\033[36mtN\033[35m sets an N sec timeout before the command is abandoned
\033[36mkt\033[35m kills the entire process tree on timeout (default),
\033[36mkm\033[35m kills just the main process
\033[36mkn\033[35m lets it continue running until copyparty is terminated
\033[36mc0\033[35m show all process output (default)
\033[36mc1\033[35m show only stderr
\033[36mc2\033[35m show only stdout
\033[36mc3\033[35m mute all process output
\033[0m
except for \033[36mxm\033[0m, only one hook / one action can run at a time,
so it's recommended to use the \033[36mf\033[0m flag unless you really need
to wait for the hook to finish before continuing (without \033[36mf\033[0m
the upload speed can easily drop to 10% for small files)"""
),
],
[ [
"urlform", "urlform",
"how to handle url-form POSTs", "how to handle url-form POSTs",
@@ -650,9 +699,9 @@ def add_upload(ap):
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600") ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600")
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (very slow on windows)") ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (very slow on windows)")
ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even when it might be dangerous (multiprocessing, filesystems lacking sparse-files support, ...)") ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even when it might be dangerous (multiprocessing, filesystems lacking sparse-files support, ...)")
ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem)") ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem) (volflag=hardlink)")
ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made") ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made (volflag=neversymlink)")
ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead") ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead (volflag=copydupes)")
ap2.add_argument("--no-dupe", action="store_true", help="reject duplicate files during upload; only matches within the same volume (volflag=nodupe)") ap2.add_argument("--no-dupe", action="store_true", help="reject duplicate files during upload; only matches within the same volume (volflag=nodupe)")
ap2.add_argument("--no-snap", action="store_true", help="disable snapshots -- forget unfinished uploads on shutdown; don't create .hist/up2k.snap files -- abandoned/interrupted uploads must be cleaned up manually") ap2.add_argument("--no-snap", action="store_true", help="disable snapshots -- forget unfinished uploads on shutdown; don't create .hist/up2k.snap files -- abandoned/interrupted uploads must be cleaned up manually")
ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads (volflag=magic)") ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads (volflag=magic)")
@@ -672,6 +721,8 @@ def add_network(ap):
ap2.add_argument("--rp-loc", metavar="PATH", type=u, default="", help="if reverse-proxying on a location instead of a dedicated domain/subdomain, provide the base location here (eg. /foo/bar)") ap2.add_argument("--rp-loc", metavar="PATH", type=u, default="", help="if reverse-proxying on a location instead of a dedicated domain/subdomain, provide the base location here (eg. /foo/bar)")
if ANYWIN: if ANYWIN:
ap2.add_argument("--reuseaddr", action="store_true", help="set reuseaddr on listening sockets on windows; allows rapid restart of copyparty at the expense of being able to accidentally start multiple instances") ap2.add_argument("--reuseaddr", action="store_true", help="set reuseaddr on listening sockets on windows; allows rapid restart of copyparty at the expense of being able to accidentally start multiple instances")
else:
ap2.add_argument("--freebind", action="store_true", help="allow listening on IPs which do not yet exist, for example if the network interfaces haven't finished going up. Only makes sense for IPs other than '0.0.0.0', '127.0.0.1', '::', and '::1'. May require running as root (unless net.ipv6.ip_nonlocal_bind)")
ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes") ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes")
ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds") ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds")
ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds") ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds")
@@ -692,12 +743,13 @@ def add_zeroconf(ap):
ap2.add_argument("-z", action="store_true", help="enable all zeroconf backends (mdns, ssdp)") ap2.add_argument("-z", action="store_true", help="enable all zeroconf backends (mdns, ssdp)")
ap2.add_argument("--z-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes\n └─example: \033[32meth0, wlo1, virhost0, 192.168.123.0/24, fd00:fda::/96\033[0m") ap2.add_argument("--z-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes\n └─example: \033[32meth0, wlo1, virhost0, 192.168.123.0/24, fd00:fda::/96\033[0m")
ap2.add_argument("--z-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--z-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--z-chk", metavar="SEC", type=int, default=10, help="check for network changes every SEC seconds (0=disable)")
ap2.add_argument("-zv", action="store_true", help="verbose all zeroconf backends") ap2.add_argument("-zv", action="store_true", help="verbose all zeroconf backends")
ap2.add_argument("--mc-hop", metavar="SEC", type=int, default=0, help="rejoin multicast groups every SEC seconds (workaround for some switches/routers which cause mDNS to suddenly stop working after some time); try [\033[32m300\033[0m] or [\033[32m180\033[0m]") ap2.add_argument("--mc-hop", metavar="SEC", type=int, default=0, help="rejoin multicast groups every SEC seconds (workaround for some switches/routers which cause mDNS to suddenly stop working after some time); try [\033[32m300\033[0m] or [\033[32m180\033[0m]")
def add_zc_mdns(ap): def add_zc_mdns(ap):
ap2 = ap.add_argument_group("Zeroconf-mDNS options:") ap2 = ap.add_argument_group("Zeroconf-mDNS options")
ap2.add_argument("--zm", action="store_true", help="announce the enabled protocols over mDNS (multicast DNS-SD) -- compatible with KDE, gnome, macOS, ...") ap2.add_argument("--zm", action="store_true", help="announce the enabled protocols over mDNS (multicast DNS-SD) -- compatible with KDE, gnome, macOS, ...")
ap2.add_argument("--zm-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zm-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--zm-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zm-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
@@ -716,7 +768,7 @@ def add_zc_mdns(ap):
def add_zc_ssdp(ap): def add_zc_ssdp(ap):
ap2 = ap.add_argument_group("Zeroconf-SSDP options:") ap2 = ap.add_argument_group("Zeroconf-SSDP options")
ap2.add_argument("--zs", action="store_true", help="announce the enabled protocols over SSDP -- compatible with Windows") ap2.add_argument("--zs", action="store_true", help="announce the enabled protocols over SSDP -- compatible with Windows")
ap2.add_argument("--zs-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zs-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--zs-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zs-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
@@ -755,6 +807,23 @@ def add_smb(ap):
ap2.add_argument("--smbvvv", action="store_true", help="verbosest") ap2.add_argument("--smbvvv", action="store_true", help="verbosest")
def add_hooks(ap):
ap2 = ap.add_argument_group('hooks (see --help-hooks)')
ap2.add_argument("--xbu", metavar="CMD", type=u, action="append", help="execute CMD before a file upload starts")
ap2.add_argument("--xau", metavar="CMD", type=u, action="append", help="execute CMD after a file upload finishes")
ap2.add_argument("--xbr", metavar="CMD", type=u, action="append", help="execute CMD before a file move/rename")
ap2.add_argument("--xar", metavar="CMD", type=u, action="append", help="execute CMD after a file move/rename")
ap2.add_argument("--xbd", metavar="CMD", type=u, action="append", help="execute CMD before a file delete")
ap2.add_argument("--xad", metavar="CMD", type=u, action="append", help="execute CMD after a file delete")
ap2.add_argument("--xm", metavar="CMD", type=u, action="append", help="execute CMD on message")
def add_yolo(ap):
ap2 = ap.add_argument_group('yolo options')
ap2.add_argument("--allow-csrf", action="store_true", help="disable csrf protections; let other domains/sites impersonate you through cross-site requests")
ap2.add_argument("--getmod", action="store_true", help="permit ?move=[...] and ?delete as GET")
def add_optouts(ap): def add_optouts(ap):
ap2 = ap.add_argument_group('opt-outs') ap2 = ap.add_argument_group('opt-outs')
ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)") ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)")
@@ -764,6 +833,7 @@ def add_optouts(ap):
ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations") ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations")
ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI") ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI")
ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI") ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI")
ap2.add_argument("-nb", action="store_true", help="no powered-by-copyparty branding in UI")
ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar") ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar")
ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (as specified by the 'lifetime' volflag)") ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (as specified by the 'lifetime' volflag)")
@@ -771,8 +841,8 @@ def add_optouts(ap):
def add_safety(ap, fk_salt): def add_safety(ap, fk_salt):
ap2 = ap.add_argument_group('safety options') ap2 = ap.add_argument_group('safety options')
ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js") ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js")
ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, webdav, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --no-dot-mv --no-dot-ren --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih") ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, webdav, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih")
ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss --no-dav -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r") ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss --no-dav --no-logues --no-readme -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r")
ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments \033[33mUSER\033[0m,\033[33mVOL\033[0m,\033[33mFLAGS\033[0m; example [\033[32m**,*,ln,p,r\033[0m]") ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments \033[33mUSER\033[0m,\033[33mVOL\033[0m,\033[33mFLAGS\033[0m; example [\033[32m**,*,ln,p,r\033[0m]")
ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter") ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter")
ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter") ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter")
@@ -788,6 +858,8 @@ def add_safety(ap, fk_salt):
ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than \033[33mN\033[0m 404's in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes (disabled by default since turbo-up2k counts as 404s)") ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than \033[33mN\033[0m 404's in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes (disabled by default since turbo-up2k counts as 404s)")
ap2.add_argument("--aclose", metavar="MIN", type=int, default=10, help="if a client maxes out the server connection limit, downgrade it from connection:keep-alive to connection:close for MIN minutes (and also kill its active connections) -- disable with 0") ap2.add_argument("--aclose", metavar="MIN", type=int, default=10, help="if a client maxes out the server connection limit, downgrade it from connection:keep-alive to connection:close for MIN minutes (and also kill its active connections) -- disable with 0")
ap2.add_argument("--loris", metavar="B", type=int, default=60, help="if a client maxes out the server connection limit without sending headers, ban it for B minutes; disable with [\033[32m0\033[0m]") ap2.add_argument("--loris", metavar="B", type=int, default=60, help="if a client maxes out the server connection limit without sending headers, ban it for B minutes; disable with [\033[32m0\033[0m]")
ap2.add_argument("--acao", metavar="V[,V]", type=u, default="*", help="Access-Control-Allow-Origin; list of origins (domains/IPs without port) to accept requests from; [\033[32mhttps://1.2.3.4\033[0m]. Default [\033[32m*\033[0m] allows requests from all sites but removes cookies and http-auth; only ?pw=hunter2 survives")
ap2.add_argument("--acam", metavar="V[,V]", type=u, default="GET,HEAD", help="Access-Control-Allow-Methods; list of methods to accept from offsite ('*' behaves like described in --acao)")
def add_shutdown(ap): def add_shutdown(ap):
@@ -906,6 +978,11 @@ def add_ui(ap, retry):
ap2.add_argument("--textfiles", metavar="CSV", type=u, default="txt,nfo,diz,cue,readme", help="file extensions to present as plaintext") ap2.add_argument("--textfiles", metavar="CSV", type=u, default="txt,nfo,diz,cue,readme", help="file extensions to present as plaintext")
ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)") ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)")
ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents") ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents")
ap2.add_argument("--pb-url", metavar="URL", type=u, default="https://github.com/9001/copyparty", help="powered-by link; disable with -np")
ap2.add_argument("--md-sbf", metavar="FLAGS", type=u, default="downloads forms modals popups scripts top-navigation-by-user-activation", help="list of capabilities to ALLOW for README.md docs (volflag=md_sbf); see https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe#attr-sandbox")
ap2.add_argument("--lg-sbf", metavar="FLAGS", type=u, default="downloads forms modals popups scripts top-navigation-by-user-activation", help="list of capabilities to ALLOW for prologue/epilogue docs (volflag=lg_sbf)")
ap2.add_argument("--no-sb-md", action="store_true", help="don't sandbox README.md documents (volflags: no_sb_md | sb_md)")
ap2.add_argument("--no-sb-lg", action="store_true", help="don't sandbox prologue/epilogue docs (volflags: no_sb_lg | sb_lg); enables non-js support")
def add_debug(ap): def add_debug(ap):
@@ -964,6 +1041,8 @@ def run_argparse(
add_safety(ap, fk_salt) add_safety(ap, fk_salt)
add_optouts(ap) add_optouts(ap)
add_shutdown(ap) add_shutdown(ap)
add_yolo(ap)
add_hooks(ap)
add_ui(ap, retry) add_ui(ap, retry)
add_admin(ap) add_admin(ap)
add_logging(ap) add_logging(ap)

View File

@@ -1,8 +1,8 @@
# coding: utf-8 # coding: utf-8
VERSION = (1, 5, 2) VERSION = (1, 6, 0)
CODENAME = "babel" CODENAME = "cors k"
BUILD_DT = (2022, 12, 12) BUILD_DT = (2023, 1, 29)
S_VERSION = ".".join(map(str, VERSION)) S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT) S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

View File

@@ -587,7 +587,7 @@ class VFS(object):
# if multiselect: add all items to archive root # if multiselect: add all items to archive root
# if single folder: the folder itself is the top-level item # if single folder: the folder itself is the top-level item
folder = "" if flt or not wrap else (vrem.split("/")[-1] or "top") folder = "" if flt or not wrap else (vrem.split("/")[-1].lstrip(".") or "top")
g = self.walk(folder, vrem, [], uname, [[True, False]], dots, scandir, False) g = self.walk(folder, vrem, [], uname, [[True, False]], dots, scandir, False)
for _, _, vpath, apath, files, rd, vd in g: for _, _, vpath, apath, files, rd, vd in g:
@@ -812,7 +812,7 @@ class AuthSrv(object):
value: Union[str, bool, list[str]], value: Union[str, bool, list[str]],
is_list: bool, is_list: bool,
) -> None: ) -> None:
if name not in ["mtp"]: if name not in ["mtp", "xbu", "xau", "xbr", "xar", "xbd", "xad", "xm"]:
flags[name] = value flags[name] = value
return return
@@ -1119,14 +1119,33 @@ class AuthSrv(object):
vol.flags[k] = True vol.flags[k] = True
for ga, vf in ( for ga, vf in (
("no_sb_md", "no_sb_md"),
("no_sb_lg", "no_sb_lg"),
("no_forget", "noforget"), ("no_forget", "noforget"),
("no_dupe", "nodupe"), ("no_dupe", "nodupe"),
("hardlink", "hardlink"),
("never_symlink", "neversymlink"),
("no_dedup", "copydupes"),
("magic", "magic"), ("magic", "magic"),
("xlink", "xlink"), ("xlink", "xlink"),
): ):
if getattr(self.args, ga): if getattr(self.args, ga):
vol.flags[vf] = True vol.flags[vf] = True
for ve, vd in (
("sb_md", "no_sb_md"),
("sb_lg", "no_sb_lg"),
):
if ve in vol.flags:
vol.flags.pop(vd, None)
for ga, vf in (
("md_sbf", "md_sbf"),
("lg_sbf", "lg_sbf"),
):
if vf not in vol.flags:
vol.flags[vf] = getattr(self.args, ga)
for k1, k2 in IMPLICATIONS: for k1, k2 in IMPLICATIONS:
if k1 in vol.flags: if k1 in vol.flags:
vol.flags[k2] = True vol.flags[k2] = True
@@ -1151,8 +1170,32 @@ class AuthSrv(object):
if "mth" not in vol.flags: if "mth" not in vol.flags:
vol.flags["mth"] = self.args.mth vol.flags["mth"] = self.args.mth
# append parsers from argv to volflags # append additive args from argv to volflags
self._read_volflag(vol.flags, "mtp", self.args.mtp, True) hooks = "xbu xau xbr xar xbd xad xm".split()
for name in ["mtp"] + hooks:
self._read_volflag(vol.flags, name, getattr(self.args, name), True)
for hn in hooks:
cmds = vol.flags.get(hn)
if not cmds:
continue
ncmds = []
for cmd in cmds:
hfs = []
ocmd = cmd
while "," in cmd[:6]:
zs, cmd = cmd.split(",", 1)
hfs.append(zs)
if "c" in hfs and "f" in hfs:
t = "cannot combine flags c and f; removing f from eventhook [{}]"
self.log(t.format(ocmd), 1)
hfs = [x for x in hfs if x != "f"]
ocmd = ",".join(hfs + [cmd])
ncmds.append(ocmd)
vol.flags[hn] = ncmds
# d2d drops all database features for a volume # d2d drops all database features for a volume
for grp, rm in [["d2d", "e2d"], ["d2t", "e2t"], ["d2d", "e2v"]]: for grp, rm in [["d2d", "e2d"], ["d2t", "e2t"], ["d2d", "e2v"]]:
@@ -1193,6 +1236,9 @@ class AuthSrv(object):
self.log(t.format(vol.vpath), 1) self.log(t.format(vol.vpath), 1)
del vol.flags["lifetime"] del vol.flags["lifetime"]
if vol.flags.get("neversymlink") and not vol.flags.get("hardlink"):
vol.flags["copydupes"] = True
# verify tags mentioned by -mt[mp] are used by -mte # verify tags mentioned by -mt[mp] are used by -mte
local_mtp = {} local_mtp = {}
local_only_mtp = {} local_only_mtp = {}

View File

@@ -63,6 +63,7 @@ from .util import (
read_socket_unbounded, read_socket_unbounded,
relchk, relchk,
ren_open, ren_open,
runhook,
hidedir, hidedir,
s3enc, s3enc,
sanitize_fn, sanitize_fn,
@@ -125,9 +126,9 @@ class HttpCli(object):
self.mode = " " self.mode = " "
self.req = " " self.req = " "
self.http_ver = " " self.http_ver = " "
self.host = " "
self.ua = " " self.ua = " "
self.is_rclone = False self.is_rclone = False
self.is_ancient = False
self.ouparam: dict[str, str] = {} self.ouparam: dict[str, str] = {}
self.uparam: dict[str, str] = {} self.uparam: dict[str, str] = {}
self.cookies: dict[str, str] = {} self.cookies: dict[str, str] = {}
@@ -156,8 +157,8 @@ class HttpCli(object):
self.trailing_slash = True self.trailing_slash = True
self.out_headerlist: list[tuple[str, str]] = [] self.out_headerlist: list[tuple[str, str]] = []
self.out_headers = { self.out_headers = {
"Access-Control-Allow-Origin": "*", "Vary": "Origin, PW, Cookie",
"Cache-Control": "no-store; max-age=0", "Cache-Control": "no-store, max-age=0",
} }
h = self.args.html_head h = self.args.html_head
if self.args.no_robots: if self.args.no_robots:
@@ -252,7 +253,6 @@ class HttpCli(object):
self.ua = self.headers.get("user-agent", "") self.ua = self.headers.get("user-agent", "")
self.is_rclone = self.ua.startswith("rclone/") self.is_rclone = self.ua.startswith("rclone/")
self.is_ancient = self.ua.startswith("Mozilla/4.")
zs = self.headers.get("connection", "").lower() zs = self.headers.get("connection", "").lower()
self.keepalive = "close" not in zs and ( self.keepalive = "close" not in zs and (
@@ -261,6 +261,9 @@ class HttpCli(object):
self.is_https = ( self.is_https = (
self.headers.get("x-forwarded-proto", "").lower() == "https" or self.tls self.headers.get("x-forwarded-proto", "").lower() == "https" or self.tls
) )
self.host = self.headers.get("host") or "{}:{}".format(
*list(self.s.getsockname()[:2])
)
n = self.args.rproxy n = self.args.rproxy
if n: if n:
@@ -279,6 +282,7 @@ class HttpCli(object):
self.log_src = self.conn.set_rproxy(self.ip) self.log_src = self.conn.set_rproxy(self.ip)
self.is_vproxied = bool(self.args.R) self.is_vproxied = bool(self.args.R)
self.host = self.headers.get("x-forwarded-host") or self.host
if self.is_banned(): if self.is_banned():
return False return False
@@ -294,7 +298,10 @@ class HttpCli(object):
else: else:
self.keepalive = False self.keepalive = False
if self.args.ihead: ptn: Optional[Pattern[str]] = self.conn.lf_url # mypy404
self.do_log = not ptn or not ptn.search(self.req)
if self.args.ihead and self.do_log:
keys = self.args.ihead keys = self.args.ihead
if "*" in keys: if "*" in keys:
keys = list(sorted(self.headers.keys())) keys = list(sorted(self.headers.keys()))
@@ -339,11 +346,12 @@ class HttpCli(object):
if zso: if zso:
zsll = [x.split("=", 1) for x in zso.split(";") if "=" in x] zsll = [x.split("=", 1) for x in zso.split(";") if "=" in x]
cookies = {k.strip(): unescape_cookie(zs) for k, zs in zsll} cookies = {k.strip(): unescape_cookie(zs) for k, zs in zsll}
for kc, ku in [["cppwd", "pw"], ["b", "b"]]: cookie_pw = cookies.get("cppws") or cookies.get("cppwd") or ""
if kc in cookies and ku not in uparam: if "b" in cookies and "b" not in uparam:
uparam[ku] = cookies[kc] uparam["b"] = cookies["b"]
else: else:
cookies = {} cookies = {}
cookie_pw = ""
if len(uparam) > 10 or len(cookies) > 50: if len(uparam) > 10 or len(cookies) > 50:
raise Pebkac(400, "u wot m8") raise Pebkac(400, "u wot m8")
@@ -356,25 +364,24 @@ class HttpCli(object):
if ANYWIN: if ANYWIN:
ok = ok and not relchk(self.vpath) ok = ok and not relchk(self.vpath)
if not ok: if not ok and (self.vpath != "*" or self.mode != "OPTIONS"):
self.log("invalid relpath [{}]".format(self.vpath)) self.log("invalid relpath [{}]".format(self.vpath))
return self.tx_404() and self.keepalive return self.tx_404() and self.keepalive
pwd = ""
zso = self.headers.get("authorization") zso = self.headers.get("authorization")
bauth = ""
if zso: if zso:
try: try:
zb = zso.split(" ")[1].encode("ascii") zb = zso.split(" ")[1].encode("ascii")
zs = base64.b64decode(zb).decode("utf-8") zs = base64.b64decode(zb).decode("utf-8")
# try "pwd", "x:pwd", "pwd:x" # try "pwd", "x:pwd", "pwd:x"
for zs in [zs] + zs.split(":", 1)[::-1]: for bauth in [zs] + zs.split(":", 1)[::-1]:
if self.asrv.iacct.get(zs): if self.asrv.iacct.get(bauth):
pwd = zs
break break
except: except:
pass pass
self.pw = uparam.get("pw") or pwd self.pw = uparam.get("pw") or self.headers.get("pw") or bauth or cookie_pw
self.uname = self.asrv.iacct.get(self.pw) or "*" self.uname = self.asrv.iacct.get(self.pw) or "*"
self.rvol = self.asrv.vfs.aread[self.uname] self.rvol = self.asrv.vfs.aread[self.uname]
self.wvol = self.asrv.vfs.awrite[self.uname] self.wvol = self.asrv.vfs.awrite[self.uname]
@@ -383,17 +390,17 @@ class HttpCli(object):
self.gvol = self.asrv.vfs.aget[self.uname] self.gvol = self.asrv.vfs.aget[self.uname]
self.upvol = self.asrv.vfs.apget[self.uname] self.upvol = self.asrv.vfs.apget[self.uname]
if self.pw: if self.pw and (
self.out_headerlist.append(("Set-Cookie", self.get_pwd_cookie(self.pw)[0])) self.pw != cookie_pw or self.conn.freshen_pwd + 30 < time.time()
):
self.conn.freshen_pwd = time.time()
self.get_pwd_cookie(self.pw)
if self.is_rclone: if self.is_rclone:
uparam["dots"] = "" uparam["dots"] = ""
uparam["b"] = "" uparam["b"] = ""
cookies["b"] = "" cookies["b"] = ""
ptn: Optional[Pattern[str]] = self.conn.lf_url # mypy404
self.do_log = not ptn or not ptn.search(self.req)
( (
self.can_read, self.can_read,
self.can_write, self.can_write,
@@ -404,15 +411,22 @@ class HttpCli(object):
) = self.asrv.vfs.can_access(self.vpath, self.uname) ) = self.asrv.vfs.can_access(self.vpath, self.uname)
try: try:
# getattr(self.mode) is not yet faster than this cors_k = self._cors()
if self.mode in ["GET", "HEAD"]: if self.mode in ("GET", "HEAD"):
return self.handle_get() and self.keepalive return self.handle_get() and self.keepalive
elif self.mode == "POST": if self.mode == "OPTIONS":
return self.handle_options() and self.keepalive
if not cors_k:
origin = self.headers.get("origin", "<?>")
self.log("cors-reject {} from {}".format(self.mode, origin), 3)
raise Pebkac(403, "no surfing")
# getattr(self.mode) is not yet faster than this
if self.mode == "POST":
return self.handle_post() and self.keepalive return self.handle_post() and self.keepalive
elif self.mode == "PUT": elif self.mode == "PUT":
return self.handle_put() and self.keepalive return self.handle_put() and self.keepalive
elif self.mode == "OPTIONS":
return self.handle_options() and self.keepalive
elif self.mode == "PROPFIND": elif self.mode == "PROPFIND":
return self.handle_propfind() and self.keepalive return self.handle_propfind() and self.keepalive
elif self.mode == "DELETE": elif self.mode == "DELETE":
@@ -631,6 +645,63 @@ class HttpCli(object):
return True return True
def _cors(self) -> bool:
ih = self.headers
origin = ih.get("origin")
if not origin:
sfsite = ih.get("sec-fetch-site")
if sfsite and sfsite.lower().startswith("cross"):
origin = ":|" # sandboxed iframe
else:
return True
oh = self.out_headers
origin = origin.lower()
good_origins = self.args.acao + [
"{}://{}".format(
"https" if self.is_https else "http",
self.host.lower().split(":")[0],
)
]
if re.sub(r"(:[0-9]{1,5})?/?$", "", origin) in good_origins:
good_origin = True
bad_hdrs = ("",)
else:
good_origin = False
bad_hdrs = ("", "pw")
# '*' blocks all credentials (cookies, http-auth);
# exact-match for Origin is necessary to unlock those,
# however yolo-requests (?pw=) are always allowed
acah = ih.get("access-control-request-headers", "")
acao = (origin if good_origin else None) or (
"*" if "*" in good_origins else None
)
if self.args.allow_csrf:
acao = origin or acao or "*" # explicitly permit impersonation
acam = ", ".join(self.conn.hsrv.mallow) # and all methods + headers
oh["Access-Control-Allow-Credentials"] = "true"
good_origin = True
else:
acam = ", ".join(self.args.acam)
# wash client-requested headers and roll with that
if "range" not in acah.lower():
acah += ",Range" # firefox
req_h = acah.split(",")
req_h = [x.strip() for x in req_h]
req_h = [x for x in req_h if x.lower() not in bad_hdrs]
acah = ", ".join(req_h)
if not acao:
return False
oh["Access-Control-Allow-Origin"] = acao
oh["Access-Control-Allow-Methods"] = acam.upper()
if acah:
oh["Access-Control-Allow-Headers"] = acah
return good_origin
def handle_get(self) -> bool: def handle_get(self) -> bool:
if self.do_log: if self.do_log:
logmsg = "{:4} {}".format(self.mode, self.req) logmsg = "{:4} {}".format(self.mode, self.req)
@@ -679,15 +750,16 @@ class HttpCli(object):
if "tree" in self.uparam: if "tree" in self.uparam:
return self.tx_tree() return self.tx_tree()
if "delete" in self.uparam:
return self.handle_rm([])
if "move" in self.uparam:
return self.handle_mv()
if "scan" in self.uparam: if "scan" in self.uparam:
return self.scanvol() return self.scanvol()
if self.args.getmod:
if "delete" in self.uparam:
return self.handle_rm([])
if "move" in self.uparam:
return self.handle_mv()
if not self.vpath: if not self.vpath:
if "reload" in self.uparam: if "reload" in self.uparam:
return self.handle_reload() return self.handle_reload()
@@ -835,7 +907,7 @@ class HttpCli(object):
raise Pebkac(404) raise Pebkac(404)
fgen = itertools.chain([topdir], fgen) # type: ignore fgen = itertools.chain([topdir], fgen) # type: ignore
vtop = vjoin(vn.vpath, rem) vtop = vjoin(self.args.R, vjoin(vn.vpath, rem))
chunksz = 0x7FF8 # preferred by nginx or cf (dunno which) chunksz = 0x7FF8 # preferred by nginx or cf (dunno which)
@@ -935,7 +1007,7 @@ class HttpCli(object):
el = xroot.find(r"./{DAV:}response") el = xroot.find(r"./{DAV:}response")
assert el assert el
e2 = mktnod("D:href", quotep("/" + self.vpath)) e2 = mktnod("D:href", quotep(self.args.SRS + self.vpath))
el.insert(0, e2) el.insert(0, e2)
el = xroot.find(r"./{DAV:}response/{DAV:}propstat") el = xroot.find(r"./{DAV:}response/{DAV:}propstat")
@@ -990,7 +1062,9 @@ class HttpCli(object):
lk.append(mkenod("D:timeout", mktnod("D:href", "Second-3310"))) lk.append(mkenod("D:timeout", mktnod("D:href", "Second-3310")))
lk.append(mkenod("D:locktoken", mktnod("D:href", uuid.uuid4().urn))) lk.append(mkenod("D:locktoken", mktnod("D:href", uuid.uuid4().urn)))
lk.append(mkenod("D:lockroot", mktnod("D:href", "/" + quotep(self.vpath)))) lk.append(
mkenod("D:lockroot", mktnod("D:href", quotep(self.args.SRS + self.vpath)))
)
lk2 = mkenod("D:activelock") lk2 = mkenod("D:activelock")
xroot = mkenod("D:prop", mkenod("D:lockdiscovery", lk2)) xroot = mkenod("D:prop", mkenod("D:lockdiscovery", lk2))
@@ -1082,26 +1156,16 @@ class HttpCli(object):
if self.do_log: if self.do_log:
self.log("OPTIONS " + self.req) self.log("OPTIONS " + self.req)
ret = { oh = self.out_headers
"Allow": "GET, HEAD, POST, PUT, OPTIONS", oh["Allow"] = ", ".join(self.conn.hsrv.mallow)
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "*",
"Access-Control-Allow-Headers": "*",
}
wd = {
"Dav": "1, 2",
"Ms-Author-Via": "DAV",
}
if not self.args.no_dav: if not self.args.no_dav:
# PROPPATCH, LOCK, UNLOCK, COPY: noop (spec-must) # PROPPATCH, LOCK, UNLOCK, COPY: noop (spec-must)
zs = ", PROPFIND, PROPPATCH, LOCK, UNLOCK, MKCOL, COPY, MOVE, DELETE" oh["Dav"] = "1, 2"
ret["Allow"] += zs oh["Ms-Author-Via"] = "DAV"
ret.update(wd)
# winxp-webdav doesnt know what 204 is # winxp-webdav doesnt know what 204 is
self.send_headers(0, 200, headers=ret) self.send_headers(0, 200)
return True return True
def handle_delete(self) -> bool: def handle_delete(self) -> bool:
@@ -1141,8 +1205,6 @@ class HttpCli(object):
return self.handle_stash(False) return self.handle_stash(False)
ctype = self.headers.get("content-type", "").lower() ctype = self.headers.get("content-type", "").lower()
if not ctype:
raise Pebkac(400, "you can't post without a content-type header")
if "multipart/form-data" in ctype: if "multipart/form-data" in ctype:
return self.handle_post_multipart() return self.handle_post_multipart()
@@ -1154,6 +1216,12 @@ class HttpCli(object):
): ):
return self.handle_post_json() return self.handle_post_json()
if "move" in self.uparam:
return self.handle_mv()
if "delete" in self.uparam:
return self.handle_rm([])
if "application/octet-stream" in ctype: if "application/octet-stream" in ctype:
return self.handle_post_binary() return self.handle_post_binary()
@@ -1182,9 +1250,27 @@ class HttpCli(object):
plain = zb.decode("utf-8", "replace") plain = zb.decode("utf-8", "replace")
if buf.startswith(b"msg="): if buf.startswith(b"msg="):
plain = plain[4:] plain = plain[4:]
vfs, rem = self.asrv.vfs.get(
self.vpath, self.uname, False, False
)
xm = vfs.flags.get("xm")
if xm:
runhook(
self.log,
xm,
vfs.canonical(rem),
self.vpath,
self.host,
self.uname,
self.ip,
time.time(),
len(xm),
plain,
)
t = "urlform_dec {} @ {}\n {}\n" t = "urlform_dec {} @ {}\n {}\n"
self.log(t.format(len(plain), self.vpath, plain)) self.log(t.format(len(plain), self.vpath, plain))
except Exception as ex: except Exception as ex:
self.log(repr(ex)) self.log(repr(ex))
@@ -1225,7 +1311,7 @@ class HttpCli(object):
# post_sz, sha_hex, sha_b64, remains, path, url # post_sz, sha_hex, sha_b64, remains, path, url
reader, remains = self.get_body_reader() reader, remains = self.get_body_reader()
vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True) vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True)
rnd, _, lifetime = self.upload_flags(vfs) rnd, _, lifetime, xbu, xau = self.upload_flags(vfs)
lim = vfs.get_dbv(rem)[0].lim lim = vfs.get_dbv(rem)[0].lim
fdir = vfs.canonical(rem) fdir = vfs.canonical(rem)
if lim: if lim:
@@ -1305,6 +1391,8 @@ class HttpCli(object):
if rnd and not self.args.nw: if rnd and not self.args.nw:
fn = self.rand_name(fdir, fn, rnd) fn = self.rand_name(fdir, fn, rnd)
path = os.path.join(fdir, fn)
if is_put and not self.args.no_dav: if is_put and not self.args.no_dav:
# allow overwrite if... # allow overwrite if...
# * volflag 'daw' is set # * volflag 'daw' is set
@@ -1313,7 +1401,6 @@ class HttpCli(object):
# * file exists and is empty # * file exists and is empty
# * and there is no .PARTIAL # * and there is no .PARTIAL
path = os.path.join(fdir, fn)
tnam = fn + ".PARTIAL" tnam = fn + ".PARTIAL"
if self.args.dotpart: if self.args.dotpart:
tnam = "." + tnam tnam = "." + tnam
@@ -1325,6 +1412,24 @@ class HttpCli(object):
): ):
params["overwrite"] = "a" params["overwrite"] = "a"
if xbu:
at = time.time() - lifetime
if not runhook(
self.log,
xbu,
path,
self.vpath,
self.host,
self.uname,
self.ip,
at,
remains,
"",
):
t = "upload denied by xbu"
self.log(t, 1)
raise Pebkac(403, t)
with ren_open(fn, *open_a, **params) as zfw: with ren_open(fn, *open_a, **params) as zfw:
f, fn = zfw["orz"] f, fn = zfw["orz"]
path = os.path.join(fdir, fn) path = os.path.join(fdir, fn)
@@ -1364,6 +1469,24 @@ class HttpCli(object):
fn = fn2 fn = fn2
path = path2 path = path2
at = time.time() - lifetime
if xau and not runhook(
self.log,
xau,
path,
self.vpath,
self.host,
self.uname,
self.ip,
at,
post_sz,
"",
):
t = "upload denied by xau"
self.log(t, 1)
os.unlink(path)
raise Pebkac(403, t)
vfs, rem = vfs.get_dbv(rem) vfs, rem = vfs.get_dbv(rem)
self.conn.hsrv.broker.say( self.conn.hsrv.broker.say(
"up2k.hash_file", "up2k.hash_file",
@@ -1372,7 +1495,7 @@ class HttpCli(object):
rem, rem,
fn, fn,
self.ip, self.ip,
time.time() - lifetime, at,
) )
vsuf = "" vsuf = ""
@@ -1389,7 +1512,7 @@ class HttpCli(object):
url = "{}://{}/{}".format( url = "{}://{}/{}".format(
"https" if self.is_https else "http", "https" if self.is_https else "http",
self.headers.get("host") or "{}:{}".format(*list(self.s.getsockname()[:2])), self.host,
self.args.RS + vpath + vsuf, self.args.RS + vpath + vsuf,
) )
@@ -1565,6 +1688,8 @@ class HttpCli(object):
body["vtop"] = dbv.vpath body["vtop"] = dbv.vpath
body["ptop"] = dbv.realpath body["ptop"] = dbv.realpath
body["prel"] = vrem body["prel"] = vrem
body["host"] = self.host
body["user"] = self.uname
body["addr"] = self.ip body["addr"] = self.ip
body["vcfg"] = dbv.flags body["vcfg"] = dbv.flags
@@ -1786,19 +1911,19 @@ class HttpCli(object):
self.out_headerlist = [ self.out_headerlist = [
x x
for x in self.out_headerlist for x in self.out_headerlist
if x[0] != "Set-Cookie" or "cppwd=" not in x[1] if x[0] != "Set-Cookie" or "cppwd" != x[1][:5]
] ]
dst = "/" dst = self.args.SRS
if self.vpath: if self.vpath:
dst += quotep(self.vpath) dst += quotep(self.vpath)
ck, msg = self.get_pwd_cookie(pwd) msg = self.get_pwd_cookie(pwd)
html = self.j2s("msg", h1=msg, h2='<a href="' + dst + '">ack</a>', redir=dst) html = self.j2s("msg", h1=msg, h2='<a href="' + dst + '">ack</a>', redir=dst)
self.reply(html.encode("utf-8"), headers={"Set-Cookie": ck}) self.reply(html.encode("utf-8"))
return True return True
def get_pwd_cookie(self, pwd: str) -> tuple[str, str]: def get_pwd_cookie(self, pwd: str) -> str:
if pwd in self.asrv.iacct: if pwd in self.asrv.iacct:
msg = "login ok" msg = "login ok"
dur = int(60 * 60 * self.args.logout) dur = int(60 * 60 * self.args.logout)
@@ -1815,11 +1940,18 @@ class HttpCli(object):
pwd = "x" # nosec pwd = "x" # nosec
dur = None dur = None
r = gencookie("cppwd", pwd, dur) if pwd == "x":
if self.is_ancient: # reset both plaintext and tls
r = r.rsplit(" ", 1)[0] # (only affects active tls cookies when tls)
for k in ("cppwd", "cppws") if self.tls else ("cppwd",):
ck = gencookie(k, pwd, self.args.R, False, dur)
self.out_headerlist.append(("Set-Cookie", ck))
else:
k = "cppws" if self.tls else "cppwd"
ck = gencookie(k, pwd, self.args.R, self.tls, dur)
self.out_headerlist.append(("Set-Cookie", ck))
return r, msg return msg
def handle_mkdir(self) -> bool: def handle_mkdir(self) -> bool:
assert self.parser assert self.parser
@@ -1886,7 +2018,7 @@ class HttpCli(object):
self.redirect(vpath, "?edit") self.redirect(vpath, "?edit")
return True return True
def upload_flags(self, vfs: VFS) -> tuple[int, bool, int]: def upload_flags(self, vfs: VFS) -> tuple[int, bool, int, list[str], list[str]]:
srnd = self.uparam.get("rand", self.headers.get("rand", "")) srnd = self.uparam.get("rand", self.headers.get("rand", ""))
rnd = int(srnd) if srnd and not self.args.nw else 0 rnd = int(srnd) if srnd and not self.args.nw else 0
ac = self.uparam.get( ac = self.uparam.get(
@@ -1900,7 +2032,13 @@ class HttpCli(object):
else: else:
lifetime = 0 lifetime = 0
return rnd, want_url, lifetime return (
rnd,
want_url,
lifetime,
vfs.flags.get("xbu") or [],
vfs.flags.get("xau") or [],
)
def handle_plain_upload(self) -> bool: def handle_plain_upload(self) -> bool:
assert self.parser assert self.parser
@@ -1917,7 +2055,7 @@ class HttpCli(object):
if not nullwrite: if not nullwrite:
bos.makedirs(fdir_base) bos.makedirs(fdir_base)
rnd, want_url, lifetime = self.upload_flags(vfs) rnd, want_url, lifetime, xbu, xau = self.upload_flags(vfs)
files: list[tuple[int, str, str, str, str, str]] = [] files: list[tuple[int, str, str, str, str, str]] = []
# sz, sha_hex, sha_b64, p_file, fname, abspath # sz, sha_hex, sha_b64, p_file, fname, abspath
@@ -1959,6 +2097,24 @@ class HttpCli(object):
tnam = fname = os.devnull tnam = fname = os.devnull
fdir = abspath = "" fdir = abspath = ""
if xbu:
at = time.time() - lifetime
if not runhook(
self.log,
xbu,
abspath,
self.vpath,
self.host,
self.uname,
self.ip,
at,
0,
"",
):
t = "upload denied by xbu"
self.log(t, 1)
raise Pebkac(403, t)
if lim: if lim:
lim.chk_bup(self.ip) lim.chk_bup(self.ip)
lim.chk_nup(self.ip) lim.chk_nup(self.ip)
@@ -2001,6 +2157,24 @@ class HttpCli(object):
files.append( files.append(
(sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath) (sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath)
) )
at = time.time() - lifetime
if xau and not runhook(
self.log,
xau,
abspath,
self.vpath,
self.host,
self.uname,
self.ip,
at,
sz,
"",
):
t = "upload denied by xau"
self.log(t, 1)
os.unlink(abspath)
raise Pebkac(403, t)
dbv, vrem = vfs.get_dbv(rem) dbv, vrem = vfs.get_dbv(rem)
self.conn.hsrv.broker.say( self.conn.hsrv.broker.say(
"up2k.hash_file", "up2k.hash_file",
@@ -2009,7 +2183,7 @@ class HttpCli(object):
vrem, vrem,
fname, fname,
self.ip, self.ip,
time.time() - lifetime, at,
) )
self.conn.nbyte += sz self.conn.nbyte += sz
@@ -2069,8 +2243,7 @@ class HttpCli(object):
jpart = { jpart = {
"url": "{}://{}/{}".format( "url": "{}://{}/{}".format(
"https" if self.is_https else "http", "https" if self.is_https else "http",
self.headers.get("host") self.host,
or "{}:{}".format(*list(self.s.getsockname()[:2])),
rel_url, rel_url,
), ),
"sha512": sha_hex[:56], "sha512": sha_hex[:56],
@@ -2273,8 +2446,17 @@ class HttpCli(object):
if stat.S_ISDIR(st.st_mode): if stat.S_ISDIR(st.st_mode):
continue continue
if stat.S_ISBLK(st.st_mode):
fd = bos.open(fs_path, os.O_RDONLY)
try:
sz = os.lseek(fd, 0, os.SEEK_END)
finally:
os.close(fd)
else:
sz = st.st_size
file_ts = max(file_ts, int(st.st_mtime)) file_ts = max(file_ts, int(st.st_mtime))
editions[ext or "plain"] = (fs_path, st.st_size) editions[ext or "plain"] = (fs_path, sz)
except: except:
pass pass
if not self.vpath.startswith(".cpr/"): if not self.vpath.startswith(".cpr/"):
@@ -2451,7 +2633,7 @@ class HttpCli(object):
if fn: if fn:
fn = fn.rstrip("/").split("/")[-1] fn = fn.rstrip("/").split("/")[-1]
else: else:
fn = self.headers.get("host", "hey") fn = self.host.split(":")[0]
safe = (string.ascii_letters + string.digits).replace("%", "") safe = (string.ascii_letters + string.digits).replace("%", "")
afn = "".join([x if x in safe.replace('"', "") else "_" for x in fn]) afn = "".join([x if x in safe.replace('"', "") else "_" for x in fn])
@@ -2606,7 +2788,7 @@ class HttpCli(object):
def tx_svcs(self) -> bool: def tx_svcs(self) -> bool:
aname = re.sub("[^0-9a-zA-Z]+", "", self.args.name) or "a" aname = re.sub("[^0-9a-zA-Z]+", "", self.args.name) or "a"
ep = self.headers["host"] ep = self.host
host = ep.split(":")[0] host = ep.split(":")[0]
hport = ep[ep.find(":") :] if ":" in ep else "" hport = ep[ep.find(":") :] if ":" in ep else ""
rip = ( rip = (
@@ -2614,6 +2796,7 @@ class HttpCli(object):
if self.args.rclone_mdns or not self.args.zm if self.args.rclone_mdns or not self.args.zm
else self.conn.hsrv.nm.map(self.ip) or host else self.conn.hsrv.nm.map(self.ip) or host
) )
vp = (self.uparam["hc"] or "").lstrip("/")
html = self.j2s( html = self.j2s(
"svcs", "svcs",
args=self.args, args=self.args,
@@ -2621,7 +2804,8 @@ class HttpCli(object):
s="s" if self.is_https else "", s="s" if self.is_https else "",
rip=rip, rip=rip,
ep=ep, ep=ep,
vp=(self.uparam["hc"] or "").lstrip("/"), vp=vp,
rvp=vjoin(self.args.R, vp),
host=host, host=host,
hport=hport, hport=hport,
aname=aname, aname=aname,
@@ -2652,7 +2836,11 @@ class HttpCli(object):
"dbwt": None, "dbwt": None,
} }
if self.uparam.get("ls") in ["v", "t", "txt"]: fmt = self.uparam.get("ls", "")
if not fmt and self.ua.startswith("curl/"):
fmt = "v"
if fmt in ["v", "t", "txt"]:
if self.uname == "*": if self.uname == "*":
txt = "howdy stranger (you're not logged in)" txt = "howdy stranger (you're not logged in)"
else: else:
@@ -2697,21 +2885,22 @@ class HttpCli(object):
return True return True
def set_k304(self) -> bool: def set_k304(self) -> bool:
ck = gencookie("k304", self.uparam["k304"], 60 * 60 * 24 * 299) ck = gencookie("k304", self.uparam["k304"], self.args.R, False, 86400 * 299)
self.out_headerlist.append(("Set-Cookie", ck)) self.out_headerlist.append(("Set-Cookie", ck))
self.redirect("", "?h#cc") self.redirect("", "?h#cc")
return True return True
def set_am_js(self) -> bool: def set_am_js(self) -> bool:
v = "n" if self.uparam["am_js"] == "n" else "y" v = "n" if self.uparam["am_js"] == "n" else "y"
ck = gencookie("js", v, 60 * 60 * 24 * 299) ck = gencookie("js", v, self.args.R, False, 86400 * 299)
self.out_headerlist.append(("Set-Cookie", ck)) self.out_headerlist.append(("Set-Cookie", ck))
self.reply(b"promoted\n") self.reply(b"promoted\n")
return True return True
def set_cfg_reset(self) -> bool: def set_cfg_reset(self) -> bool:
for k in ("k304", "js", "cppwd"): for k in ("k304", "js", "cppwd", "cppws"):
self.out_headerlist.append(("Set-Cookie", gencookie(k, "x", None))) cookie = gencookie(k, "x", self.args.R, False, None)
self.out_headerlist.append(("Set-Cookie", cookie))
self.redirect("", "?h#cc") self.redirect("", "?h#cc")
return True return True
@@ -2849,6 +3038,7 @@ class HttpCli(object):
raise Pebkac(500, "sqlite3 is not available on the server; cannot unpost") raise Pebkac(500, "sqlite3 is not available on the server; cannot unpost")
filt = self.uparam.get("filter") filt = self.uparam.get("filter")
filt = unquotep(filt or "")
lm = "ups [{}]".format(filt) lm = "ups [{}]".format(filt)
self.log(lm) self.log(lm)
@@ -2971,7 +3161,7 @@ class HttpCli(object):
biggest = 0 biggest = 0
if arg == "v": if arg == "v":
fmt = "\033[0;7;36m{{}} {{:>{}}}\033[0m {{}}" fmt = "\033[0;7;36m{{}}{{:>{}}}\033[0m {{}}"
nfmt = "{}" nfmt = "{}"
biggest = 0 biggest = 0
f2 = "".join( f2 = "".join(
@@ -2991,7 +3181,7 @@ class HttpCli(object):
a = x["dt"].replace("-", " ").replace(":", " ").split(" ") a = x["dt"].replace("-", " ").replace(":", " ").split(" ")
x["dt"] = f2.format(*list(a)) x["dt"] = f2.format(*list(a))
sz = humansize(x["sz"], True) sz = humansize(x["sz"], True)
x["sz"] = "\033[0;3{}m{:>5}".format(ctab.get(sz[-1:], 0), sz) x["sz"] = "\033[0;3{}m {:>5}".format(ctab.get(sz[-1:], 0), sz)
else: else:
fmt = "{{}} {{:{},}} {{}}" fmt = "{{}} {{:{},}} {{}}"
nfmt = "{:,}" nfmt = "{:,}"
@@ -3142,6 +3332,10 @@ class HttpCli(object):
is_ls = "ls" in self.uparam is_ls = "ls" in self.uparam
is_js = self.args.force_js or self.cookies.get("js") == "y" is_js = self.args.force_js or self.cookies.get("js") == "y"
if not is_ls and self.ua.startswith("curl/"):
self.uparam["ls"] = "v"
is_ls = True
tpl = "browser" tpl = "browser"
if "b" in self.uparam: if "b" in self.uparam:
tpl = "browser2" tpl = "browser2"
@@ -3164,6 +3358,7 @@ class HttpCli(object):
readme = f.read().decode("utf-8") readme = f.read().decode("utf-8")
break break
vf = vn.flags
ls_ret = { ls_ret = {
"dirs": [], "dirs": [],
"files": [], "files": [],
@@ -3196,6 +3391,8 @@ class HttpCli(object):
"have_zip": (not self.args.no_zip), "have_zip": (not self.args.no_zip),
"have_unpost": int(self.args.unpost), "have_unpost": int(self.args.unpost),
"have_b_u": (self.can_write and self.uparam.get("b") == "u"), "have_b_u": (self.can_write and self.uparam.get("b") == "u"),
"sb_md": "" if "no_sb_md" in vf else (vf.get("md_sbf") or "y"),
"sb_lg": "" if "no_sb_lg" in vf else (vf.get("lg_sbf") or "y"),
"url_suf": url_suf, "url_suf": url_suf,
"logues": logues, "logues": logues,
"readme": readme, "readme": readme,

View File

@@ -65,6 +65,7 @@ class HttpConn(object):
self.ico: Ico = Ico(self.args) # mypy404 self.ico: Ico = Ico(self.args) # mypy404
self.t0: float = time.time() # mypy404 self.t0: float = time.time() # mypy404
self.freshen_pwd: float = 0.0
self.stopping = False self.stopping = False
self.nreq: int = -1 # mypy404 self.nreq: int = -1 # mypy404
self.nbyte: int = 0 # mypy404 self.nbyte: int = 0 # mypy404

View File

@@ -81,8 +81,7 @@ class HttpSrv(object):
self.bans: dict[str, int] = {} self.bans: dict[str, int] = {}
self.aclose: dict[str, int] = {} self.aclose: dict[str, int] = {}
self.ip = "" self.bound: set[tuple[str, int]] = set()
self.port = 0
self.name = "hsrv" + nsuf self.name = "hsrv" + nsuf
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.stopping = False self.stopping = False
@@ -110,6 +109,11 @@ class HttpSrv(object):
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz") zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
self.prism = os.path.exists(zs) self.prism = os.path.exists(zs)
self.mallow = "GET HEAD POST PUT DELETE OPTIONS".split()
if not self.args.no_dav:
zs = "PROPFIND PROPPATCH LOCK UNLOCK MKCOL COPY MOVE"
self.mallow += zs.split()
if self.args.zs: if self.args.zs:
from .ssdp import SSDPr from .ssdp import SSDPr
@@ -142,7 +146,11 @@ class HttpSrv(object):
pass pass
def set_netdevs(self, netdevs: dict[str, Netdev]) -> None: def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
self.nm = NetMap([self.ip], netdevs) ips = set()
for ip, _ in self.bound:
ips.add(ip)
self.nm = NetMap(list(ips), netdevs)
def start_threads(self, n: int) -> None: def start_threads(self, n: int) -> None:
self.tp_nthr += n self.tp_nthr += n
@@ -184,12 +192,13 @@ class HttpSrv(object):
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sck.settimeout(None) # < does not inherit, ^ opts above do sck.settimeout(None) # < does not inherit, ^ opts above do
self.ip, self.port = sck.getsockname()[:2] ip, port = sck.getsockname()[:2]
self.srvs.append(sck) self.srvs.append(sck)
self.bound.add((ip, port))
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners) self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
Daemon( Daemon(
self.thr_listen, self.thr_listen,
"httpsrv-n{}-listen-{}-{}".format(self.nid or "0", self.ip, self.port), "httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
(sck,), (sck,),
) )

View File

@@ -25,7 +25,7 @@ from .stolen.dnslib import (
DNSQuestion, DNSQuestion,
DNSRecord, DNSRecord,
) )
from .util import CachedSet, Daemon, Netdev, min_ex from .util import CachedSet, Daemon, Netdev, list_ips, min_ex
if TYPE_CHECKING: if TYPE_CHECKING:
from .svchub import SvcHub from .svchub import SvcHub
@@ -55,10 +55,11 @@ class MDNS_Sck(MC_Sck):
self.bp_bye = b"" self.bp_bye = b""
self.last_tx = 0.0 self.last_tx = 0.0
self.tx_ex = False
class MDNS(MCast): class MDNS(MCast):
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub: "SvcHub", ngen: int) -> None:
al = hub.args al = hub.args
grp4 = "" if al.zm6 else MDNS4 grp4 = "" if al.zm6 else MDNS4
grp6 = "" if al.zm4 else MDNS6 grp6 = "" if al.zm4 else MDNS6
@@ -66,7 +67,8 @@ class MDNS(MCast):
hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv
) )
self.srv: dict[socket.socket, MDNS_Sck] = {} self.srv: dict[socket.socket, MDNS_Sck] = {}
self.logsrc = "mDNS-{}".format(ngen)
self.ngen = ngen
self.ttl = 300 self.ttl = 300
zs = self.args.name + ".local." zs = self.args.name + ".local."
@@ -89,7 +91,7 @@ class MDNS(MCast):
self.defend: dict[MDNS_Sck, float] = {} # server -> deadline self.defend: dict[MDNS_Sck, float] = {} # server -> deadline
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("mDNS", msg, c) self.log_func(self.logsrc, msg, c)
def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]: def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]:
zms = self.args.zms zms = self.args.zms
@@ -287,12 +289,15 @@ class MDNS(MCast):
rx: list[socket.socket] = rdy[0] # type: ignore rx: list[socket.socket] = rdy[0] # type: ignore
self.rx4.cln() self.rx4.cln()
self.rx6.cln() self.rx6.cln()
buf = b""
addr = ("0", 0)
for sck in rx: for sck in rx:
buf, addr = sck.recvfrom(4096)
try: try:
buf, addr = sck.recvfrom(4096)
self.eat(buf, addr, sck) self.eat(buf, addr, sck)
except: except:
if not self.running: if not self.running:
self.log("stopped", 2)
return return
t = "{} {} \033[33m|{}| {}\n{}".format( t = "{} {} \033[33m|{}| {}\n{}".format(
@@ -309,14 +314,18 @@ class MDNS(MCast):
self.log(t.format(self.hn[:-1]), 2) self.log(t.format(self.hn[:-1]), 2)
self.probing = 0 self.probing = 0
self.log("stopped", 2)
def stop(self, panic=False) -> None: def stop(self, panic=False) -> None:
self.running = False self.running = False
if not panic: for srv in self.srv.values():
for srv in self.srv.values(): try:
try: if panic:
srv.sck.close()
else:
srv.sck.sendto(srv.bp_bye, (srv.grp, 5353)) srv.sck.sendto(srv.bp_bye, (srv.grp, 5353))
except: except:
pass pass
self.srv = {} self.srv = {}
@@ -374,6 +383,14 @@ class MDNS(MCast):
# avahi broadcasting 127.0.0.1-only packets # avahi broadcasting 127.0.0.1-only packets
return return
# check if we've been given additional IPs
for ip in list_ips():
if ip in cips:
self.sips.add(ip)
if not self.sips.isdisjoint(cips):
return
t = "mdns zeroconf: " t = "mdns zeroconf: "
if self.probing: if self.probing:
t += "Cannot start; hostname '{}' is occupied" t += "Cannot start; hostname '{}' is occupied"
@@ -507,6 +524,15 @@ class MDNS(MCast):
if now < srv.last_tx + cooldown: if now < srv.last_tx + cooldown:
return False return False
srv.sck.sendto(msg, (srv.grp, 5353)) try:
srv.last_tx = now srv.sck.sendto(msg, (srv.grp, 5353))
srv.last_tx = now
except Exception as ex:
if srv.tx_ex:
return True
srv.tx_ex = True
t = "tx({},|{}|,{}): {}"
self.log(t.format(srv.ip, len(msg), cooldown, ex), 3)
return True return True

View File

@@ -14,8 +14,8 @@ from ipaddress import (
ip_network, ip_network,
) )
from .__init__ import TYPE_CHECKING from .__init__ import MACOS, TYPE_CHECKING
from .util import MACOS, Netdev, min_ex, spack from .util import Netdev, find_prefix, min_ex, spack
if TYPE_CHECKING: if TYPE_CHECKING:
from .svchub import SvcHub from .svchub import SvcHub
@@ -110,9 +110,7 @@ class MCast(object):
) )
ips = [x for x in ips if x not in ("::1", "127.0.0.1")] ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
ips = find_prefix(ips, netdevs)
# ip -> ip/prefix
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips]
on = self.on[:] on = self.on[:]
off = self.off[:] off = self.off[:]

View File

@@ -75,6 +75,7 @@ class SSDPr(object):
c = html_escape c = html_escape
sip, sport = hc.s.getsockname()[:2] sip, sport = hc.s.getsockname()[:2]
sip = sip.replace("::ffff:", "")
proto = "https" if self.args.https_only else "http" proto = "https" if self.args.https_only else "http"
ubase = "{}://{}:{}".format(proto, sip, sport) ubase = "{}://{}:{}".format(proto, sip, sport)
zsl = self.args.zsl zsl = self.args.zsl
@@ -88,19 +89,22 @@ class SSDPr(object):
class SSDPd(MCast): class SSDPd(MCast):
"""communicates with ssdp clients over multicast""" """communicates with ssdp clients over multicast"""
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub: "SvcHub", ngen: int) -> None:
al = hub.args al = hub.args
vinit = al.zsv and not al.zmv vinit = al.zsv and not al.zmv
super(SSDPd, self).__init__( super(SSDPd, self).__init__(
hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit
) )
self.srv: dict[socket.socket, SSDP_Sck] = {} self.srv: dict[socket.socket, SSDP_Sck] = {}
self.logsrc = "SSDP-{}".format(ngen)
self.ngen = ngen
self.rxc = CachedSet(0.7) self.rxc = CachedSet(0.7)
self.txc = CachedSet(5) # win10: every 3 sec self.txc = CachedSet(5) # win10: every 3 sec
self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I) self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I)
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("SSDP", msg, c) self.log_func(self.logsrc, msg, c)
def run(self) -> None: def run(self) -> None:
try: try:
@@ -126,24 +130,34 @@ class SSDPd(MCast):
self.log("listening") self.log("listening")
while self.running: while self.running:
rdy = select.select(self.srv, [], [], 180) rdy = select.select(self.srv, [], [], self.args.z_chk or 180)
rx: list[socket.socket] = rdy[0] # type: ignore rx: list[socket.socket] = rdy[0] # type: ignore
self.rxc.cln() self.rxc.cln()
buf = b""
addr = ("0", 0)
for sck in rx: for sck in rx:
buf, addr = sck.recvfrom(4096)
try: try:
buf, addr = sck.recvfrom(4096)
self.eat(buf, addr) self.eat(buf, addr)
except: except:
if not self.running: if not self.running:
return break
t = "{} {} \033[33m|{}| {}\n{}".format( t = "{} {} \033[33m|{}| {}\n{}".format(
self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex() self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
) )
self.log(t, 6) self.log(t, 6)
self.log("stopped", 2)
def stop(self) -> None: def stop(self) -> None:
self.running = False self.running = False
for srv in self.srv.values():
try:
srv.sck.close()
except:
pass
self.srv = {} self.srv = {}
def eat(self, buf: bytes, addr: tuple[str, int]) -> None: def eat(self, buf: bytes, addr: tuple[str, int]) -> None:
@@ -160,7 +174,7 @@ class SSDPd(MCast):
self.rxc.add(buf) self.rxc.add(buf)
if not buf.startswith(b"M-SEARCH * HTTP/1."): if not buf.startswith(b"M-SEARCH * HTTP/1."):
raise Exception("not an ssdp message") return
if not self.ptn_st.search(buf): if not self.ptn_st.search(buf):
return return
@@ -184,7 +198,8 @@ BOOTID.UPNP.ORG: 0
CONFIGID.UPNP.ORG: 1 CONFIGID.UPNP.ORG: 1
""" """
zs = zs.format(formatdate(usegmt=True), srv.ip, srv.hport, self.args.zsid) v4 = srv.ip.replace("::ffff:", "")
zs = zs.format(formatdate(usegmt=True), v4, srv.hport, self.args.zsid)
zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace") zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace")
srv.sck.sendto(zb, addr[:2]) srv.sck.sendto(zb, addr[:2])

View File

@@ -4,6 +4,7 @@ from __future__ import print_function, unicode_literals
import argparse import argparse
import base64 import base64
import calendar import calendar
import errno
import gzip import gzip
import logging import logging
import os import os
@@ -96,13 +97,13 @@ class SvcHub(object):
if args.sss or args.s >= 3: if args.sss or args.s >= 3:
args.ss = True args.ss = True
args.no_dav = True args.no_dav = True
args.no_logues = True
args.no_readme = True
args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz" args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz"
args.ls = args.ls or "**,*,ln,p,r" args.ls = args.ls or "**,*,ln,p,r"
if args.ss or args.s >= 2: if args.ss or args.s >= 2:
args.s = True args.s = True
args.no_logues = True
args.no_readme = True
args.unpost = 0 args.unpost = 0
args.no_del = True args.no_del = True
args.no_mv = True args.no_mv = True
@@ -150,9 +151,6 @@ class SvcHub(object):
ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)] ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)]
args.theme = "{0}{1} {0} {1}".format(ch, bri) args.theme = "{0}{1} {0} {1}".format(ch, bri)
if not args.hardlink and args.never_symlink:
args.no_dedup = True
if args.log_fk: if args.log_fk:
args.log_fk = re.compile(args.log_fk) args.log_fk = re.compile(args.log_fk)
@@ -236,6 +234,7 @@ class SvcHub(object):
if not args.zms: if not args.zms:
args.zms = zms args.zms = zms
self.zc_ngen = 0
self.mdns: Optional["MDNS"] = None self.mdns: Optional["MDNS"] = None
self.ssdp: Optional["SSDPd"] = None self.ssdp: Optional["SSDPd"] = None
@@ -295,12 +294,25 @@ class SvcHub(object):
al.zs_on = al.zs_on or al.z_on al.zs_on = al.zs_on or al.z_on
al.zm_off = al.zm_off or al.z_off al.zm_off = al.zm_off or al.z_off
al.zs_off = al.zs_off or al.z_off al.zs_off = al.zs_off or al.z_off
for n in ("zm_on", "zm_off", "zs_on", "zs_off"): ns = "zm_on zm_off zs_on zs_off acao acam"
for n in ns.split(" "):
vs = getattr(al, n).split(",") vs = getattr(al, n).split(",")
vs = [x.strip() for x in vs] vs = [x.strip() for x in vs]
vs = [x for x in vs if x] vs = [x for x in vs if x]
setattr(al, n, vs) setattr(al, n, vs)
ns = "acao acam"
for n in ns.split(" "):
vs = getattr(al, n)
vd = {zs: 1 for zs in vs}
setattr(al, n, vd)
ns = "acao"
for n in ns.split(" "):
vs = getattr(al, n)
vs = [x.lower() for x in vs]
setattr(al, n, vs)
R = al.rp_loc R = al.rp_loc
if "//" in R or ":" in R: if "//" in R or ":" in R:
t = "found URL in --rp-loc; it should be just the location, for example /foo/bar" t = "found URL in --rp-loc; it should be just the location, for example /foo/bar"
@@ -309,6 +321,7 @@ class SvcHub(object):
al.R = R = R.strip("/") al.R = R = R.strip("/")
al.SR = "/" + R if R else "" al.SR = "/" + R if R else ""
al.RS = R + "/" if R else "" al.RS = R + "/" if R else ""
al.SRS = "/" + R + "/" if R else "/"
return True return True
@@ -402,24 +415,10 @@ class SvcHub(object):
def run(self) -> None: def run(self) -> None:
self.tcpsrv.run() self.tcpsrv.run()
if getattr(self.args, "z_chk", 0) and (
if getattr(self.args, "zm", False): getattr(self.args, "zm", False) or getattr(self.args, "zs", False)
try: ):
from .mdns import MDNS Daemon(self.tcpsrv.netmon, "netmon")
self.mdns = MDNS(self)
Daemon(self.mdns.run, "mdns")
except:
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
if getattr(self.args, "zs", False):
try:
from .ssdp import SSDPd
self.ssdp = SSDPd(self)
Daemon(self.ssdp.run, "ssdp")
except:
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
Daemon(self.thr_httpsrv_up, "sig-hsrv-up2") Daemon(self.thr_httpsrv_up, "sig-hsrv-up2")
@@ -451,6 +450,33 @@ class SvcHub(object):
else: else:
self.stop_thr() self.stop_thr()
def start_zeroconf(self) -> None:
self.zc_ngen += 1
if getattr(self.args, "zm", False):
try:
from .mdns import MDNS
if self.mdns:
self.mdns.stop(True)
self.mdns = MDNS(self, self.zc_ngen)
Daemon(self.mdns.run, "mdns")
except:
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
if getattr(self.args, "zs", False):
try:
from .ssdp import SSDPd
if self.ssdp:
self.ssdp.stop()
self.ssdp = SSDPd(self, self.zc_ngen)
Daemon(self.ssdp.run, "ssdp")
except:
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
def reload(self) -> str: def reload(self) -> str:
if self.reloading: if self.reloading:
return "cannot reload; already in progress" return "cannot reload; already in progress"
@@ -635,13 +661,20 @@ class SvcHub(object):
print(msg.encode("utf-8", "replace").decode(), end="") print(msg.encode("utf-8", "replace").decode(), end="")
except: except:
print(msg.encode("ascii", "replace").decode(), end="") print(msg.encode("ascii", "replace").decode(), end="")
except OSError as ex:
if ex.errno != errno.EPIPE:
raise
if self.logf: if self.logf:
self.logf.write(msg) self.logf.write(msg)
def pr(self, *a: Any, **ka: Any) -> None: def pr(self, *a: Any, **ka: Any) -> None:
with self.log_mutex: try:
print(*a, **ka) with self.log_mutex:
print(*a, **ka)
except OSError as ex:
if ex.errno != errno.EPIPE:
raise
def check_mp_support(self) -> str: def check_mp_support(self) -> str:
if MACOS: if MACOS:

View File

@@ -5,6 +5,7 @@ import os
import re import re
import socket import socket
import sys import sys
import time
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, unicode from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, unicode
from .stolen.qrcodegen import QrCode from .stolen.qrcodegen import QrCode
@@ -28,6 +29,9 @@ if TYPE_CHECKING:
if not hasattr(socket, "IPPROTO_IPV6"): if not hasattr(socket, "IPPROTO_IPV6"):
setattr(socket, "IPPROTO_IPV6", 41) setattr(socket, "IPPROTO_IPV6", 41)
if not hasattr(socket, "IP_FREEBIND"):
setattr(socket, "IP_FREEBIND", 15)
class TcpSrv(object): class TcpSrv(object):
""" """
@@ -46,6 +50,8 @@ class TcpSrv(object):
self.stopping = False self.stopping = False
self.srv: list[socket.socket] = [] self.srv: list[socket.socket] = []
self.bound: list[tuple[str, int]] = [] self.bound: list[tuple[str, int]] = []
self.netdevs: dict[str, Netdev] = {}
self.netlist = ""
self.nsrv = 0 self.nsrv = 0
self.qr = "" self.qr = ""
pad = False pad = False
@@ -221,8 +227,16 @@ class TcpSrv(object):
except: except:
pass # will create another ipv4 socket instead pass # will create another ipv4 socket instead
if not ANYWIN and self.args.freebind:
srv.setsockopt(socket.SOL_IP, socket.IP_FREEBIND, 1)
try: try:
srv.bind((ip, port)) srv.bind((ip, port))
sport = srv.getsockname()[1]
if port != sport:
# linux 6.0.16 lets you bind a port which is in use
# except it just gives you a random port instead
raise OSError(E_ADDR_IN_USE[0], "")
self.srv.append(srv) self.srv.append(srv)
except (OSError, socket.error) as ex: except (OSError, socket.error) as ex:
if ex.errno in E_ADDR_IN_USE: if ex.errno in E_ADDR_IN_USE:
@@ -241,6 +255,14 @@ class TcpSrv(object):
ip, port = srv.getsockname()[:2] ip, port = srv.getsockname()[:2]
try: try:
srv.listen(self.args.nc) srv.listen(self.args.nc)
try:
ok = srv.getsockopt(socket.SOL_SOCKET, socket.SO_ACCEPTCONN)
except:
ok = 1 # macos
if not ok:
# some linux don't throw on listen(0.0.0.0) after listen(::)
raise Exception("failed to listen on {}".format(srv.getsockname()))
except: except:
if ip == "0.0.0.0" and ("::", port) in bound: if ip == "0.0.0.0" and ("::", port) in bound:
# dualstack # dualstack
@@ -268,7 +290,11 @@ class TcpSrv(object):
self.srv = srvs self.srv = srvs
self.bound = bound self.bound = bound
self.nsrv = len(srvs) self.nsrv = len(srvs)
self._distribute_netdevs()
def _distribute_netdevs(self):
self.hub.broker.say("set_netdevs", self.netdevs) self.hub.broker.say("set_netdevs", self.netdevs)
self.hub.start_zeroconf()
def shutdown(self) -> None: def shutdown(self) -> None:
self.stopping = True self.stopping = True
@@ -280,6 +306,27 @@ class TcpSrv(object):
self.log("tcpsrv", "ok bye") self.log("tcpsrv", "ok bye")
def netmon(self):
while not self.stopping:
time.sleep(self.args.z_chk)
netdevs = self.detect_interfaces(self.args.i)
if not netdevs:
continue
added = "nothing"
removed = "nothing"
for k, v in netdevs.items():
if k not in self.netdevs:
added = "{} = {}".format(k, v)
for k, v in self.netdevs.items():
if k not in netdevs:
removed = "{} = {}".format(k, v)
t = "network change detected:\n added {}\nremoved {}"
self.log("tcpsrv", t.format(added, removed), 3)
self.netdevs = netdevs
self._distribute_netdevs()
def detect_interfaces(self, listen_ips: list[str]) -> dict[str, Netdev]: def detect_interfaces(self, listen_ips: list[str]) -> dict[str, Netdev]:
from .stolen.ifaddr import get_adapters from .stolen.ifaddr import get_adapters
@@ -300,6 +347,12 @@ class TcpSrv(object):
except: except:
pass pass
netlist = str(sorted(eps.items()))
if netlist == self.netlist and self.netdevs:
return {}
self.netlist = netlist
if "0.0.0.0" not in listen_ips and "::" not in listen_ips: if "0.0.0.0" not in listen_ips and "::" not in listen_ips:
eps = {k: v for k, v in eps.items() if k.split("/")[0] in listen_ips} eps = {k: v for k, v in eps.items() if k.split("/")[0] in listen_ips}

View File

@@ -3,6 +3,7 @@ from __future__ import print_function, unicode_literals
import base64 import base64
import hashlib import hashlib
import logging
import os import os
import shutil import shutil
import subprocess as sp import subprocess as sp
@@ -61,12 +62,16 @@ try:
HAVE_AVIF = True HAVE_AVIF = True
except: except:
pass pass
logging.getLogger("PIL").setLevel(logging.WARNING)
except: except:
pass pass
try: try:
HAVE_VIPS = True HAVE_VIPS = True
import pyvips import pyvips
logging.getLogger("pyvips").setLevel(logging.WARNING)
except: except:
HAVE_VIPS = False HAVE_VIPS = False
@@ -242,40 +247,40 @@ class ThumbSrv(object):
abspath, tpath = task abspath, tpath = task
ext = abspath.split(".")[-1].lower() ext = abspath.split(".")[-1].lower()
png_ok = False png_ok = False
fun = None funs = []
if not bos.path.exists(tpath): if not bos.path.exists(tpath):
for lib in self.args.th_dec: for lib in self.args.th_dec:
if fun: if lib == "pil" and ext in self.fmt_pil:
break funs.append(self.conv_pil)
elif lib == "pil" and ext in self.fmt_pil:
fun = self.conv_pil
elif lib == "vips" and ext in self.fmt_vips: elif lib == "vips" and ext in self.fmt_vips:
fun = self.conv_vips funs.append(self.conv_vips)
elif lib == "ff" and ext in self.fmt_ffi or ext in self.fmt_ffv: elif lib == "ff" and ext in self.fmt_ffi or ext in self.fmt_ffv:
fun = self.conv_ffmpeg funs.append(self.conv_ffmpeg)
elif lib == "ff" and ext in self.fmt_ffa: elif lib == "ff" and ext in self.fmt_ffa:
if tpath.endswith(".opus") or tpath.endswith(".caf"): if tpath.endswith(".opus") or tpath.endswith(".caf"):
fun = self.conv_opus funs.append(self.conv_opus)
elif tpath.endswith(".png"): elif tpath.endswith(".png"):
fun = self.conv_waves funs.append(self.conv_waves)
png_ok = True png_ok = True
else: else:
fun = self.conv_spec funs.append(self.conv_spec)
if not png_ok and tpath.endswith(".png"): if not png_ok and tpath.endswith(".png"):
raise Pebkac(400, "png only allowed for waveforms") raise Pebkac(400, "png only allowed for waveforms")
if fun: for fun in funs:
try: try:
fun(abspath, tpath) fun(abspath, tpath)
break
except Exception as ex: except Exception as ex:
msg = "{} could not create thumbnail of {}\n{}" msg = "{} could not create thumbnail of {}\n{}"
msg = msg.format(fun.__name__, abspath, min_ex()) msg = msg.format(fun.__name__, abspath, min_ex())
c: Union[str, int] = 1 if "<Signals.SIG" in msg else "90" c: Union[str, int] = 1 if "<Signals.SIG" in msg else "90"
self.log(msg, c) self.log(msg, c)
if getattr(ex, "returncode", 0) != 321: if getattr(ex, "returncode", 0) != 321:
with open(tpath, "wb") as _: if fun == funs[-1]:
pass with open(tpath, "wb") as _:
pass
else: else:
# ffmpeg may spawn empty files on windows # ffmpeg may spawn empty files on windows
try: try:
@@ -363,7 +368,8 @@ class ThumbSrv(object):
img = pyvips.Image.thumbnail(abspath, w, **kw) img = pyvips.Image.thumbnail(abspath, w, **kw)
break break
except: except:
pass if c == crops[-1]:
raise
img.write_to_file(tpath, Q=40) img.write_to_file(tpath, Q=40)

View File

@@ -44,6 +44,7 @@ from .util import (
ren_open, ren_open,
rmdirs, rmdirs,
rmdirs_up, rmdirs_up,
runhook,
s2hms, s2hms,
s3dec, s3dec,
s3enc, s3enc,
@@ -441,6 +442,7 @@ class Up2k(object):
# only need to protect register_vpath but all in one go feels right # only need to protect register_vpath but all in one go feels right
for vol in vols: for vol in vols:
try: try:
bos.makedirs(vol.realpath) # gonna happen at snap anyways
bos.listdir(vol.realpath) bos.listdir(vol.realpath)
except: except:
self.volstate[vol.vpath] = "OFFLINE (cannot access folder)" self.volstate[vol.vpath] = "OFFLINE (cannot access folder)"
@@ -642,9 +644,15 @@ class Up2k(object):
ff = "\033[0;35m{}{:.0}" ff = "\033[0;35m{}{:.0}"
fv = "\033[0;36m{}:\033[90m{}" fv = "\033[0;36m{}:\033[90m{}"
fx = set(("html_head",)) fx = set(("html_head",))
fd = {"dbd": "dbd"}
fl = {
k: v
for k, v in flags.items()
if k not in fd or v != getattr(self.args, fd[k])
}
a = [ a = [
(ft if v is True else ff if v is False else fv).format(k, str(v)) (ft if v is True else ff if v is False else fv).format(k, str(v))
for k, v in flags.items() for k, v in fl.items()
if k not in fx if k not in fx
] ]
if a: if a:
@@ -842,6 +850,7 @@ class Up2k(object):
seen = seen + [rcdir] seen = seen + [rcdir]
unreg: list[str] = [] unreg: list[str] = []
files: list[tuple[int, int, str]] = [] files: list[tuple[int, int, str]] = []
fat32 = True
assert self.pp and self.mem_cur assert self.pp and self.mem_cur
self.pp.msg = "a{} {}".format(self.pp.n, cdir) self.pp.msg = "a{} {}".format(self.pp.n, cdir)
@@ -866,6 +875,9 @@ class Up2k(object):
lmod = int(inf.st_mtime) lmod = int(inf.st_mtime)
sz = inf.st_size sz = inf.st_size
if fat32 and inf.st_mtime % 2:
fat32 = False
if stat.S_ISDIR(inf.st_mode): if stat.S_ISDIR(inf.st_mode):
rap = absreal(abspath) rap = absreal(abspath)
if dev and inf.st_dev != dev: if dev and inf.st_dev != dev:
@@ -953,6 +965,9 @@ class Up2k(object):
self.log(t.format(top, rp, len(in_db), rep_db)) self.log(t.format(top, rp, len(in_db), rep_db))
dts = -1 dts = -1
if fat32 and abs(dts - lmod) == 1:
dts = lmod
if dts == lmod and dsz == sz and (nohash or dw[0] != "#" or not sz): if dts == lmod and dsz == sz and (nohash or dw[0] != "#" or not sz):
continue continue
@@ -1480,6 +1495,10 @@ class Up2k(object):
t0 = time.time() t0 = time.time()
for ptop, flags in self.flags.items(): for ptop, flags in self.flags.items():
if "mtp" in flags: if "mtp" in flags:
if ptop not in self.entags:
t = "skipping mtp for unavailable volume {}"
self.log(t.format(ptop), 1)
continue
self._run_one_mtp(ptop, gid) self._run_one_mtp(ptop, gid)
td = time.time() - t0 td = time.time() - t0
@@ -2046,6 +2065,8 @@ class Up2k(object):
"sprs": sprs, # dontcare; finished anyways "sprs": sprs, # dontcare; finished anyways
"size": dsize, "size": dsize,
"lmod": dtime, "lmod": dtime,
"host": cj["host"],
"user": cj["user"],
"addr": ip, "addr": ip,
"at": at, "at": at,
"hash": [], "hash": [],
@@ -2143,7 +2164,8 @@ class Up2k(object):
if not self.args.nw: if not self.args.nw:
bos.unlink(dst) # TODO ed pls bos.unlink(dst) # TODO ed pls
try: try:
self._symlink(src, dst, lmod=cj["lmod"]) dst_flags = self.flags[job["ptop"]]
self._symlink(src, dst, dst_flags, lmod=cj["lmod"])
except: except:
if not n4g: if not n4g:
raise raise
@@ -2174,6 +2196,8 @@ class Up2k(object):
} }
# client-provided, sanitized by _get_wark: name, size, lmod # client-provided, sanitized by _get_wark: name, size, lmod
for k in [ for k in [
"host",
"user",
"addr", "addr",
"vtop", "vtop",
"ptop", "ptop",
@@ -2239,7 +2263,12 @@ class Up2k(object):
return zfw["orz"][1] return zfw["orz"][1]
def _symlink( def _symlink(
self, src: str, dst: str, verbose: bool = True, lmod: float = 0 self,
src: str,
dst: str,
flags: dict[str, Any],
verbose: bool = True,
lmod: float = 0,
) -> None: ) -> None:
if verbose: if verbose:
self.log("linking dupe:\n {0}\n {1}".format(src, dst)) self.log("linking dupe:\n {0}\n {1}".format(src, dst))
@@ -2249,7 +2278,7 @@ class Up2k(object):
linked = False linked = False
try: try:
if self.args.no_dedup: if "copydupes" in flags:
raise Exception("disabled in config") raise Exception("disabled in config")
lsrc = src lsrc = src
@@ -2279,12 +2308,12 @@ class Up2k(object):
ldst = ldst.replace("/", "\\") ldst = ldst.replace("/", "\\")
try: try:
if self.args.hardlink: if "hardlink" in flags:
os.link(fsenc(src), fsenc(dst)) os.link(fsenc(src), fsenc(dst))
linked = True linked = True
except Exception as ex: except Exception as ex:
self.log("cannot hardlink: " + repr(ex)) self.log("cannot hardlink: " + repr(ex))
if self.args.never_symlink: if "neversymlink" in flags:
raise Exception("symlink-fallback disabled in cfg") raise Exception("symlink-fallback disabled in cfg")
if not linked: if not linked:
@@ -2403,6 +2432,26 @@ class Up2k(object):
# self.log("--- " + wark + " " + dst + " finish_upload atomic " + dst, 4) # self.log("--- " + wark + " " + dst + " finish_upload atomic " + dst, 4)
atomic_move(src, dst) atomic_move(src, dst)
upt = job.get("at") or time.time()
xau = self.flags[ptop].get("xau")
if xau and not runhook(
self.log,
xau,
dst,
djoin(job["vtop"], job["prel"], job["name"]),
job["host"],
job["user"],
job["addr"],
upt,
job["size"],
"",
):
t = "upload blocked by xau"
self.log(t, 1)
bos.unlink(dst)
self.registry[ptop].pop(wark, None)
raise Pebkac(403, t)
times = (int(time.time()), int(job["lmod"])) times = (int(time.time()), int(job["lmod"]))
if ANYWIN: if ANYWIN:
z1 = (dst, job["size"], times, job["sprs"]) z1 = (dst, job["size"], times, job["sprs"])
@@ -2414,7 +2463,6 @@ class Up2k(object):
pass pass
z2 = [job[x] for x in "ptop wark prel name lmod size addr".split()] z2 = [job[x] for x in "ptop wark prel name lmod size addr".split()]
upt = job.get("at") or time.time()
wake_sr = False wake_sr = False
try: try:
flt = job["life"] flt = job["life"]
@@ -2448,7 +2496,7 @@ class Up2k(object):
if os.path.exists(d2): if os.path.exists(d2):
continue continue
self._symlink(dst, d2, lmod=lmod) self._symlink(dst, d2, self.flags[ptop], lmod=lmod)
if cur: if cur:
self.db_rm(cur, rd, fn) self.db_rm(cur, rd, fn)
self.db_add(cur, wark, rd, fn, *z2[-4:]) self.db_add(cur, wark, rd, fn, *z2[-4:])
@@ -2610,6 +2658,8 @@ class Up2k(object):
self.log("rm: skip type-{:x} file [{}]".format(st.st_mode, atop)) self.log("rm: skip type-{:x} file [{}]".format(st.st_mode, atop))
return 0, [], [] return 0, [], []
xbd = vn.flags.get("xbd")
xad = vn.flags.get("xad")
n_files = 0 n_files = 0
for dbv, vrem, _, adir, files, rd, vd in g: for dbv, vrem, _, adir, files, rd, vd in g:
for fn in [x[0] for x in files]: for fn in [x[0] for x in files]:
@@ -2625,6 +2675,12 @@ class Up2k(object):
vpath = "{}/{}".format(dbv.vpath, volpath).strip("/") vpath = "{}/{}".format(dbv.vpath, volpath).strip("/")
self.log("rm {}\n {}".format(vpath, abspath)) self.log("rm {}\n {}".format(vpath, abspath))
_ = dbv.get(volpath, uname, *permsets[0]) _ = dbv.get(volpath, uname, *permsets[0])
if xbd and not runhook(
self.log, xbd, abspath, vpath, "", uname, "", 0, 0, ""
):
self.log("delete blocked by xbd: {}".format(abspath), 1)
continue
with self.mutex: with self.mutex:
cur = None cur = None
try: try:
@@ -2636,6 +2692,8 @@ class Up2k(object):
cur.connection.commit() cur.connection.commit()
bos.unlink(abspath) bos.unlink(abspath)
if xad:
runhook(self.log, xad, abspath, vpath, "", uname, "", 0, 0, "")
ok: list[str] = [] ok: list[str] = []
ng: list[str] = [] ng: list[str] = []
@@ -2728,6 +2786,13 @@ class Up2k(object):
if bos.path.exists(dabs): if bos.path.exists(dabs):
raise Pebkac(400, "mv2: target file exists") raise Pebkac(400, "mv2: target file exists")
xbr = svn.flags.get("xbr")
xar = dvn.flags.get("xar")
if xbr and not runhook(self.log, xbr, sabs, svp, "", uname, "", 0, 0, ""):
t = "move blocked by xbr: {}".format(svp)
self.log(t, 1)
raise Pebkac(405, t)
bos.makedirs(os.path.dirname(dabs)) bos.makedirs(os.path.dirname(dabs))
if bos.path.islink(sabs): if bos.path.islink(sabs):
@@ -2736,7 +2801,7 @@ class Up2k(object):
self.log(t.format(sabs, dabs, dlabs)) self.log(t.format(sabs, dabs, dlabs))
mt = bos.path.getmtime(sabs, False) mt = bos.path.getmtime(sabs, False)
bos.unlink(sabs) bos.unlink(sabs)
self._symlink(dlabs, dabs, False, lmod=mt) self._symlink(dlabs, dabs, dvn.flags, False, lmod=mt)
# folders are too scary, schedule rescan of both vols # folders are too scary, schedule rescan of both vols
self.need_rescan.add(svn.vpath) self.need_rescan.add(svn.vpath)
@@ -2744,6 +2809,9 @@ class Up2k(object):
with self.rescan_cond: with self.rescan_cond:
self.rescan_cond.notify_all() self.rescan_cond.notify_all()
if xar:
runhook(self.log, xar, dabs, dvp, "", uname, "", 0, 0, "")
return "k" return "k"
c1, w, ftime_, fsize_, ip, at = self._find_from_vpath(svn.realpath, srem) c1, w, ftime_, fsize_, ip, at = self._find_from_vpath(svn.realpath, srem)
@@ -2757,21 +2825,6 @@ class Up2k(object):
ftime = ftime_ ftime = ftime_
fsize = fsize_ or 0 fsize = fsize_ or 0
if w:
assert c1
if c2 and c2 != c1:
self._copy_tags(c1, c2, w)
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
self._relink(w, svn.realpath, srem, dabs)
curs.add(c1)
if c2:
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
curs.add(c2)
else:
self.log("not found in src db: [{}]".format(svp))
try: try:
atomic_move(sabs, dabs) atomic_move(sabs, dabs)
except OSError as ex: except OSError as ex:
@@ -2788,6 +2841,24 @@ class Up2k(object):
os.unlink(b1) os.unlink(b1)
if w:
assert c1
if c2 and c2 != c1:
self._copy_tags(c1, c2, w)
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
self._relink(w, svn.realpath, srem, dabs)
curs.add(c1)
if c2:
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
curs.add(c2)
else:
self.log("not found in src db: [{}]".format(svp))
if xar:
runhook(self.log, xar, dabs, dvp, "", uname, "", 0, 0, "")
return "k" return "k"
def _copy_tags( def _copy_tags(
@@ -2912,14 +2983,14 @@ class Up2k(object):
bos.unlink(slabs) bos.unlink(slabs)
bos.rename(sabs, slabs) bos.rename(sabs, slabs)
bos.utime(slabs, (int(time.time()), int(mt)), False) bos.utime(slabs, (int(time.time()), int(mt)), False)
self._symlink(slabs, sabs, False) self._symlink(slabs, sabs, self.flags.get(ptop) or {}, False)
full[slabs] = (ptop, rem) full[slabs] = (ptop, rem)
sabs = slabs sabs = slabs
if not dabs: if not dabs:
dabs = list(sorted(full.keys()))[0] dabs = list(sorted(full.keys()))[0]
for alink in links: for alink, parts in links.items():
lmod = None lmod = None
try: try:
if alink != sabs and absreal(alink) != sabs: if alink != sabs and absreal(alink) != sabs:
@@ -2931,7 +3002,8 @@ class Up2k(object):
except: except:
pass pass
self._symlink(dabs, alink, False, lmod=lmod or 0) flags = self.flags.get(parts[0]) or {}
self._symlink(dabs, alink, flags, False, lmod=lmod or 0)
return len(full) + len(links) return len(full) + len(links)
@@ -3007,6 +3079,25 @@ class Up2k(object):
# if len(job["name"].split(".")) > 8: # if len(job["name"].split(".")) > 8:
# raise Exception("aaa") # raise Exception("aaa")
xbu = self.flags[job["ptop"]].get("xbu")
ap_chk = djoin(pdir, job["name"])
vp_chk = djoin(job["vtop"], job["prel"], job["name"])
if xbu and not runhook(
self.log,
xbu,
ap_chk,
vp_chk,
job["host"],
job["user"],
job["addr"],
job["t0"],
job["size"],
"",
):
t = "upload blocked by xbu: {}".format(vp_chk)
self.log(t, 1)
raise Pebkac(403, t)
tnam = job["name"] + ".PARTIAL" tnam = job["name"] + ".PARTIAL"
if self.args.dotpart: if self.args.dotpart:
tnam = "." + tnam tnam = "." + tnam
@@ -3221,7 +3312,7 @@ class Up2k(object):
continue continue
# TODO is undef if vol 404 on startup # TODO is undef if vol 404 on startup
entags = self.entags[ptop] entags = self.entags.get(ptop)
if not entags: if not entags:
self.log("no entags okay.jpg", c=3) self.log("no entags okay.jpg", c=3)
continue continue

View File

@@ -6,6 +6,7 @@ import contextlib
import errno import errno
import hashlib import hashlib
import hmac import hmac
import json
import logging import logging
import math import math
import mimetypes import mimetypes
@@ -228,6 +229,7 @@ application msi=x-ms-installer cab=vnd.ms-cab-compressed rpm=x-rpm crx=x-chrome-
application epub=epub+zip mobi=x-mobipocket-ebook lit=x-ms-reader rss=rss+xml atom=atom+xml torrent=x-bittorrent application epub=epub+zip mobi=x-mobipocket-ebook lit=x-ms-reader rss=rss+xml atom=atom+xml torrent=x-bittorrent
application p7s=pkcs7-signature dcm=dicom shx=vnd.shx shp=vnd.shp dbf=x-dbf gml=gml+xml gpx=gpx+xml amf=x-amf application p7s=pkcs7-signature dcm=dicom shx=vnd.shx shp=vnd.shp dbf=x-dbf gml=gml+xml gpx=gpx+xml amf=x-amf
application swf=x-shockwave-flash m3u=vnd.apple.mpegurl db3=vnd.sqlite3 sqlite=vnd.sqlite3 application swf=x-shockwave-flash m3u=vnd.apple.mpegurl db3=vnd.sqlite3 sqlite=vnd.sqlite3
text ass=plain ssa=plain
image jpg=jpeg xpm=x-xpixmap psd=vnd.adobe.photoshop jpf=jpx tif=tiff ico=x-icon djvu=vnd.djvu image jpg=jpeg xpm=x-xpixmap psd=vnd.adobe.photoshop jpf=jpx tif=tiff ico=x-icon djvu=vnd.djvu
image heic=heic-sequence heif=heif-sequence hdr=vnd.radiance svg=svg+xml image heic=heic-sequence heif=heif-sequence hdr=vnd.radiance svg=svg+xml
audio caf=x-caf mp3=mpeg m4a=mp4 mid=midi mpc=musepack aif=aiff au=basic qcp=qcelp audio caf=x-caf mp3=mpeg m4a=mp4 mid=midi mpc=musepack aif=aiff au=basic qcp=qcelp
@@ -361,8 +363,11 @@ class Daemon(threading.Thread):
name: Optional[str] = None, name: Optional[str] = None,
a: Optional[Iterable[Any]] = None, a: Optional[Iterable[Any]] = None,
r: bool = True, r: bool = True,
ka: Optional[dict[Any, Any]] = None,
) -> None: ) -> None:
threading.Thread.__init__(self, target=target, name=name, args=a or ()) threading.Thread.__init__(
self, target=target, name=name, args=a or (), kwargs=ka
)
self.daemon = True self.daemon = True
if r: if r:
self.start() self.start()
@@ -378,6 +383,9 @@ class Netdev(object):
def __str__(self): def __str__(self):
return "{}-{}{}".format(self.idx, self.name, self.desc) return "{}-{}{}".format(self.idx, self.name, self.desc)
def __repr__(self):
return "'{}-{}'".format(self.idx, self.name)
def __lt__(self, rhs): def __lt__(self, rhs):
return str(self) < str(rhs) return str(self) < str(rhs)
@@ -437,9 +445,7 @@ class HLog(logging.Handler):
else: else:
c = 1 c = 1
if record.name.startswith("PIL") and lv < logging.WARNING: if record.name == "pyftpdlib":
return
elif record.name == "pyftpdlib":
m = self.ptn_ftp.match(msg) m = self.ptn_ftp.match(msg)
if m: if m:
ip = m.group(1) ip = m.group(1)
@@ -469,7 +475,7 @@ class NetMap(object):
) )
ips = [x for x in ips if x not in ("::1", "127.0.0.1")] ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips] ips = find_prefix(ips, netdevs)
self.cache: dict[str, str] = {} self.cache: dict[str, str] = {}
self.b2sip: dict[bytes, str] = {} self.b2sip: dict[bytes, str] = {}
@@ -1187,7 +1193,7 @@ def ren_open(
else: else:
fpath = fname fpath = fname
if suffix and os.path.exists(fsenc(fpath)): if suffix and os.path.lexists(fsenc(fpath)):
fpath += suffix fpath += suffix
fname += suffix fname += suffix
ext += suffix ext += suffix
@@ -1549,14 +1555,16 @@ def gen_filekey_dbg(
return ret return ret
def gencookie(k: str, v: str, dur: Optional[int]) -> str: def gencookie(k: str, v: str, r: str, tls: bool, dur: Optional[int]) -> str:
v = v.replace(";", "") v = v.replace(";", "")
if dur: if dur:
exp = formatdate(time.time() + dur, usegmt=True) exp = formatdate(time.time() + dur, usegmt=True)
else: else:
exp = "Fri, 15 Aug 1997 01:00:00 GMT" exp = "Fri, 15 Aug 1997 01:00:00 GMT"
return "{}={}; Path=/; Expires={}; SameSite=Lax".format(k, v, exp) return "{}={}; Path=/{}; Expires={}; HttpOnly{}; SameSite=Lax".format(
k, v, r, exp, "; Secure" if tls else ""
)
def humansize(sz: float, terse: bool = False) -> str: def humansize(sz: float, terse: bool = False) -> str:
@@ -1712,6 +1720,15 @@ def ipnorm(ip: str) -> str:
return ip return ip
def find_prefix(ips: list[str], netdevs: dict[str, Netdev]) -> list[str]:
ret = []
for ip in ips:
hit = next((x for x in netdevs if x.startswith(ip + "/")), None)
if hit:
ret.append(hit)
return ret
def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str: def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str:
"""html.escape but also newlines""" """html.escape but also newlines"""
s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
@@ -2008,6 +2025,20 @@ def read_socket_chunked(
raise Pebkac(400, t.format(x)) raise Pebkac(400, t.format(x))
def list_ips() -> list[str]:
from .stolen.ifaddr import get_adapters
ret: set[str] = set()
for nic in get_adapters():
for ipo in nic.ips:
if len(ipo.ip) < 7:
ret.add(ipo.ip[0]) # ipv6 is (ip,0,0)
else:
ret.add(ipo.ip)
return list(ret)
def yieldfile(fn: str) -> Generator[bytes, None, None]: def yieldfile(fn: str) -> Generator[bytes, None, None]:
with open(fsenc(fn), "rb", 512 * 1024) as f: with open(fsenc(fn), "rb", 512 * 1024) as f:
while True: while True:
@@ -2428,6 +2459,124 @@ def retchk(
raise Exception(t) raise Exception(t)
def _runhook(
log: "NamedLogger",
cmd: str,
ap: str,
vp: str,
host: str,
uname: str,
ip: str,
at: float,
sz: int,
txt: str,
) -> bool:
chk = False
fork = False
jtxt = False
wait = 0
tout = 0
kill = "t"
cap = 0
ocmd = cmd
while "," in cmd[:6]:
arg, cmd = cmd.split(",", 1)
if arg == "c":
chk = True
elif arg == "f":
fork = True
elif arg == "j":
jtxt = True
elif arg.startswith("w"):
wait = float(arg[1:])
elif arg.startswith("t"):
tout = float(arg[1:])
elif arg.startswith("c"):
cap = int(arg[1:]) # 0=none 1=stdout 2=stderr 3=both
elif arg.startswith("k"):
kill = arg[1:] # [t]ree [m]ain [n]one
else:
t = "hook: invalid flag {} in {}"
log(t.format(arg, ocmd))
env = os.environ.copy()
# try:
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
zsl = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(zsl))
env["PYTHONPATH"] = pypath
# except: if not E.ox: raise
ka = {
"env": env,
"timeout": tout,
"kill": kill,
"capture": cap,
}
if jtxt:
ja = {
"ap": ap,
"vp": vp,
"ip": ip,
"host": host,
"user": uname,
"at": at or time.time(),
"sz": sz,
"txt": txt,
}
arg = json.dumps(ja)
else:
arg = txt or ap
acmd = [cmd, arg]
if cmd.endswith(".py"):
acmd = [sys.executable] + acmd
bcmd = [fsenc(x) for x in acmd]
t0 = time.time()
if fork:
Daemon(runcmd, ocmd, [acmd], ka=ka)
else:
rc, v, err = runcmd(bcmd, **ka) # type: ignore
if chk and rc:
retchk(rc, bcmd, err, log, 5)
return False
wait -= time.time() - t0
if wait > 0:
time.sleep(wait)
return True
def runhook(
log: "NamedLogger",
cmds: list[str],
ap: str,
vp: str,
host: str,
uname: str,
ip: str,
at: float,
sz: int,
txt: str,
) -> bool:
vp = vp.replace("\\", "/")
for cmd in cmds:
try:
if not _runhook(log, cmd, ap, vp, host, uname, ip, at, sz, txt):
return False
except Exception as ex:
log("hook: {}".format(ex))
if ",c," in "," + cmd:
return False
break
return True
def gzip_orig_sz(fn: str) -> int: def gzip_orig_sz(fn: str) -> int:
with open(fsenc(fn), "rb") as f: with open(fsenc(fn), "rb") as f:
f.seek(-4, 2) f.seek(-4, 2)

View File

@@ -27,7 +27,7 @@ window.baguetteBox = (function () {
isOverlayVisible = false, isOverlayVisible = false,
touch = {}, // start-pos touch = {}, // start-pos
touchFlag = false, // busy touchFlag = false, // busy
re_i = /.+\.(gif|jpe?g|png|webp)(\?|$)/i, re_i = /.+\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp)(\?|$)/i,
re_v = /.+\.(webm|mkv|mp4)(\?|$)/i, re_v = /.+\.(webm|mkv|mp4)(\?|$)/i,
anims = ['slideIn', 'fadeIn', 'none'], anims = ['slideIn', 'fadeIn', 'none'],
data = {}, // all galleries data = {}, // all galleries
@@ -277,8 +277,8 @@ window.baguetteBox = (function () {
playpause(); playpause();
else if (k == "KeyU" || k == "KeyO") else if (k == "KeyU" || k == "KeyO")
relseek(k == "KeyU" ? -10 : 10); relseek(k == "KeyU" ? -10 : 10);
else if (k.indexOf('Digit') === 0) else if (k.indexOf('Digit') === 0 && v)
vid().currentTime = vid().duration * parseInt(k.slice(-1)) * 0.1; v.currentTime = v.duration * parseInt(k.slice(-1)) * 0.1;
else if (k == "KeyM" && v) { else if (k == "KeyM" && v) {
v.muted = vmute = !vmute; v.muted = vmute = !vmute;
mp_ctl(); mp_ctl();

View File

@@ -572,6 +572,11 @@ html.dy {
* { * {
line-height: 1.2em; line-height: 1.2em;
} }
::selection {
color: var(--bg-d1);
background: var(--fg);
text-shadow: none;
}
html,body,tr,th,td,#files,a { html,body,tr,th,td,#files,a {
color: inherit; color: inherit;
background: none; background: none;
@@ -754,8 +759,9 @@ html.y #files thead th {
display: inline; display: inline;
} }
#path a { #path a {
margin: 0 0 0 -.2em; padding: 0 .35em;
padding: 0 0 0 .4em; position: relative;
z-index: 1;
/* ie: */ /* ie: */
border-bottom: .1em solid #777\9; border-bottom: .1em solid #777\9;
margin-right: 1em\9; margin-right: 1em\9;
@@ -763,18 +769,17 @@ html.y #files thead th {
#path a:first-child { #path a:first-child {
padding-left: .8em; padding-left: .8em;
} }
#path a:not(:last-child):after { #path i {
content: '';
width: 1.05em; width: 1.05em;
height: 1.05em; height: 1.05em;
margin: -.2em .3em -.2em -.4em; margin: -.5em .15em -.15em -.7em;
display: inline-block; display: inline-block;
border: 1px solid rgba(255,224,192,0.3); border: 1px solid rgba(255,224,192,0.3);
border-width: .05em .05em 0 0; border-width: .05em .05em 0 0;
transform: rotate(45deg); transform: rotate(45deg);
background: linear-gradient(45deg, rgba(0,0,0,0) 40%, rgba(0,0,0,0.25) 75%, rgba(0,0,0,0.35)); background: linear-gradient(45deg, rgba(0,0,0,0) 40%, rgba(0,0,0,0.25) 75%, rgba(0,0,0,0.35));
} }
html.y #path a:not(:last-child)::after { html.y #path i {
background: none; background: none;
border-color: rgba(0,0,0,0.2); border-color: rgba(0,0,0,0.2);
border-width: .1em .1em 0 0; border-width: .1em .1em 0 0;
@@ -793,6 +798,17 @@ html.y #path a:hover {
.logue:empty { .logue:empty {
display: none; display: none;
} }
.logue>iframe {
background: var(--bgg);
border-radius: .3em;
visibility: hidden;
border: none;
width: 100%;
height: 0;
}
.logue>iframe.focus {
box-shadow: 0 0 .1em .1em var(--a);
}
#pro.logue { #pro.logue {
margin-bottom: .8em; margin-bottom: .8em;
} }
@@ -817,6 +833,9 @@ html.y #path a:hover {
.mdo { .mdo {
max-width: 52em; max-width: 52em;
} }
.mdo.sb {
max-width: unset;
}
.mdo, .mdo,
.mdo * { .mdo * {
line-height: 1.4em; line-height: 1.4em;
@@ -2557,7 +2576,6 @@ html.b #u2conf a.b:hover {
#u2conf input[type="checkbox"]:checked+label:hover { #u2conf input[type="checkbox"]:checked+label:hover {
background: var(--u2-o-1h-bg); background: var(--u2-o-1h-bg);
} }
#op_up2k.srch #u2conf td:nth-child(1)>*,
#op_up2k.srch #u2conf td:nth-child(2)>*, #op_up2k.srch #u2conf td:nth-child(2)>*,
#op_up2k.srch #u2conf td:nth-child(3)>* { #op_up2k.srch #u2conf td:nth-child(3)>* {
background: #777; background: #777;

View File

@@ -85,7 +85,7 @@
<div id="bdoc"></div> <div id="bdoc"></div>
{%- endif %} {%- endif %}
<div id="pro" class="logue">{{ logues[0] }}</div> <div id="pro" class="logue">{{ "" if sb_lg else logues[0] }}</div>
<table id="files"> <table id="files">
<thead> <thead>
@@ -119,7 +119,7 @@
</tbody> </tbody>
</table> </table>
<div id="epi" class="logue">{{ logues[1] }}</div> <div id="epi" class="logue">{{ "" if sb_lg else logues[1] }}</div>
<h2><a href="{{ r }}/?h" id="goh">control-panel</a></h2> <h2><a href="{{ r }}/?h" id="goh">control-panel</a></h2>
@@ -150,12 +150,14 @@
have_del = {{ have_del|tojson }}, have_del = {{ have_del|tojson }},
have_unpost = {{ have_unpost }}, have_unpost = {{ have_unpost }},
have_zip = {{ have_zip|tojson }}, have_zip = {{ have_zip|tojson }},
sb_md = "{{ sb_md }}",
sb_lg = "{{ sb_lg }}",
lifetime = {{ lifetime }}, lifetime = {{ lifetime }},
turbolvl = {{ turbolvl }}, turbolvl = {{ turbolvl }},
u2sort = "{{ u2sort }}", u2sort = "{{ u2sort }}",
have_emp = {{ have_emp|tojson }}, have_emp = {{ have_emp|tojson }},
txt_ext = "{{ txt_ext }}", txt_ext = "{{ txt_ext }}",
{% if no_prism %}no_prism = 1,{% endif %} logues = {{ logues|tojson if sb_lg else "[]" }},
readme = {{ readme|tojson }}, readme = {{ readme|tojson }},
ls0 = {{ ls0|tojson }}; ls0 = {{ ls0|tojson }};

View File

@@ -260,6 +260,8 @@ var Ls = {
"fbd_more": '<div id="blazy">showing <code>{0}</code> of <code>{1}</code> files; <a href="#" id="bd_more">show {2}</a> or <a href="#" id="bd_all">show all</a></div>', "fbd_more": '<div id="blazy">showing <code>{0}</code> of <code>{1}</code> files; <a href="#" id="bd_more">show {2}</a> or <a href="#" id="bd_all">show all</a></div>',
"fbd_all": '<div id="blazy">showing <code>{0}</code> of <code>{1}</code> files; <a href="#" id="bd_all">show all</a></div>', "fbd_all": '<div id="blazy">showing <code>{0}</code> of <code>{1}</code> files; <a href="#" id="bd_all">show all</a></div>',
"f_dls": 'the file links in the current folder have\nbeen changed into download links',
"ft_paste": "paste {0} items$NHotkey: ctrl-V", "ft_paste": "paste {0} items$NHotkey: ctrl-V",
"fr_eperm": 'cannot rename:\nyou do not have “move” permission in this folder', "fr_eperm": 'cannot rename:\nyou do not have “move” permission in this folder',
"fd_eperm": 'cannot delete:\nyou do not have “delete” permission in this folder', "fd_eperm": 'cannot delete:\nyou do not have “delete” permission in this folder',
@@ -703,6 +705,8 @@ var Ls = {
"fbd_more": '<div id="blazy">viser <code>{0}</code> av <code>{1}</code> filer; <a href="#" id="bd_more">vis {2}</a> eller <a href="#" id="bd_all">vis alle</a></div>', "fbd_more": '<div id="blazy">viser <code>{0}</code> av <code>{1}</code> filer; <a href="#" id="bd_more">vis {2}</a> eller <a href="#" id="bd_all">vis alle</a></div>',
"fbd_all": '<div id="blazy">viser <code>{0}</code> av <code>{1}</code> filer; <a href="#" id="bd_all">vis alle</a></div>', "fbd_all": '<div id="blazy">viser <code>{0}</code> av <code>{1}</code> filer; <a href="#" id="bd_all">vis alle</a></div>',
"f_dls": 'linkene i denne mappen er nå\nomgjort til nedlastningsknapper',
"ft_paste": "Lim inn {0} filer$NSnarvei: ctrl-V", "ft_paste": "Lim inn {0} filer$NSnarvei: ctrl-V",
"fr_eperm": 'kan ikke endre navn:\ndu har ikke “move”-rettigheten i denne mappen', "fr_eperm": 'kan ikke endre navn:\ndu har ikke “move”-rettigheten i denne mappen',
"fd_eperm": 'kan ikke slette:\ndu har ikke “delete”-rettigheten i denne mappen', "fd_eperm": 'kan ikke slette:\ndu har ikke “delete”-rettigheten i denne mappen',
@@ -847,7 +851,7 @@ var Ls = {
"u_hashdone": 'befaring ferdig', "u_hashdone": 'befaring ferdig',
"u_hashing": 'les', "u_hashing": 'les',
"u_fixed": "OK!&nbsp; Løste seg 👍", "u_fixed": "OK!&nbsp; Løste seg 👍",
"u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert harmløst, fortsetter\n\nfil: {2}", "u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert greit, fortsetter\n\nfil: {2}",
"u_cuerr2": "server nektet opplastningen (del {0} av {1});\nprøver igjen senere\n\nfil: {2}\n\nerror ", "u_cuerr2": "server nektet opplastningen (del {0} av {1});\nprøver igjen senere\n\nfil: {2}\n\nerror ",
"u_ehstmp": "prøver igjen; se mld nederst", "u_ehstmp": "prøver igjen; se mld nederst",
"u_ehsfin": "server nektet forespørselen om å ferdigstille filen; prøver igjen...", "u_ehsfin": "server nektet forespørselen om å ferdigstille filen; prøver igjen...",
@@ -949,7 +953,7 @@ ebi('op_up2k').innerHTML = (
'<table id="u2conf">\n' + '<table id="u2conf">\n' +
' <tr>\n' + ' <tr>\n' +
' <td class="c"><br />' + L.ul_par + '</td>\n' + ' <td class="c" data-perm="read"><br />' + L.ul_par + '</td>\n' +
' <td class="c" rowspan="2">\n' + ' <td class="c" rowspan="2">\n' +
' <input type="checkbox" id="multitask" />\n' + ' <input type="checkbox" id="multitask" />\n' +
' <label for="multitask" tt="' + L.ut_mt + '">🏃</label>\n' + ' <label for="multitask" tt="' + L.ut_mt + '">🏃</label>\n' +
@@ -970,7 +974,7 @@ ebi('op_up2k').innerHTML = (
' <td data-perm="read" rowspan="2" id="u2c3w"></td>\n' + ' <td data-perm="read" rowspan="2" id="u2c3w"></td>\n' +
' </tr>\n' + ' </tr>\n' +
' <tr>\n' + ' <tr>\n' +
' <td class="c">\n' + ' <td class="c" data-perm="read">\n' +
' <a href="#" class="b" id="nthread_sub">&ndash;</a><input\n' + ' <a href="#" class="b" id="nthread_sub">&ndash;</a><input\n' +
' class="txtbox" id="nthread" value="2" tt="' + L.ut_par + '"/><a\n' + ' class="txtbox" id="nthread" value="2" tt="' + L.ut_par + '"/><a\n' +
' href="#" class="b" id="nthread_add">+</a><br />&nbsp;\n' + ' href="#" class="b" id="nthread_add">+</a><br />&nbsp;\n' +
@@ -2084,8 +2088,13 @@ function prev_song(e) {
return song_skip(-1); return song_skip(-1);
} }
function dl_song() { function dl_song() {
if (!mp || !mp.au) if (!mp || !mp.au) {
return; var o = QSA('#files a[id]');
for (var a = 0; a < o.length; a++)
o[a].setAttribute('download', '');
return toast.inf(10, L.f_dls);
}
var url = mp.tracks[mp.au.tid]; var url = mp.tracks[mp.au.tid];
url += (url.indexOf('?') < 0 ? '?' : '&') + 'cache=987'; url += (url.indexOf('?') < 0 ? '?' : '&') + 'cache=987';
@@ -3383,7 +3392,7 @@ var fileman = (function () {
} }
var xhr = new XHR(); var xhr = new XHR();
xhr.open('GET', f[0].src + '?move=' + dst, true); xhr.open('POST', f[0].src + '?move=' + dst, true);
xhr.onload = xhr.onerror = rename_cb; xhr.onload = xhr.onerror = rename_cb;
xhr.send(); xhr.send();
} }
@@ -3414,7 +3423,7 @@ var fileman = (function () {
} }
toast.show('inf r', 0, esc(L.fd_busy.format(vps.length + 1, vp)), 'r'); toast.show('inf r', 0, esc(L.fd_busy.format(vps.length + 1, vp)), 'r');
xhr.open('GET', vp + '?delete', true); xhr.open('POST', vp + '?delete', true);
xhr.onload = xhr.onerror = delete_cb; xhr.onload = xhr.onerror = delete_cb;
xhr.send(); xhr.send();
} }
@@ -3522,7 +3531,7 @@ var fileman = (function () {
var dst = get_evpath() + vp.split('/').pop(); var dst = get_evpath() + vp.split('/').pop();
xhr.open('GET', vp + '?move=' + dst, true); xhr.open('POST', vp + '?move=' + dst, true);
xhr.onload = xhr.onerror = paste_cb; xhr.onload = xhr.onerror = paste_cb;
xhr.send(); xhr.send();
} }
@@ -4059,7 +4068,7 @@ var thegrid = (function () {
var oth = ebi(this.getAttribute('ref')), var oth = ebi(this.getAttribute('ref')),
href = noq_href(this), href = noq_href(this),
aplay = ebi('a' + oth.getAttribute('id')), aplay = ebi('a' + oth.getAttribute('id')),
is_img = /\.(gif|jpe?g|png|webp|webm|mkv|mp4)(\?|$)/i.test(href), is_img = /\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp|webm|mkv|mp4)(\?|$)/i.test(href),
is_dir = href.endsWith('/'), is_dir = href.endsWith('/'),
in_tree = is_dir && treectl.find(oth.textContent.slice(0, -1)), in_tree = is_dir && treectl.find(oth.textContent.slice(0, -1)),
have_sel = QS('#files tr.sel'), have_sel = QS('#files tr.sel'),
@@ -5178,8 +5187,8 @@ var treectl = (function () {
function rendertree(res, ts, top0, dst, rst) { function rendertree(res, ts, top0, dst, rst) {
var cur = ebi('treeul').getAttribute('ts'); var cur = ebi('treeul').getAttribute('ts');
if (cur && parseInt(cur) > ts) { if (cur && parseInt(cur) > ts + 20 && QS('#treeul>li>a+a')) {
console.log("reject tree"); console.log("reject tree; " + cur + " / " + (ts - cur));
return; return;
} }
ebi('treeul').setAttribute('ts', ts); ebi('treeul').setAttribute('ts', ts);
@@ -5403,7 +5412,7 @@ var treectl = (function () {
for (var a = 0; a < res.dirs.length; a++) for (var a = 0; a < res.dirs.length; a++)
dirs.push(res.dirs[a].href.split('/')[0].split('?')[0]); dirs.push(res.dirs[a].href.split('/')[0].split('?')[0]);
rendertree({ "a": dirs }, Date.now(), ".", get_evpath()); rendertree({ "a": dirs }, this.ts, ".", get_evpath());
} }
r.gentab(this.top, res); r.gentab(this.top, res);
@@ -5411,8 +5420,8 @@ var treectl = (function () {
despin('#files'); despin('#files');
despin('#gfiles'); despin('#gfiles');
ebi('pro').innerHTML = res.logues ? res.logues[0] || "" : ""; sandbox(ebi('pro'), sb_lg, '', res.logues ? res.logues[0] || "" : "");
ebi('epi').innerHTML = res.logues ? res.logues[1] || "" : ""; sandbox(ebi('epi'), sb_lg, '', res.logues ? res.logues[1] || "" : "");
clmod(ebi('epi'), 'mdo'); clmod(ebi('epi'), 'mdo');
if (res.readme) if (res.readme)
@@ -6546,6 +6555,37 @@ var msel = (function () {
})(); })();
var globalcss = (function () {
var ret = '';
return function () {
if (ret)
return ret;
var dcs = document.styleSheets;
for (var a = 0; a < dcs.length; a++) {
var base = dcs[a].href,
ds = dcs[a].cssRules;
if (!base)
continue;
base = base.replace(/[^/]+$/, '');
for (var b = 0; b < ds.length; b++) {
var css = ds[b].cssText.split(/\burl\(/g);
ret += css[0];
for (var c = 1; c < css.length; c++) {
var delim = (/^["']/.exec(css[c])) ? css[c].slice(0, 1) : '';
ret += 'url(' + delim + ((css[c].slice(0, 8).indexOf('://') + 1 || css[c].startsWith('/')) ? '' : base) +
css[c].slice(delim ? 1 : 0);
}
ret += '\n';
}
}
return ret;
};
})();
function show_md(md, name, div, url, depth) { function show_md(md, name, div, url, depth) {
var errmsg = L.md_eshow + name + ':\n\n', var errmsg = L.md_eshow + name + ':\n\n',
now = get_evpath(); now = get_evpath();
@@ -6565,7 +6605,7 @@ function show_md(md, name, div, url, depth) {
md_plug = {} md_plug = {}
md = load_md_plug(md, 'pre'); md = load_md_plug(md, 'pre');
md = load_md_plug(md, 'post'); md = load_md_plug(md, 'post', sb_md);
var marked_opts = { var marked_opts = {
headerPrefix: 'md-', headerPrefix: 'md-',
@@ -6578,7 +6618,8 @@ function show_md(md, name, div, url, depth) {
try { try {
clmod(div, 'mdo', 1); clmod(div, 'mdo', 1);
div.innerHTML = marked.parse(md, marked_opts); if (sandbox(div, sb_md, 'mdo', marked.parse(md, marked_opts)))
return;
ext = md_plug.post; ext = md_plug.post;
ext = ext ? [ext[0].render, ext[0].render2] : []; ext = ext ? [ext[0].render, ext[0].render2] : [];
@@ -6632,6 +6673,86 @@ if (readme)
show_readme(readme); show_readme(readme);
function sandbox(tgt, rules, cls, html) {
if (!rules || (html || '').indexOf('<') == -1) {
tgt.innerHTML = html;
clmod(tgt, 'sb');
return false;
}
clmod(tgt, 'sb', 1);
var tid = tgt.getAttribute('id'),
hash = location.hash,
want = '';
if (hash.startsWith('#md-'))
want = hash.slice(1);
var env = '', tags = QSA('script');
for (var a = 0; a < tags.length; a++) {
var js = tags[a].innerHTML;
if (js && js.indexOf('have_up2k_idx') + 1)
env = js.split(/\blogues *=/)[0] + 'a;';
}
html = '<html class="iframe ' + document.documentElement.className + '"><head><style>' + globalcss() +
'</style><base target="_parent"></head><body id="b" class="logue ' + cls + '">' + html +
'<script>' + env + '</script>' +
'<script src="' + SR + '/.cpr/util.js?_={{ ts }}"></script>' +
'<script>var ebi=document.getElementById.bind(document),d=document.documentElement,' +
'loc=new URL("' + location.href.split('?')[0] + '");' +
'function say(m){window.parent.postMessage(m,"*")};' +
'setTimeout(function(){var its=0,pih=-1,f=function(){' +
'var ih=2+Math.min(parseInt(getComputedStyle(d).height),d.scrollHeight);' +
'if(ih!=pih){pih=ih;say("iheight #' + tid + ' "+ih,"*")}' +
'if(++its<20)return setTimeout(f,20);if(its==20)setInterval(f,200)' +
'};f();' +
'window.onfocus=function(){say("igot #' + tid + '")};' +
'window.onblur=function(){say("ilost #' + tid + '")};' +
'var el="' + want + '"&&ebi("' + want + '");' +
'if(el)say("iscroll #' + tid + ' "+el.offsetTop);' +
(cls == 'mdo' && md_plug.post ?
'const x={' + md_plug.post + '};' +
'if(x.render)x.render(ebi("b"));' +
'if(x.render2)x.render2(ebi("b"));' : '') +
'},1)</script></body></html>';
var fr = mknod('iframe');
fr.setAttribute('sandbox', rules ? 'allow-' + rules.replace(/ /g, ' allow-') : '');
fr.setAttribute('srcdoc', html);
tgt.innerHTML = '';
tgt.appendChild(fr);
return true;
}
window.addEventListener("message", function (e) {
try {
console.log('msg:' + e.data);
var t = e.data.split(/ /g);
if (t[0] == 'iheight') {
var el = QS(t[1] + '>iframe');
el.style.height = t[2] + 'px';
el.style.visibility = 'unset';
}
else if (t[0] == 'iscroll') {
var y1 = QS(t[1]).offsetTop,
y2 = parseInt(t[2]);
console.log(y1, y2);
document.documentElement.scrollTop = y1 + y2;
}
else if (t[0] == 'igot' || t[0] == 'ilost') {
clmod(QS(t[1] + '>iframe'), 'focus', t[0] == 'igot');
}
} catch (ex) {
console.log('msg-err: ' + ex);
}
}, false);
if (sb_lg && logues.length) {
sandbox(ebi('pro'), sb_lg, '', logues[0]);
sandbox(ebi('epi'), sb_lg, '', logues[1]);
}
(function () { (function () {
try { try {
var tr = ebi('files').tBodies[0].rows; var tr = ebi('files').tBodies[0].rows;
@@ -6894,18 +7015,19 @@ function reload_browser() {
filecols.set_style(); filecols.set_style();
var parts = get_evpath().split('/'), var parts = get_evpath().split('/'),
rm = QSA('#path>a+a+a'), rm = ebi('entree'),
ftab = ebi('files'), ftab = ebi('files'),
link = '/', o; link = '', o;
for (a = rm.length - 1; a >= 0; a--) while (rm.nextSibling)
rm[a].parentNode.removeChild(rm[a]); rm.parentNode.removeChild(rm.nextSibling);
for (var a = 1; a < parts.length - 1; a++) { for (var a = 0; a < parts.length - 1; a++) {
link += parts[a] + '/'; link += parts[a] + '/';
o = mknod('a'); o = mknod('a');
o.setAttribute('href', link); o.setAttribute('href', link);
o.textContent = uricom_dec(parts[a]); o.textContent = uricom_dec(parts[a]) || '/';
ebi('path').appendChild(mknod('i'));
ebi('path').appendChild(o); ebi('path').appendChild(o);
} }

View File

@@ -930,7 +930,9 @@ var set_lno = (function () {
(function () { (function () {
function keydown(ev) { function keydown(ev) {
ev = ev || window.event; ev = ev || window.event;
var kc = ev.code || ev.keyCode || ev.which; var kc = ev.code || ev.keyCode || ev.which,
editing = document.activeElement == dom_src;
//console.log(ev.key, ev.code, ev.keyCode, ev.which); //console.log(ev.key, ev.code, ev.keyCode, ev.which);
if (ctrl(ev) && (ev.code == "KeyS" || kc == 83)) { if (ctrl(ev) && (ev.code == "KeyS" || kc == 83)) {
save(); save();
@@ -941,12 +943,17 @@ var set_lno = (function () {
if (d) if (d)
d.click(); d.click();
} }
if (document.activeElement != dom_src) if (editing)
return true; set_lno();
set_lno();
if (ctrl(ev)) { if (ctrl(ev)) {
if (ev.code == "KeyE") {
dom_nsbs.click();
return false;
}
if (!editing)
return true;
if (ev.code == "KeyH" || kc == 72) { if (ev.code == "KeyH" || kc == 72) {
md_header(ev.shiftKey); md_header(ev.shiftKey);
return false; return false;
@@ -971,10 +978,6 @@ var set_lno = (function () {
iter_uni(); iter_uni();
return false; return false;
} }
if (ev.code == "KeyE") {
dom_nsbs.click();
return false;
}
var up = ev.code == "ArrowUp" || kc == 38; var up = ev.code == "ArrowUp" || kc == 38;
var dn = ev.code == "ArrowDown" || kc == 40; var dn = ev.code == "ArrowDown" || kc == 40;
if (up || dn) { if (up || dn) {
@@ -987,6 +990,9 @@ var set_lno = (function () {
} }
} }
else { else {
if (!editing)
return true;
if (ev.code == "Tab" || kc == 9) { if (ev.code == "Tab" || kc == 9) {
md_indent(ev.shiftKey); md_indent(ev.shiftKey);
return false; return false;

View File

@@ -51,12 +51,30 @@ a.g {
border-color: #3a0; border-color: #3a0;
box-shadow: 0 .3em 1em #4c0; box-shadow: 0 .3em 1em #4c0;
} }
#repl { #repl,
#pb a {
border: none; border: none;
background: none; background: none;
color: inherit; color: inherit;
padding: 0; padding: 0;
} }
#repl {
position: fixed;
bottom: .25em;
left: .2em;
}
#pb {
opacity: .5;
position: fixed;
bottom: .25em;
right: .3em;
}
#pb span {
opacity: .6;
}
#pb a {
margin: 0;
}
table { table {
border-collapse: collapse; border-collapse: collapse;
} }

View File

@@ -46,7 +46,7 @@
<tbody> <tbody>
{% for mp in avol %} {% for mp in avol %}
{%- if mp in vstate and vstate[mp] %} {%- if mp in vstate and vstate[mp] %}
<tr><td><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></td><td><a class="s" href="{{ mp }}?scan">rescan</a></td><td>{{ vstate[mp] }}</td></tr> <tr><td><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></td><td><a class="s" href="{{ r }}{{ mp }}?scan">rescan</a></td><td>{{ vstate[mp] }}</td></tr>
{%- endif %} {%- endif %}
{% endfor %} {% endfor %}
</tbody> </tbody>
@@ -62,7 +62,7 @@
<h1 id="f">you can browse:</h1> <h1 id="f">you can browse:</h1>
<ul> <ul>
{% for mp in rvol %} {% for mp in rvol %}
<li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li> <li><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}
@@ -71,7 +71,7 @@
<h1 id="g">you can upload to:</h1> <h1 id="g">you can upload to:</h1>
<ul> <ul>
{% for mp in wvol %} {% for mp in wvol %}
<li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li> <li><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}
@@ -98,6 +98,9 @@
</ul> </ul>
</div> </div>
<a href="#" id="repl">π</a> <a href="#" id="repl">π</a>
{%- if not this.args.nb %}
<span id="pb"><span>powered by</span> <a href="{{ this.args.pb_url }}">copyparty</a></span>
{%- endif %}
<script> <script>
var SR = {{ r|tojson }}, var SR = {{ r|tojson }},

View File

@@ -15,7 +15,7 @@
<body> <body>
<div id="wrap" class="w"> <div id="wrap" class="w">
<div class="cn"> <div class="cn">
<p class="btns"><a href="{{ r }}/{{ vp }}">browse files</a> // <a href="{{ r }}/?h">control panel</a></p> <p class="btns"><a href="/{{ rvp }}">browse files</a> // <a href="{{ r }}/?h">control panel</a></p>
<p>or choose your OS for cooler alternatives:</p> <p>or choose your OS for cooler alternatives:</p>
<div class="ossel"> <div class="ossel">
<a id="swin" href="#">Windows</a> <a id="swin" href="#">Windows</a>
@@ -47,7 +47,7 @@
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p> <p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
<pre> <pre>
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %} rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>W:</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ rvp }} <b>W:</b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
@@ -55,19 +55,19 @@
<p>if you want to use the native WebDAV client in windows instead (slow and buggy), first run <a href="{{ r }}/.cpr/a/webdav-cfg.bat">webdav-cfg.bat</a> to remove the 47 MiB filesize limit (also fixes latency and password login), then connect:</p> <p>if you want to use the native WebDAV client in windows instead (slow and buggy), first run <a href="{{ r }}/.cpr/a/webdav-cfg.bat">webdav-cfg.bat</a> to remove the 47 MiB filesize limit (also fixes latency and password login), then connect:</p>
<pre> <pre>
net use <b>w:</b> http{{ s }}://{{ ep }}/{{ vp }}{% if accs %} k /user:<b>{{ pw }}</b>{% endif %} net use <b>w:</b> http{{ s }}://{{ ep }}/{{ rvp }}{% if accs %} k /user:<b>{{ pw }}</b>{% endif %}
</pre> </pre>
</div> </div>
<div class="os lin"> <div class="os lin">
<pre> <pre>
yum install davfs2 yum install davfs2
{% if accs %}printf '%s\n' <b>{{ pw }}</b> k | {% endif %}mount -t davfs -ouid=1000 http{{ s }}://{{ ep }}/{{ vp }} <b>mp</b> {% if accs %}printf '%s\n' <b>{{ pw }}</b> k | {% endif %}mount -t davfs -ouid=1000 http{{ s }}://{{ ep }}/{{ rvp }} <b>mp</b>
</pre> </pre>
<p>or you can use rclone instead, which is much slower but doesn't require root:</p> <p>or you can use rclone instead, which is much slower but doesn't require root:</p>
<pre> <pre>
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %} rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>mp</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ rvp }} <b>mp</b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
@@ -77,20 +77,20 @@
<!-- gnome-bug: ignores vp --> <!-- gnome-bug: ignores vp -->
<pre> <pre>
{%- if accs %} {%- if accs %}
echo <b>{{ pw }}</b> | gio mount dav{{ s }}://k@{{ ep }}/{{ vp }} echo <b>{{ pw }}</b> | gio mount dav{{ s }}://k@{{ ep }}/{{ rvp }}
{%- else %} {%- else %}
gio mount -a dav{{ s }}://{{ ep }}/{{ vp }} gio mount -a dav{{ s }}://{{ ep }}/{{ rvp }}
{%- endif %} {%- endif %}
</pre> </pre>
</div> </div>
<div class="os mac"> <div class="os mac">
<pre> <pre>
osascript -e ' mount volume "http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }}" ' osascript -e ' mount volume "http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ rvp }}" '
</pre> </pre>
<p>or you can open up a Finder, press command-K and paste this instead:</p> <p>or you can open up a Finder, press command-K and paste this instead:</p>
<pre> <pre>
http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }} http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ rvp }}
</pre> </pre>
{% if s %} {% if s %}
@@ -108,26 +108,26 @@
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p> <p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
<pre> <pre>
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }} rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>W:</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ rvp }} <b>W:</b>
</pre> </pre>
<p>if you want to use the native FTP client in windows instead (please dont), press <code>win+R</code> and run this command:</p> <p>if you want to use the native FTP client in windows instead (please dont), press <code>win+R</code> and run this command:</p>
<pre> <pre>
explorer {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}<b>{{ pw }}</b>:k@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} explorer {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}<b>{{ pw }}</b>:k@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
</pre> </pre>
</div> </div>
<div class="os lin"> <div class="os lin">
<pre> <pre>
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }} rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>mp</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ rvp }} <b>mp</b>
</pre> </pre>
<p>emergency alternative (gnome/gui-only):</p> <p>emergency alternative (gnome/gui-only):</p>
<!-- gnome-bug: ignores vp --> <!-- gnome-bug: ignores vp -->
<pre> <pre>
{%- if accs %} {%- if accs %}
echo <b>{{ pw }}</b> | gio mount ftp{{ "" if args.ftp else "s" }}://k@{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} echo <b>{{ pw }}</b> | gio mount ftp{{ "" if args.ftp else "s" }}://k@{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
{%- else %} {%- else %}
gio mount -a ftp{{ "" if args.ftp else "s" }}://{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} gio mount -a ftp{{ "" if args.ftp else "s" }}://{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
{%- endif %} {%- endif %}
</pre> </pre>
</div> </div>
@@ -135,7 +135,7 @@
<div class="os mac"> <div class="os mac">
<p>note: FTP is read-only on macos; please use WebDAV instead</p> <p>note: FTP is read-only on macos; please use WebDAV instead</p>
<pre> <pre>
open {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}k:<b>{{ pw }}</b>@{% else %}anonymous:@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} open {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}k:<b>{{ pw }}</b>@{% else %}anonymous:@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
</pre> </pre>
</div> </div>
{% endif %} {% endif %}
@@ -149,7 +149,7 @@
<span class="os lin">doesn't need root</span> <span class="os lin">doesn't need root</span>
</p> </p>
<pre> <pre>
partyfuse.py{% if accs %} -a <b>{{ pw }}</b>{% endif %} http{{ s }}://{{ ep }}/{{ vp }} <b><span class="os win">W:</span><span class="os lin mac">mp</span></b> partyfuse.py{% if accs %} -a <b>{{ pw }}</b>{% endif %} http{{ s }}://{{ ep }}/{{ rvp }} <b><span class="os win">W:</span><span class="os lin mac">mp</span></b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>-td</code></em></p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>-td</code></em></p>

View File

@@ -69,6 +69,7 @@ html {
#toastb { #toastb {
max-height: 70vh; max-height: 70vh;
overflow-y: auto; overflow-y: auto;
padding: 1px;
} }
#toast.scroll #toastb { #toast.scroll #toastb {
overflow-y: scroll; overflow-y: scroll;

View File

@@ -672,7 +672,7 @@ function Donut(uc, st) {
favico.upd(); favico.upd();
wintitle(); wintitle();
if (document.visibilityState == 'hidden') if (document.visibilityState == 'hidden')
tenstrobe = setTimeout(enstrobe, 500); //debounce tenstrobe = setTimeout(r.enstrobe, 500); //debounce
} }
}; };
@@ -709,7 +709,7 @@ function Donut(uc, st) {
} }
}; };
function enstrobe() { r.enstrobe = function () {
strobes = ['████████████████', '________________', '████████████████']; strobes = ['████████████████', '________________', '████████████████'];
tstrober = setInterval(strobe, 300); tstrober = setInterval(strobe, 300);
@@ -867,7 +867,7 @@ function up2k_init(subtle) {
bcfg_bind(uc, 'az', 'u2sort', u2sort.indexOf('n') + 1, set_u2sort); bcfg_bind(uc, 'az', 'u2sort', u2sort.indexOf('n') + 1, set_u2sort);
bcfg_bind(uc, 'hashw', 'hashw', !!window.WebAssembly && (!subtle || !CHROME || MOBILE || VCHROME >= 107), set_hashw); bcfg_bind(uc, 'hashw', 'hashw', !!window.WebAssembly && (!subtle || !CHROME || MOBILE || VCHROME >= 107), set_hashw);
bcfg_bind(uc, 'upnag', 'upnag', false, set_upnag); bcfg_bind(uc, 'upnag', 'upnag', false, set_upnag);
bcfg_bind(uc, 'upsfx', 'upsfx', false); bcfg_bind(uc, 'upsfx', 'upsfx', false, set_upsfx);
var st = { var st = {
"files": [], "files": [],
@@ -895,9 +895,9 @@ function up2k_init(subtle) {
"finished": 0 "finished": 0
}, },
"time": { "time": {
"hashing": 0, "hashing": 0.01,
"uploading": 0, "uploading": 0.01,
"busy": 0 "busy": 0.01
}, },
"eta": { "eta": {
"h": "", "h": "",
@@ -1555,11 +1555,11 @@ function up2k_init(subtle) {
st.busy.handshake.length) st.busy.handshake.length)
return false; return false;
if (t.n - st.car > 8) if (t.n - st.car > Math.max(8, parallel_uploads))
// prevent runahead from a stuck upload (slow server hdd) // prevent runahead from a stuck upload (slow server hdd)
return false; return false;
if ((uc.multitask ? 1 : 0) < if ((uc.multitask ? parallel_uploads : 0) <
st.todo.upload.length + st.todo.upload.length +
st.busy.upload.length) st.busy.upload.length)
return false; return false;
@@ -1571,21 +1571,22 @@ function up2k_init(subtle) {
if (!parallel_uploads) if (!parallel_uploads)
return false; return false;
var nhs = st.todo.handshake.length + st.busy.handshake.length,
nup = st.todo.upload.length + st.busy.upload.length;
if (uc.multitask) { if (uc.multitask) {
if (nhs + nup < parallel_uploads)
return true;
if (!uc.az) if (!uc.az)
return st.todo.handshake.length + st.busy.handshake.length < 2; return nhs < 2;
var ahead = st.bytes.hashed - st.bytes.finished, var ahead = st.bytes.hashed - st.bytes.finished,
nmax = ahead < biggest_file / 8 ? 32 : 16; nmax = ahead < biggest_file / 8 ? 32 : 16;
return ahead < biggest_file && return ahead < biggest_file && nhs < nmax;
st.todo.handshake.length + st.busy.handshake.length < nmax;
} }
return handshakes_permitted() && 0 == return handshakes_permitted() && 0 == nhs + nup;
st.todo.handshake.length +
st.busy.handshake.length +
st.todo.upload.length +
st.busy.upload.length;
} }
var tasker = (function () { var tasker = (function () {
@@ -1750,20 +1751,22 @@ function up2k_init(subtle) {
var sr = uc.fsearch, var sr = uc.fsearch,
ok = pvis.ctr.ok, ok = pvis.ctr.ok,
ng = pvis.ctr.ng, ng = pvis.ctr.ng,
spd = Math.floor(st.bytes.finished / st.time.busy),
suf = '\n\n{0} @ {1}/s'.format(shumantime(st.time.busy), humansize(spd)),
t = uc.ask_up ? 0 : 10; t = uc.ask_up ? 0 : 10;
console.log('toast', ok, ng); console.log('toast', ok, ng);
if (ok && ng) if (ok && ng)
toast.warn(t, uc.nagtxt = (sr ? L.ur_sm : L.ur_um).format(ok, ng)); toast.warn(t, uc.nagtxt = (sr ? L.ur_sm : L.ur_um).format(ok, ng) + suf);
else if (ok > 1) else if (ok > 1)
toast.ok(t, uc.nagtxt = (sr ? L.ur_aso : L.ur_auo).format(ok)); toast.ok(t, uc.nagtxt = (sr ? L.ur_aso : L.ur_auo).format(ok) + suf);
else if (ok) else if (ok)
toast.ok(t, uc.nagtxt = sr ? L.ur_1so : L.ur_1uo); toast.ok(t, uc.nagtxt = (sr ? L.ur_1so : L.ur_1uo) + suf);
else if (ng > 1) else if (ng > 1)
toast.err(t, uc.nagtxt = (sr ? L.ur_asn : L.ur_aun).format(ng)); toast.err(t, uc.nagtxt = (sr ? L.ur_asn : L.ur_aun).format(ng) + suf);
else if (ng) else if (ng)
toast.err(t, uc.nagtxt = sr ? L.ur_1sn : L.ur_1un); toast.err(t, uc.nagtxt = (sr ? L.ur_1sn : L.ur_1un) + suf);
timer.rm(etafun); timer.rm(etafun);
timer.rm(donut.do); timer.rm(donut.do);
@@ -2319,9 +2322,10 @@ function up2k_init(subtle) {
} }
var err_pend = rsp.indexOf('partial upload exists at a different') + 1, var err_pend = rsp.indexOf('partial upload exists at a different') + 1,
err_plug = rsp.indexOf('upload blocked by x') + 1,
err_dupe = rsp.indexOf('upload rejected, file already exists') + 1; err_dupe = rsp.indexOf('upload rejected, file already exists') + 1;
if (err_pend || err_dupe) { if (err_pend || err_plug || err_dupe) {
err = rsp; err = rsp;
ofs = err.indexOf('\n/'); ofs = err.indexOf('\n/');
if (ofs !== -1) { if (ofs !== -1) {
@@ -2378,8 +2382,17 @@ function up2k_init(subtle) {
function can_upload_next() { function can_upload_next() {
var upt = st.todo.upload[0], var upt = st.todo.upload[0],
upf = st.files[upt.nfile], upf = st.files[upt.nfile],
nhs = st.busy.handshake.length,
hs = nhs && st.busy.handshake[0],
now = Date.now(); now = Date.now();
if (nhs >= 16)
return false;
if (hs && hs.t_uploaded && Date.now() - hs.t_busied > 10000)
// verification HS possibly held back by uploads
return false;
for (var a = 0, aa = st.busy.handshake.length; a < aa; a++) { for (var a = 0, aa = st.busy.handshake.length; a < aa; a++) {
var hs = st.busy.handshake[a]; var hs = st.busy.handshake[a];
if (hs.n < upt.nfile && hs.t_busied > now - 10 * 1000 && !st.files[hs.n].bytes_uploaded) if (hs.n < upt.nfile && hs.t_busied > now - 10 * 1000 && !st.files[hs.n].bytes_uploaded)
@@ -2419,6 +2432,14 @@ function up2k_init(subtle) {
function orz(xhr) { function orz(xhr) {
var txt = ((xhr.response && xhr.response.err) || xhr.responseText) + ''; var txt = ((xhr.response && xhr.response.err) || xhr.responseText) + '';
if (txt.indexOf('upload blocked by x') + 1) {
apop(st.busy.upload, upt);
apop(t.postlist, npart);
pvis.seth(t.n, 1, "ERROR");
pvis.seth(t.n, 2, txt.split(/\n/)[0]);
pvis.move(t.n, 'ng');
return;
}
if (xhr.status == 200) { if (xhr.status == 200) {
pvis.prog(t, npart, cdr - car); pvis.prog(t, npart, cdr - car);
st.bytes.finished += cdr - car; st.bytes.finished += cdr - car;
@@ -2553,9 +2574,15 @@ function up2k_init(subtle) {
if (dir.target) { if (dir.target) {
clmod(obj, 'err', 1); clmod(obj, 'err', 1);
var v = Math.floor(parseInt(obj.value)); var v = Math.floor(parseInt(obj.value));
if (v < 0 || v > 64 || v !== v) if (v < 0 || v !== v)
return; return;
if (v > 64) {
var p = obj.selectionStart;
v = obj.value = 64;
obj.selectionStart = obj.selectionEnd = p;
}
parallel_uploads = v; parallel_uploads = v;
swrite('nthread', v); swrite('nthread', v);
clmod(obj, 'err'); clmod(obj, 'err');
@@ -2772,6 +2799,21 @@ function up2k_init(subtle) {
if (en && Notification.permission == 'default') if (en && Notification.permission == 'default')
Notification.requestPermission().then(chknag, chknag); Notification.requestPermission().then(chknag, chknag);
set_upsfx(en);
}
function set_upsfx(en) {
if (!en)
return;
toast.inf(10, 'OK -- <a href="#" id="nagtest">test it!</a>')
ebi('nagtest').onclick = function () {
start_actx();
uc.nagtxt = ':^)';
setTimeout(donut.enstrobe, 200);
};
} }
if (uc.upnag && (!window.Notification || Notification.permission != 'granted')) if (uc.upnag && (!window.Notification || Notification.permission != 'granted'))

View File

@@ -195,8 +195,12 @@ function vis_exh(msg, url, lineNo, columnNo, error) {
var lsk = Object.keys(ls); var lsk = Object.keys(ls);
lsk.sort(); lsk.sort();
html.push('<p class="b">'); html.push('<p class="b">');
for (var a = 0; a < lsk.length; a++) for (var a = 0; a < lsk.length; a++) {
if (ls[lsk[a]].length > 9000)
continue;
html.push(' <b>' + esc(lsk[a]) + '</b> <code>' + esc(ls[lsk[a]]) + '</code> '); html.push(' <b>' + esc(lsk[a]) + '</b> <code>' + esc(ls[lsk[a]]) + '</code> ');
}
html.push('</p>'); html.push('</p>');
} }
catch (e) { } catch (e) { }
@@ -1528,25 +1532,33 @@ var md_plug_err = function (ex, js) {
if (ex) if (ex)
console.log(ex, js); console.log(ex, js);
}; };
function load_md_plug(md_text, plug_type) { function load_md_plug(md_text, plug_type, defer) {
if (defer)
md_plug[plug_type] = null;
if (!have_emp) if (!have_emp)
return md_text; return md_text;
var find = '\n```copyparty_' + plug_type + '\n'; var find = '\n```copyparty_' + plug_type + '\n',
var ofs = md_text.indexOf(find); md = md_text.replace(/\r/g, ''),
if (ofs === -1) ofs = md.indexOf(find),
ofs2 = md.indexOf('\n```', ofs + 1);
if (ofs < 0 || ofs2 < 0)
return md_text; return md_text;
var ofs2 = md_text.indexOf('\n```', ofs + 1); var js = md.slice(ofs + find.length, ofs2 + 1);
if (ofs2 == -1) md = md.slice(0, ofs + 1) + md.slice(ofs2 + 4);
return md_text; md = md.replace(/$/g, '\r');
var js = md_text.slice(ofs + find.length, ofs2 + 1); if (defer) { // insert into sandbox
var md = md_text.slice(0, ofs + 1) + md_text.slice(ofs2 + 4); md_plug[plug_type] = js;
return md;
}
var old_plug = md_plug[plug_type]; var old_plug = md_plug[plug_type];
if (!old_plug || old_plug[1] != js) { if (!old_plug || old_plug[1] != js) {
js = 'const x = { ' + js + ' }; x;'; js = 'const loc = new URL("' + location.href + '"), x = { ' + js + ' }; x;';
try { try {
var x = eval(js); var x = eval(js);
if (x['ctor']) { if (x['ctor']) {

View File

@@ -1,3 +1,75 @@
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2023-0112-0515 `v1.5.6` many hands
hello from warsaw airport (goodbye japan ;_;)
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* multiple upload handshakes in parallel
* around **5x faster** when uploading small files
* or **50x faster** if the server is on the other side of the planet
* just crank up the `parallel uploads` like crazy (max is 64)
* upload ui: total time and average speed is shown on completion
## bugfixes
* browser ui didn't allow specifying number of threads for file search
* dont panic if a digit key is pressed while viewing an image
* workaround [linux kernel bug](https://utcc.utoronto.ca/~cks/space/blog/linux/KernelBindBugIn6016) causing log spam on dualstack
* ~~related issue (also mostly harmless) will be fixed next release 010770684db95bece206943768621f2c7c27bace~~
* they fixed it in linux 6.1 so these workarounds will be gone too
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1230-0754 `v1.5.5` made in japan
hello from tokyo
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* image viewer now supports heif, avif, apng, svg
* [partyfuse and up2k.py](https://github.com/9001/copyparty/tree/hovudstraum/bin): option to read password from textfile
## bugfixes
* thumbnailing could fail if a primitive build of libvips is installed
* ssdp was wonky on dualstack ipv6
* mdns could crash on networks with invalid routes
* support fat32 timestamp precisions
* fixes spurious file reindexing in volumes located on SD cards on android tablets which lie about timestamps until the next device reboot or filesystem remount
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1213-1956 `v1.5.3` folder-sync + turbo-rust
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* one-way folder sync (client to server) using [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/README.md#up2kpy) `-z --dr`
* great rsync alternative when combined with `-e2ds --hardlink` deduplication on the server
* **50x faster** when uploading small files to HDD, especially SMR
* by switching sqlite to WAL which carries a small chance of temporarily forgetting the ~200 most recent uploads if you have a power outage or your OS crashes; see `--help-dbd` if you have `-mtp` plugins which produce metadata you can't afford to lose
* location-based [reverse-proxying](https://github.com/9001/copyparty/#reverse-proxy) (but it's still recommended to use a dedicated domain/subdomain instead)
* IPv6 link-local automatically enabled for TCP and zeroconf on NICs without a routable IPv6
* zeroconf network filters now accept subnets too, for example `--z-on 192.168.0.0/16`
* `.hist` folders are hidden on windows
* ux:
* more accurate total ETA on upload
* sorting of batch-unpost links was unintuitive / dangerous
* hotkey `Y` turns files into download links if nothing's selected
* option to replace or disable the mediaplayer-toggle mouse cursor with `--mpmc`
## bugfixes
* WAL probably/hopefully fixes #10 (we'll know in 6 months roughly)
* repair db inconsistencies (which can happen if terminated during startup)
* [davfs2](https://wiki.archlinux.org/title/Davfs2) did not approve of the authentication prompt
* the `connect` button on the control-panel didn't work on phones
* couldn't specify windows NICs in arguments `--z-on` / `--z-off` and friends
* ssdp xml escaping for `--zsl` URL
* no longer possible to accidentally launch multiple copyparty instances on the same port on windows
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1203-2048 `v1.5.1` babel # 2022-1203-2048 `v1.5.1` babel

View File

@@ -0,0 +1,22 @@
insane ways to use copyparty
## wireless keyboard
problem: you wanna control mpv or whatever software from the couch but you don't have a wireless keyboard
"solution": load some custom javascript which renders a virtual keyboard on the upload UI and each keystroke is actually an upload which gets picked up by a dummy metadata parser which forwards the keystrokes into xdotool
[no joke, this actually exists and it wasn't even my idea or handiwork (thx steen)](https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/meadup.js)
## appxsvc tarpit
problem: `svchost.exe` is using 100% of a cpu core, and upon further inspection (`procmon`) it is `wsappx` desperately trying to install something, repeatedly reading a file named `AppxManifest.xml` and messing with an sqlite3 database
"solution": create a virtual filesystem which is intentionally slow and trick windows into reading it from there instead
* create a file called `AppxManifest.xml` and put something dumb in it
* serve the file from a copyparty instance with `--rsp-slp=9` so every request will hang for 9 sec
* `net use m: http://127.0.0.1:3993/` (mount copyparty using the windows-native webdav client)
* `mklink /d c:\windows\systemapps\microsoftwindows.client.cbs_cw5n1h2txyewy\AppxManifest.xml m:\AppxManifest.xml`

View File

@@ -127,7 +127,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
| method | params | result | | method | params | result |
|--|--|--| |--|--|--|
| GET | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar | | POST | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar |
| method | params | body | result | | method | params | body | result |
|--|--|--|--| |--|--|--|--|
@@ -137,7 +137,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
| mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL | | mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL |
| mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json | | mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json |
| mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL | | mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL |
| GET | `?delete` | | delete URL recursively | | POST | `?delete` | | delete URL recursively |
| jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively | | jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively |
| uPOST | | `msg=foo` | send message `foo` into server log | | uPOST | | `msg=foo` | send message `foo` into server log |
| mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL | | mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL |

557
docs/versus.md Normal file
View File

@@ -0,0 +1,557 @@
# alternatives to copyparty
copyparty compared against all similar software i've bumped into
there is probably some unintentional bias so please submit corrections
currently up to date with [awesome-selfhosted](https://github.com/awesome-selfhosted/awesome-selfhosted) but that probably won't last
## toc
* top
* [recommendations](#recommendations)
* [feature comparisons](#feature-comparisons)
* [general](#general)
* [file transfer](#file-transfer)
* [protocols and client support](#protocols-and-client-support)
* [server configuration](#server-configuration)
* [server capabilities](#server-capabilities)
* [client features](#client-features)
* [integration](#integration)
* [another matrix](#another-matrix)
* [reviews](#reviews)
* [copyparty](#copyparty)
* [hfs2](#hfs2)
* [hfs3](#hfs3)
* [nextcloud](#nextcloud)
* [seafile](#seafile)
* [rclone](#rclone)
* [dufs](#dufs)
* [chibisafe](#chibisafe)
* [kodbox](#kodbox)
* [filebrowser](#filebrowser)
* [filegator](#filegator)
* [updog](#updog)
* [goshs](#goshs)
* [gimme-that](#gimme-that)
* [ass](#ass)
* [linx](#linx)
* [briefly considered](#briefly-considered)
# recommendations
* [kodbox](https://github.com/kalcaddle/kodbox) ([review](#kodbox)) appears to be a fantastic alternative if you're not worried about running chinese software, with several advantages over copyparty
* but anything you want to share must be moved into the kodbox filesystem
* [seafile](https://github.com/haiwen/seafile) ([review](#seafile)) and [nextcloud](https://github.com/nextcloud/server) ([review](#nextcloud)) could be decent alternatives if you need something heavier than copyparty
* but their [license](https://snyk.io/learn/agpl-license/) is [problematic](https://opensource.google/documentation/reference/using/agpl-policy)
* and copyparty is way better at uploads in particular (resumable, accelerated)
* and anything you want to share must be moved into the respective filesystems
* [filebrowser](https://github.com/filebrowser/filebrowser) ([review](#filebrowser)) and [dufs](https://github.com/sigoden/dufs) ([review](#dufs)) are simpler copyparties but with a settings gui
* has some of the same strengths of copyparty, being portable and able to work with an existing folder structure
* ...but copyparty is better at uploads + some other things
# feature comparisons
```
<&Kethsar> copyparty is very much bloat ed, so yeah
```
the table headers in the matrixes below are the different softwares, with a quick review of each software in the next section
the softwares,
* `a` = [copyparty](https://github.com/9001/copyparty)
* `b` = [hfs2](https://github.com/rejetto/hfs2)
* `c` = [hfs3](https://www.rejetto.com/hfs/)
* `d` = [nextcloud](https://github.com/nextcloud/server)
* `e` = [seafile](https://github.com/haiwen/seafile)
* `f` = [rclone](https://github.com/rclone/rclone), specifically `rclone serve webdav .`
* `g` = [dufs](https://github.com/sigoden/dufs)
* `h` = [chibisafe](https://github.com/chibisafe/chibisafe)
* `i` = [kodbox](https://github.com/kalcaddle/kodbox)
* `j` = [filebrowser](https://github.com/filebrowser/filebrowser)
* `k` = [filegator](https://github.com/filegator/filegator)
some softwares not in the matrixes,
* [updog](#updog)
* [goshs](#goshs)
* [gimme-that](#gimme-that)
* [ass](#ass)
* [linx](#linx)
symbol legend,
* `█` = absolutely
* `` = partially
* `•` = maybe?
* ` ` = nope
## general
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| intuitive UX | | | █ | █ | █ | | █ | █ | █ | █ | █ |
| config GUI | | █ | █ | █ | █ | | | █ | █ | █ | |
| good documentation | | | | █ | █ | █ | █ | | | █ | █ |
| runs on iOS | | | | | | | | | | | |
| runs on Android | █ | | | | | █ | | | | | |
| runs on WinXP | █ | █ | | | | █ | | | | | |
| runs on Windows | █ | █ | █ | █ | █ | █ | █ | | █ | █ | █ |
| runs on Linux | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| runs on Macos | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| runs on FreeBSD | █ | | | • | █ | █ | █ | • | █ | █ | |
| portable binary | █ | █ | █ | | | █ | █ | | | █ | |
| zero setup, just go | █ | █ | █ | | | | █ | | | █ | |
| android app | | | | █ | █ | | | | | | |
| iOS app | | | | █ | █ | | | | | | |
* `zero setup` = you can get a mostly working setup by just launching the app, without having to install any software or configure whatever
* `a`/copyparty remarks:
* no gui for server settings; only for client-side stuff
* can theoretically run on iOS / iPads using [iSH](https://ish.app/), but only the iPad will offer sufficient multitasking i think
* [android app](https://f-droid.org/en/packages/me.ocv.partyup/) is for uploading only
* `b`/hfs2 runs on linux through wine
* `f`/rclone must be started with the command `rclone serve webdav .` or similar
* `h`/chibisafe has undocumented windows support
## file transfer
*the thing that copyparty is actually kinda good at*
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| download folder as zip | █ | █ | █ | █ | █ | | █ | | █ | █ | |
| download folder as tar | █ | | | | | | | | | █ | |
| upload | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| parallel uploads | █ | | | █ | █ | | • | | █ | | █ |
| resumable uploads | █ | | | | | | | | █ | | █ |
| upload segmenting | █ | | | | | | | █ | █ | | █ |
| upload acceleration | █ | | | | | | | | █ | | █ |
| upload verification | █ | | | █ | █ | | | | █ | | |
| upload deduplication | █ | | | | █ | | | | █ | | |
| upload a 999 TiB file | █ | | | | █ | █ | • | | █ | | █ |
| keep last-modified time | █ | | | █ | █ | █ | | | | | |
| upload rules | | | | | | | | | | | |
| ┗ max disk usage | █ | █ | | | █ | | | | █ | | |
| ┗ max filesize | █ | | | | | | | █ | | | █ |
| ┗ max items in folder | █ | | | | | | | | | | |
| ┗ max file age | █ | | | | | | | | █ | | |
| ┗ max uploads over time | █ | | | | | | | | | | |
| ┗ compress before write | █ | | | | | | | | | | |
| ┗ randomize filename | █ | | | | | | | █ | █ | | |
| ┗ mimetype reject-list | | | | | | | | | • | | |
| ┗ extension reject-list | | | | | | | | █ | • | | |
| checksums provided | | | | █ | █ | | | | █ | | |
| cloud storage backend | | | | █ | █ | █ | | | | | █ |
* `upload segmenting` = files are sliced into chunks, making it possible to upload files larger than 100 MiB on cloudflare for example
* `upload acceleration` = each file can be uploaded using several TCP connections, which can offer a huge speed boost over huge distances / on flaky connections -- like the good old [download accelerators](https://en.wikipedia.org/wiki/GetRight) except in reverse
* `upload verification` = uploads are checksummed or otherwise confirmed to have been transferred correctly
* `checksums provided` = when downloading a file from the server, the file's checksum is provided for verification client-side
* `cloud storage backend` = able to serve files from (and write to) s3 or similar cloud services; `` means the software can do this with some help from `rclone mount` as a bridge
* `a`/copyparty can reject uploaded files (based on complex conditions), for example [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py)
* `j`/filebrowser remarks:
* can provide checksums for single files on request
* can probably do extension/mimetype rejection similar to copyparty
* `k`/filegator download-as-zip is not streaming; it creates the full zipfile before download can start
## protocols and client support
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| serve https | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| serve webdav | █ | | | █ | █ | █ | █ | | █ | | |
| serve ftp | █ | | | | | █ | | | | | |
| serve ftps | █ | | | | | █ | | | | | |
| serve sftp | | | | | | █ | | | | | |
| serve smb/cifs | | | | | | █ | | | | | |
| serve dlna | | | | | | █ | | | | | |
| listen on unix-socket | | | | █ | █ | | █ | █ | █ | | █ |
| zeroconf | █ | | | | | | | | | | |
| supports netscape 4 | | | | | | █ | | | | | • |
| ...internet explorer 6 | | █ | | █ | | █ | | | | | • |
| mojibake filenames | █ | | | • | • | █ | █ | • | • | • | |
| undecodable filenames | █ | | | • | • | █ | | • | • | | |
* `zeroconf` = the server announces itself on the LAN, automatically appearing on other zeroconf-capable devices
* `mojibake filenames` = filenames decoded with the wrong codec and then reencoded (usually to utf-8), so `宇多田ヒカル` might look like `ëFæ╜ôcâqâJâï`
* `undecodable filenames` = pure binary garbage which cannot be parsed as utf-8
* you can successfully play `$'\355\221'` with mpv through mounting a remote copyparty server with rclone, pog
* `a`/copyparty remarks:
* extremely minimal samba/cifs server
* netscape 4 / ie6 support is mostly listed as a joke altho some people have actually found it useful
## server configuration
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| config from cmd args | █ | | | | | █ | █ | | | █ | |
| config files | █ | █ | █ | | | █ | | █ | | █ | • |
| runtime config reload | █ | █ | █ | | | | | █ | █ | █ | █ |
| same-port http / https | █ | | | | | | | | | | |
| listen multiple ports | █ | | | | | | | | | | |
| virtual file system | █ | █ | █ | | | | █ | | | | |
| reverse-proxy ok | █ | | █ | █ | █ | █ | █ | █ | • | • | • |
| folder-rproxy ok | █ | | | | █ | █ | | • | • | • | • |
* `folder-rproxy` = reverse-proxying without dedicating an entire (sub)domain, using a subfolder instead
## server capabilities
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| accounts | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| single-sign-on | | | | █ | █ | | | | • | | |
| token auth | | | | █ | █ | | | █ | | | |
| per-volume permissions | █ | █ | █ | █ | █ | █ | █ | | █ | █ | |
| per-folder permissions | | | | █ | █ | | █ | | █ | █ | |
| per-file permissions | | | | █ | █ | | █ | | █ | | |
| per-file passwords | █ | | | █ | █ | | █ | | █ | | |
| unmap subfolders | █ | | | | | | █ | | | █ | |
| index.html blocks list | | | | | | | █ | | | • | |
| write-only folders | █ | | | | | | | | | | █ |
| files stored as-is | █ | █ | █ | █ | | █ | █ | | | █ | █ |
| file versioning | | | | █ | █ | | | | | | |
| file encryption | | | | █ | █ | █ | | | | | |
| file indexing | █ | | █ | █ | █ | | | █ | █ | █ | |
| ┗ per-volume db | █ | | • | • | • | | | • | • | | |
| ┗ db stored in folder | █ | | | | | | | • | • | █ | |
| ┗ db stored out-of-tree | █ | | █ | █ | █ | | | • | • | █ | |
| ┗ existing file tree | █ | | █ | | | | | | | █ | |
| file action event hooks | █ | | | | | | | | | █ | |
| one-way folder sync | █ | | | █ | █ | █ | | | | | |
| full sync | | | | █ | █ | | | | | | |
| speed throttle | | █ | █ | | | █ | | | █ | | |
| anti-bruteforce | █ | █ | █ | █ | █ | | | | • | | |
| dyndns updater | | █ | | | | | | | | | |
| self-updater | | | █ | | | | | | | | |
| log rotation | █ | | █ | █ | █ | | | • | █ | | |
| upload tracking / log | █ | █ | • | █ | █ | | | █ | █ | | |
| curl-friendly ls | █ | | | | | | | | | | |
| curl-friendly upload | █ | | | | | █ | █ | • | | | |
* `unmap subfolders` = "shadowing"; mounting a local folder in the middle of an existing filesystem tree in order to disable access below that path
* `files stored as-is` = uploaded files are trivially readable from the server HDD, not sliced into chunks or in weird folder structures or anything like that
* `db stored in folder` = filesystem index can be written to a database file inside the folder itself
* `db stored out-of-tree` = filesystem index can be stored some place else, not necessarily inside the shared folders
* `existing file tree` = will index any existing files it finds
* `file action event hooks` = run script before/after upload, move, rename, ...
* `one-way folder sync` = like rsync, optionally deleting unexpected files at target
* `full sync` = stateful, dropbox-like sync
* `curl-friendly ls` = returns a plaintext folder listing when curled
* `curl-friendly upload` = uploading with curl is just `curl -T some.bin http://.../`
* `a`/copyparty remarks:
* one-way folder sync from local to server can be done efficiently with [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py), or with webdav and conventional rsync
* can hot-reload config files (with just a few exceptions)
* can set per-folder permissions if that folder is made into a separate volume, so there is configuration overhead
* upload history can be visualized using [partyjournal](https://github.com/9001/copyparty/blob/hovudstraum/bin/partyjournal.py)
* `k`/filegator remarks:
* `per-* permissions` -- can limit a user to one folder and its subfolders
* `unmap subfolders` -- can globally filter a list of paths
## client features
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ---------------------- | - | - | - | - | - | - | - | - | - | - | - |
| single-page app | █ | | █ | █ | █ | | | █ | █ | █ | █ |
| themes | █ | █ | | █ | | | | | █ | | |
| directory tree nav | █ | | | | █ | | | | █ | | |
| multi-column sorting | █ | | | | | | | | | | |
| thumbnails | █ | | | | | | | █ | █ | | |
| ┗ image thumbnails | █ | | | █ | █ | | | █ | █ | █ | |
| ┗ video thumbnails | █ | | | █ | █ | | | | █ | | |
| ┗ audio spectrograms | █ | | | | | | | | | | |
| audio player | █ | | | █ | █ | | | | █ | | |
| ┗ gapless playback | █ | | | | | | | | • | | |
| ┗ audio equalizer | █ | | | | | | | | | | |
| ┗ waveform seekbar | █ | | | | | | | | | | |
| ┗ OS integration | █ | | | | | | | | | | |
| ┗ transcode to lossy | █ | | | | | | | | | | |
| video player | █ | | | █ | █ | | | | █ | █ | |
| ┗ video transcoding | | | | | | | | | █ | | |
| audio BPM detector | █ | | | | | | | | | | |
| audio key detector | █ | | | | | | | | | | |
| search by path / name | █ | █ | █ | █ | █ | | █ | | █ | █ | |
| search by date / size | █ | | | | █ | | | █ | █ | | |
| search by bpm / key | █ | | | | | | | | | | |
| search by custom tags | | | | | | | | █ | █ | | |
| search in file contents | | | | █ | █ | | | | █ | | |
| search by custom parser | █ | | | | | | | | | | |
| find local file | █ | | | | | | | | | | |
| undo recent uploads | █ | | | | | | | | | | |
| create directories | █ | | | █ | █ | | █ | █ | █ | █ | █ |
| image viewer | █ | | | █ | █ | | | | █ | █ | █ |
| markdown viewer | █ | | | | █ | | | | █ | | |
| markdown editor | █ | | | | █ | | | | █ | | |
| readme.md in listing | █ | | | █ | | | | | | | |
| rename files | █ | █ | █ | █ | █ | | █ | | █ | █ | █ |
| batch rename | █ | | | | | | | | █ | | |
| cut / paste files | █ | █ | | █ | █ | | | | █ | | |
| move files | █ | █ | | █ | █ | | █ | | █ | █ | █ |
| delete files | █ | █ | | █ | █ | | █ | █ | █ | █ | █ |
| copy files | | | | | █ | | | | █ | █ | █ |
* `single-page app` = multitasking; possible to continue navigating while uploading
* `audio player » os-integration` = use the lockscreen to play/pause, prev/next song
* `find local file` = drop a file into the browser to see if it exists on the server
* `a`/copyparty has teeny-tiny skips playing gapless albums depending on audio codec (opus best)
* `b`/hfs2 has a very basic directory tree view, not showing sibling folders
* `f`/rclone can do some file management (mkdir, rename, delete) when hosting through webdav
* `j`/filebrowser has a plaintext viewer/editor
* `k`/filegator directory tree is a modal window
## integration
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| OS alert on upload | █ | | | | | | | | | | |
| discord | █ | | | | | | | | | | |
| ┗ announce uploads | █ | | | | | | | | | | |
| ┗ custom embeds | | | | | | | | | | | |
| sharex | █ | | | █ | | █ | | █ | | | |
| flameshot | | | | | | █ | | | | | |
* sharex = yes, but does not provide example sharex config
* `a`/copyparty remarks:
* `OS alert on upload` available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/notify.py)
* `discord » announce uploads` available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/discord-announce.py)
* `j`/filebrowser can probably pull those off with command runners similar to copyparty
## another matrix
| software / feature | lang | lic | size |
| ------------------ | ------ | ------ | ------ |
| copyparty | python | █ mit | 0.6 MB |
| hfs2 | delphi | ░ gpl3 | 2 MB |
| hfs3 | ts | ░ gpl3 | 36 MB |
| nextcloud | php | ‼ agpl | • |
| seafile | c | ‼ agpl | • |
| rclone | c | █ mit | 45 MB |
| dufs | rust | █ apl2 | 2.5 MB |
| chibisafe | ts | █ mit | • |
| kodbox | php | ░ gpl3 | 92 MB |
| filebrowser | go | █ apl2 | 20 MB |
| filegator | php | █ mit | • |
| updog | python | █ mit | 17 MB |
| goshs | go | █ mit | 11 MB |
| gimme-that | python | █ mit | 4.8 MB |
| ass | ts | █ isc | • |
| linx | go | ░ gpl3 | 20 MB |
* `size` = binary (if available) or installed size of program and its dependencies
* copyparty size is for the standalone python file; the windows exe is **6 MiB**
# reviews
* ✅ are advantages over copyparty
* ⚠️ are disadvantages
## [copyparty](https://github.com/9001/copyparty)
* resumable uploads which are verified server-side
* upload segmenting allows for potentially much faster uploads on some connections, and terabyte-sized files even on cloudflare
* both of the above are surprisingly uncommon features
* very cross-platform (python, no dependencies)
## [hfs2](https://github.com/rejetto/hfs2)
* the OG, the legend
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ windows-only
* ✅ config GUI
* vfs with gui config, per-volume permissions
* starting to show its age, hence the rewrite:
## [hfs3](https://www.rejetto.com/hfs/)
* nodejs; cross-platform
* vfs with gui config, per-volume permissions
* still early development, let's revisit later
## [nextcloud](https://github.com/nextcloud/server)
* php, mariadb
* ⚠️ isolated on-disk file hierarchy in per-user folders
* not that bad, can probably be remedied with bindmounts or maybe symlinks
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no write-only / upload-only folders
* ⚠️ http/webdav only; no ftp, zeroconf
* ⚠️ less awesome music player
* ⚠️ doesn't run on android or ipads
* ✅ great ui/ux
* ✅ config gui
* ✅ apps (android / iphone)
* copyparty: android upload-only app
* ✅ more granular permissions (per-file)
* ✅ search: fulltext indexing of file contents
* ✅ webauthn passwordless authentication
## [seafile](https://github.com/haiwen/seafile)
* c, mariadb
* ⚠️ [isolated on-disk file hierarchy](https://manual.seafile.com/maintain/seafile_fsck/), incompatible with other software
* *much worse than nextcloud* in that regard
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no write-only / upload-only folders
* ⚠️ http/webdav only; no ftp, zeroconf
* ⚠️ less awesome music player
* ⚠️ doesn't run on android or ipads
* ✅ great ui/ux
* ✅ config gui
* ✅ apps (android / iphone)
* copyparty: android upload-only app
* ✅ more granular permissions (per-file)
* ✅ search: fulltext indexing of file contents
## [rclone](https://github.com/rclone/rclone)
* nice standalone c program
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no web-ui, just a server / downloader / uploader utility
* ✅ works with almost any protocol, cloud provider
* ⚠️ copyparty's webdav server is slightly faster
## [dufs](https://github.com/sigoden/dufs)
* rust; cross-platform (windows, linux, macos)
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ doesn't support crazy filenames
* ✅ per-url access control (copyparty is per-volume)
* basic but really snappy ui
* upload, rename, delete, ... see feature matrix
## [chibisafe](https://github.com/chibisafe/chibisafe)
* nodejs; recommends docker
* *it has upload segmenting!*
* ⚠️ but uploads are still not resumable / accelerated / integrity-checked
* ⚠️ not portable
* ⚠️ isolated on-disk file hierarchy, incompatible with other software
* ⚠️ http/webdav only; no ftp or zeroconf
* ✅ pretty ui
* ✅ control panel for server settings and user management
* ✅ user registration
* ✅ searchable image tags; delete by tag
* ✅ browser extension to upload files to the server
* ✅ reject uploads by file extension
* copyparty: can reject uploads [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py) using plugins
* ✅ token auth (api keys)
## [kodbox](https://github.com/kalcaddle/kodbox)
* this thing is insane
* php; [docker](https://hub.docker.com/r/kodcloud/kodbox)
* *upload segmenting, acceleration, and integrity checking!*
* ⚠️ but uploads are not resumable(?)
* ⚠️ not portable
* ⚠️ isolated on-disk file hierarchy, incompatible with other software
* ⚠️ http/webdav only; no ftp or zeroconf
* ⚠️ some parts of the GUI are in chinese
* ✅ fantastic ui/ux
* ✅ control panel for server settings and user management
* ✅ file tags; file discussions!?
* ✅ video transcoding
* ✅ unzip uploaded archives
* ✅ IDE with syntax highlighting
* ✅ wysiwyg editor for openoffice files
## [filebrowser](https://github.com/filebrowser/filebrowser)
* go; cross-platform (windows, linux, mac)
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ http only; no webdav / ftp / zeroconf
* ⚠️ doesn't support crazy filenames
* ⚠️ no directory tree nav
* ⚠️ limited file search
* ✅ settings gui
* ✅ good ui/ux
* ⚠️ but no directory tree for navigation
* ✅ user signup
* ✅ command runner / remote shell
* supposed to have write-only folders but couldn't get it to work
## [filegator](https://github.com/filegator/filegator)
* php; cross-platform (windows, linux, mac)
* ⚠️ http only; no webdav / ftp / zeroconf
* ⚠️ does not support symlinks
* ⚠️ expensive download-as-zip feature
* ⚠️ doesn't support crazy filenames
* ⚠️ limited file search
* *it has upload segmenting and acceleration*
* ⚠️ but uploads are still not integrity-checked
## [updog](https://github.com/sc0tfree/updog)
* python; cross-platform
* basic directory listing with upload feature
* ⚠️ less portable
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no vfs; single folder, single account
## [goshs](https://github.com/patrickhener/goshs)
* go; cross-platform (windows, linux, mac)
* ⚠️ no vfs; single folder, single account
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ✅ cool clipboard widget
* copyparty: the markdown editor is an ok substitute
* read-only and upload-only modes (same as copyparty's write-only)
* https, webdav
## [gimme-that](https://github.com/nejdetckenobi/gimme-that)
* python, but with c dependencies
* ⚠️ no vfs; single folder, multiple accounts
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ weird folder structure for uploads
* ✅ clamav antivirus check on upload! neat
* optional max-filesize, os-notification on uploads
* copyparty: os-notification available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/notify.py)
## [ass](https://github.com/tycrek/ass)
* nodejs; recommends docker
* ⚠️ not portable
* ⚠️ upload only; no browser
* ⚠️ upload through sharex only; no web-ui
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ✅ token auth
* ✅ gps metadata stripping
* copyparty: possible with [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/image-noexif.py)
* ✅ discord integration (custom embeds, upload webhook)
* copyparty: [upload webhook plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/discord-announce.py)
* ✅ reject uploads by mimetype
* copyparty: can reject uploads [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py) using plugins
* ✅ can use S3 as storage backend; copyparty relies on rclone-mount for that
* ✅ custom 404 pages
## [linx](https://github.com/ZizzyDizzyMC/linx-server/)
* originally [andreimarcu/linx-server](https://github.com/andreimarcu/linx-server) but development has ended
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* some of its unique features have been added to copyparty as former linx users have migrated
* file expiration timers, filename randomization
* ✅ password-protected files
* copyparty: password-protected folders + filekeys to skip the folder password seem to cover most usecases
* ✅ file deletion keys
* ✅ download files as torrents
* ✅ remote uploads (send a link to the server and it downloads it)
* copyparty: available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py)
* ✅ can use S3 as storage backend; copyparty relies on rclone-mount for that
# briefly considered
* [pydio](https://github.com/pydio/cells): python/agpl3, looks great, fantastic ux -- but needs mariadb, systemwide install
* [gossa](https://github.com/pldubouilh/gossa): go/mit, minimalistic, basic file upload, text editor, mkdir and rename (no delete/move)
* [h5ai](https://larsjung.de/h5ai/): php/mit, slick ui, image viewer, directory tree, no upload feature

View File

@@ -3,9 +3,9 @@ FROM alpine:3.16
WORKDIR /z WORKDIR /z
ENV ver_asmcrypto=c72492f4a66e17a0e5dd8ad7874de354f3ccdaa5 \ ENV ver_asmcrypto=c72492f4a66e17a0e5dd8ad7874de354f3ccdaa5 \
ver_hashwasm=4.9.0 \ ver_hashwasm=4.9.0 \
ver_marked=4.2.3 \ ver_marked=4.2.5 \
ver_mde=2.18.0 \ ver_mde=2.18.0 \
ver_codemirror=5.65.10 \ ver_codemirror=5.65.11 \
ver_fontawesome=5.13.0 \ ver_fontawesome=5.13.0 \
ver_zopfli=1.0.3 ver_zopfli=1.0.3

View File

@@ -266,6 +266,14 @@ necho() {
cp -p "$f2" "$f1" cp -p "$f2" "$f1"
); done ); done
# resolve symlinks on windows
[ "$OSTYPE" = msys ] &&
(cd ..; git ls-files -s | awk '/^120000/{print$4}') |
while IFS= read -r x; do
[ $(wc -l <"$x") -gt 1 ] && continue
(cd "${x%/*}"; cp -p "../$(cat "${x##*/}")" ${x##*/})
done
# insert asynchat # insert asynchat
mkdir copyparty/vend mkdir copyparty/vend
for n in asyncore.py asynchat.py; do for n in asyncore.py asynchat.py; do

View File

@@ -1,7 +1,7 @@
#!/bin/bash #!/bin/bash
set -e set -e
for f in README.md docs/devnotes.md; do for f in README.md docs/devnotes.md docs/versus.md; do
cat $f | awk ' cat $f | awk '
function pr() { function pr() {
@@ -20,6 +20,8 @@ cat $f | awk '
/^#/{ /^#/{
lv=length($1); lv=length($1);
sub(/[^ ]+ /,""); sub(/[^ ]+ /,"");
sub(/\[/,"");
sub(/\]\([^)]+\)/,"");
bab=$0; bab=$0;
gsub(/ /,"-",bab); gsub(/ /,"-",bab);
gsub(/\./,"",bab); gsub(/\./,"",bab);
@@ -31,9 +33,9 @@ cat $f | awk '
{pr()} {pr()}
' > toc ' > toc
grep -E '^#+ [^ ]+ toc$' -B1000 -A2 <$f >p1 grep -E '^#+ *[^ ]+ toc$' -B1000 -A2 <$f >p1
h2="$(awk '/^#+ [^ ]+ toc$/{o=1;next} o&&/^#/{print;exit}' <$f)" h2="$(awk '/^#+ *[^ ]+ toc$/{o=1;next} o&&/^#/{print;exit}' <$f)"
grep -F "$h2" -B2 -A999999 <$f >p2 grep -F "$h2" -B2 -A999999 <$f >p2

View File

@@ -54,6 +54,8 @@ the values in the `ex:` columns are linkified to `example.com/$value`
and the table can be sorted by clicking the headers and the table can be sorted by clicking the headers
the sandbox also makes `location` unavailable but there is `loc` instead; this website's url is <big><big><b id="whereami">foo</b></big></big>
the difference is that with `copyparty_pre` you'll probably break various copyparty features but if you use `copyparty_post` then future copyparty versions will probably break you the difference is that with `copyparty_pre` you'll probably break various copyparty features but if you use `copyparty_post` then future copyparty versions will probably break you
@@ -136,6 +138,10 @@ render(dom) {
} }
}, },
render2(dom) { render2(dom) {
// loc == window.location except available inside sandbox
ebi('whereami').innerHTML = loc.href;
// this one also works because util.js gets pulled into the sandbox
window.makeSortable(dom.getElementsByTagName('table')[0]); window.makeSortable(dom.getElementsByTagName('table')[0]);
} }
``` ```

View File

@@ -98,7 +98,7 @@ class Cfg(Namespace):
def __init__(self, a=None, v=None, c=None): def __init__(self, a=None, v=None, c=None):
ka = {} ka = {}
ex = "daw dav_inf dav_mac e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vu e2vp ed emp force_js ihead magic nid nih no_acode no_athumb no_dav no_del no_dupe no_logues no_mv no_readme no_robots no_scandir no_thumb no_vthumb no_zip nw xdev xlink xvol" ex = "daw dav_inf dav_mac e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vu e2vp ed emp force_js getmod hardlink ihead magic never_symlink nid nih no_acode no_athumb no_dav no_dedup no_del no_dupe no_logues no_mv no_readme no_robots no_sb_md no_sb_lg no_scandir no_thumb no_vthumb no_zip nw xdev xlink xvol"
ka.update(**{k: False for k in ex.split()}) ka.update(**{k: False for k in ex.split()})
ex = "dotpart no_rescan no_sendfile no_voldump plain_ip" ex = "dotpart no_rescan no_sendfile no_voldump plain_ip"
@@ -110,9 +110,12 @@ class Cfg(Namespace):
ex = "df loris re_maxage rproxy rsp_slp s_wr_slp theme themes turbo" ex = "df loris re_maxage rproxy rsp_slp s_wr_slp theme themes turbo"
ka.update(**{k: 0 for k in ex.split()}) ka.update(**{k: 0 for k in ex.split()})
ex = "doctitle favico html_head log_fk mth textfiles R RS SR" ex = "doctitle favico html_head lg_sbf log_fk md_sbf mth textfiles R RS SR"
ka.update(**{k: "" for k in ex.split()}) ka.update(**{k: "" for k in ex.split()})
ex = "xad xar xau xbd xbr xbu xm"
ka.update(**{k: [] for k in ex.split()})
super(Cfg, self).__init__( super(Cfg, self).__init__(
a=a or [], a=a or [],
v=v or [], v=v or [],
@@ -193,4 +196,5 @@ class VHttpConn(object):
self.nbyte = 0 self.nbyte = 0
self.ico = None self.ico = None
self.thumbcli = None self.thumbcli = None
self.freshen_pwd = 0.0
self.t0 = time.time() self.t0 = time.time()