Compare commits

..

137 Commits

Author SHA1 Message Date
ed
dc2e2cbd4b v1.6.5 2023-02-12 14:11:45 +00:00
ed
5c12dac30f most ffmpeg builds dont support compressed modules 2023-02-12 14:02:43 +00:00
ed
641929191e fix reading smb shares on windows 2023-02-12 13:59:34 +00:00
ed
617321631a docker: add annotations 2023-02-11 21:10:28 +00:00
ed
ddc0c899f8 update archpkg to 1.6.4 2023-02-11 21:01:45 +00:00
ed
cdec42c1ae v1.6.4 2023-02-11 18:02:05 +00:00
ed
c48f469e39 park all clients waiting for a transcode 2023-02-11 17:23:29 +00:00
ed
44909cc7b8 print ffmpeg download url on windows 2023-02-11 17:22:24 +00:00
ed
8f61e1568c transcode chiptunes to opus;
* new audio/MPT formats: apac bonk dfpwm ilbc it itgz itr itz mo3 mod mptm mt2 okt s3gz s3m s3r s3z xm xmgz xmr xmz xpk
* new image/PIL formats: blp dcx emf eps fits flc fli fpx im j2k j2p psd spi wmf
2023-02-11 11:17:37 +00:00
ed
b7be7a0fd8 mirror docker images to ghcr 2023-02-10 23:40:30 +00:00
ed
1526a4e084 add docker packaging 2023-02-10 23:02:01 +00:00
ed
dbdb9574b1 doc-browser: fix md scaling + download hotkey 2023-02-10 21:33:48 +00:00
ed
853ae6386c config load summary + safer windows defaults 2023-02-10 21:32:42 +00:00
ed
a4b56c74c7 support long filepaths on win7 + misc windows fixes 2023-02-10 18:37:37 +00:00
ed
d7f1951e44 fix --cgen for 'g' perms 2023-02-08 22:38:21 +00:00
ed
7e2ff9825e ensure -e2tsr takes effect by ignoring dhash 2023-02-08 22:33:02 +00:00
ed
9b423396ec better description for anonymous permissions 2023-02-07 20:12:45 +00:00
ed
781146b2fb describe all database volflags in --help-flags 2023-02-07 20:07:06 +00:00
ed
84937d1ce0 add v2 config syntax (#20) 2023-02-07 19:54:08 +00:00
ed
98cce66aa4 cgen: update set of multivalue keys 2023-02-06 07:26:23 +00:00
ed
043c2d4858 cgen: fix permissions listing 2023-02-06 07:23:35 +00:00
ed
99cc434779 add config explainer + generator (#20) 2023-02-05 22:09:17 +00:00
ed
5095d17e81 more interesting config example 2023-02-05 21:32:20 +00:00
ed
87d835ae37 dont allow multiple volumes at the same fs-path 2023-02-05 21:16:36 +00:00
ed
6939ca768b pkg/arch: add prisonparty 2023-02-05 00:07:04 +00:00
ed
e3957e8239 systemd: prisonparty improvements 2023-02-05 00:03:40 +00:00
ed
4ad6e45216 only load *.conf files when including a folder 2023-02-05 00:01:10 +00:00
ed
76e5eeea3f prisonparty: fix reload signal 2023-02-05 00:00:18 +00:00
ed
eb17f57761 pypi fixes 2023-02-04 17:35:20 +00:00
ed
b0db14d8b0 indicate forced-randomized filenames 2023-02-04 15:18:09 +00:00
ed
2b644fa81b don't alias randomized filenames 2023-02-04 13:41:43 +00:00
ed
190ccee820 add optional version number on controlpanel 2023-02-04 13:41:34 +00:00
JeremyStarTM
4e7dd32e78 Added "wow this is better than nextcloud" (#19)
* Added "wow this is better than nextcloud"
2023-02-04 13:00:16 +00:00
john smith
5817fb66ae goddamn tabs 2023-02-03 12:50:17 +01:00
john smith
9cb04eef93 misc PKGBUILD fixes 2023-02-03 12:50:17 +01:00
john smith
0019fe7f04 indent PKGBUILD with spaces instead of tabs 2023-02-03 12:50:17 +01:00
john smith
852c6f2de1 remove unnecessary dependencies from PKGBUILD 2023-02-03 12:50:17 +01:00
john smith
c4191de2e7 improve PKGBUILD based on stuff in https://github.com/9001/copyparty/issues/17 2023-02-03 12:50:17 +01:00
ed
4de61defc9 add a link exporter to the unpost ui too 2023-02-02 22:57:59 +00:00
ed
0aa88590d0 should generalize this somehow 2023-02-02 22:35:13 +00:00
ed
405f3ee5fe adjustable toast position 2023-02-02 22:28:31 +00:00
ed
bc339f774a button to show/copy links for all recent uploads 2023-02-02 22:27:53 +00:00
ed
e67b695b23 show filekeys in recent-uploads ui 2023-02-02 21:22:51 +00:00
ed
4a7633ab99 fix outdated docs mentioned in #17 sry 2023-02-02 20:12:32 +00:00
john smith
c58f2ef61f fix PKGBUILD more 2023-02-02 20:48:20 +01:00
john smith
3866e6a3f2 fix PKGBUILD indentation 2023-02-02 20:30:48 +01:00
john smith
381686fc66 add PKGBUILD 2023-02-02 20:30:48 +01:00
ed
a918c285bf up2k-ui: button to randomize upload filenames 2023-02-01 22:26:18 +00:00
ed
1e20eafbe0 volflag to randomize all upload filenames 2023-02-01 21:58:01 +00:00
ed
39399934ee v1.6.3 2023-01-31 21:03:43 +00:00
ed
b47635150a shove #files aside while prologue sandbox is loading 2023-01-31 21:02:58 +00:00
ed
78d2f69ed5 prisonparty: support opus transcoding on debian
libblas.so and liblapack.so are symlinks into /etc/alternatives
2023-01-31 20:50:59 +00:00
ed
7a98dc669e block alerts in sandbox by default + add translation 2023-01-31 19:16:28 +00:00
ed
2f15bb5085 include filesize in notification 2023-01-31 19:03:13 +00:00
ed
712a578e6c indicate when a readme/logue was hidden 2023-01-31 19:01:24 +00:00
ed
d8dfc4ccb2 support davfs2 LOCK (uploads) + misc windows support + logue filtering 2023-01-31 18:53:38 +00:00
ed
e413007eb0 hide dotfiles from search results by default 2023-01-31 18:13:33 +00:00
ed
6d1d3e48d8 sandbox height didnt account for scrollbars 2023-01-31 17:54:04 +00:00
ed
04966164ce more iframe-resize-concealing tricks 2023-01-31 17:43:21 +00:00
ed
8b62aa7cc7 unlink files before replacing them
to avoid hardlink-related surprises
2023-01-31 17:17:18 +00:00
ed
1088e8c6a5 optimize 2023-01-30 22:53:27 +00:00
ed
8c54c2226f cover up most of the layout jank 2023-01-30 22:52:16 +00:00
ed
f74ac1f18b fix sandbox lag by helping the iframe cache js 2023-01-30 22:36:05 +00:00
ed
25931e62fd and nofollow the basic-browser link too 2023-01-29 22:15:22 +00:00
ed
707a940399 add nofollow to zip links 2023-01-29 22:10:03 +00:00
ed
87ef50d384 doc 2023-01-29 21:23:48 +00:00
ed
dcadf2b11c v1.6.2 2023-01-29 18:42:21 +00:00
ed
37a690a4c3 fix cookie + rproxy oversights 2023-01-29 18:34:48 +00:00
ed
87ad23fb93 docs + chmod 2023-01-29 18:28:53 +00:00
ed
5f54d534e3 hook/notify: add android support 2023-01-29 15:14:22 +00:00
ed
aecae552a4 v1.6.1 2023-01-29 04:41:16 +00:00
ed
eaa6b3d0be mute some startup noise 2023-01-29 04:33:28 +00:00
ed
c2ace91e52 v1.6.0 2023-01-29 02:55:44 +00:00
ed
0bac87c36f make loss of hotkeys more obvious 2023-01-29 01:40:02 +00:00
ed
e650d05939 shovel across most of the env too 2023-01-29 01:19:53 +00:00
ed
85a96e4446 add custom text selection colors because chrome is broken on fedora 2023-01-29 01:03:10 +00:00
ed
2569005139 support sandboxed markdown plugins 2023-01-29 00:57:08 +00:00
ed
c50cb66aef sandboxed other-origin iframes dont cache css 2023-01-28 23:40:25 +00:00
ed
d4c5fca15b sandbox readme.md / prologue / epilogue 2023-01-28 21:24:40 +00:00
ed
75cea4f684 misc 2023-01-28 13:35:49 +00:00
ed
68c6794d33 rewrite other symlinks after the actual move;
fixes volumes where symlinking is disabled
2023-01-28 01:14:29 +00:00
ed
82f98dd54d delete/move is now POST 2023-01-28 01:02:50 +00:00
ed
741d781c18 add cors controls + improve preflight + pw header 2023-01-28 00:59:04 +00:00
ed
0be1e43451 mention mtp in the hooks readme 2023-01-28 00:07:50 +00:00
ed
5366bf22bb describe detected network changes 2023-01-27 23:56:54 +00:00
ed
bcd91b1809 add eventhook examples 2023-01-27 23:55:57 +00:00
ed
9bd5738e6f shorter fallback hostname 2023-01-27 22:19:25 +00:00
ed
bab4aa4c0a mkdir fix 2023-01-27 22:16:10 +00:00
ed
e965b9b9e2 mkdir missing volumes on startup 2023-01-27 21:52:28 +00:00
ed
31101427d3 support downloading blockdev contents 2023-01-27 21:09:57 +00:00
ed
a083dc36ba dont get confused by dangling symlinks at target 2023-01-27 20:27:00 +00:00
ed
9b7b9262aa promote dedup control to volflags 2023-01-25 21:46:15 +00:00
ed
660011fa6e md-editor: make hotkey ^e more global 2023-01-25 20:58:28 +00:00
ed
ead31b6823 add eventhook sanchecks 2023-01-25 20:51:02 +00:00
ed
4310580cd4 separate http/https logins (breaks ie4 / win3.11 login) 2023-01-24 21:23:57 +00:00
ed
b005acbfda enable text selection between breadcrumbs + update vs 2023-01-23 22:44:29 +00:00
ed
460709e6f3 upgrade wget downloader to use event hooks 2023-01-22 23:45:11 +00:00
ed
a8768d05a9 add comparison to similar software 2023-01-22 23:39:19 +00:00
ed
f8e3e87a52 add event hooks 2023-01-22 23:35:31 +00:00
ed
70f1642d0d allow tar/zip download of hidden folders 2023-01-21 20:56:44 +00:00
ed
3fc7561da4 macos 2023-01-21 10:36:31 +00:00
ed
9065226c3d oh great its in lts too 2023-01-21 10:19:04 +00:00
ed
b7e321fa47 cleanup 2023-01-19 22:26:49 +00:00
ed
664665b86b fix some location-rproxy bugs 2023-01-19 22:26:24 +00:00
ed
f4f362b7a4 add --freebind 2023-01-18 21:55:36 +00:00
ed
577d23f460 zeroconf: detect network change and reannounce 2023-01-18 21:27:27 +00:00
ed
504e168486 compensate avg.speed for single-chunk uploads 2023-01-18 19:53:19 +00:00
ed
f2f9640371 workaround firefox layout bug:
three-line toasts get a scrollbar even if it doesn't need one
and the width is not adjusted correctly when that happens
2023-01-18 19:45:04 +00:00
ed
ee46f832b1 u2cli: add option -ns for slow terminals 2023-01-17 23:29:51 +00:00
ed
b0e755d410 give curl colored (yet sortable) plaintext listings 2023-01-17 23:22:43 +00:00
ed
cfd24604d5 ux tweaks 2023-01-17 23:21:31 +00:00
ed
264894e595 add cursed usecases 2023-01-16 21:46:11 +00:00
ed
5bb9f56247 linux 6.1 fixed the 6.0 bugs; remove workarounds 2023-01-16 20:44:57 +00:00
ed
18942ed066 location-based rproxy fixes 2023-01-16 20:09:45 +00:00
ed
85321a6f31 stale tree is better than no tree 2023-01-15 20:54:03 +00:00
ed
baf641396d add optional powered-by footnode 2023-01-15 20:52:38 +00:00
ed
17c91e7014 override bogus mimetypes 2023-01-14 15:10:32 +00:00
ed
010770684d workaround another linux kernel bug 2023-01-14 08:16:15 +00:00
ed
b4c503657b ignore loss of stdout 2023-01-14 07:35:44 +00:00
ed
71bd306268 fix unpost filters with slashes 2023-01-13 17:56:32 +00:00
ed
dd7fab1352 u2cli: properly retry failed handshakes 2023-01-13 07:17:41 +00:00
ed
dacca18863 v1.5.6 2023-01-12 05:15:30 +00:00
ed
53d92cc0a6 faster upload of small files on high-latency nets 2023-01-12 02:53:22 +00:00
ed
434823f6f0 ui: allow changing num.threads in search-only 2023-01-11 16:14:02 +00:00
ed
2cb1f50370 fix dualstack on lo 2023-01-11 16:10:07 +00:00
ed
03f53f6392 gallery: fix js error on digit-keypress viewing pics 2023-01-11 16:08:15 +00:00
ed
a70ecd7af0 v1.5.5 2022-12-30 07:54:34 +00:00
ed
8b81e58205 mdns fixes 2022-12-30 07:47:53 +00:00
ed
4500c04edf v1.5.4 2022-12-29 04:44:15 +00:00
ed
6222ddd720 fix ssdp on dualstack 2022-12-22 16:50:46 +00:00
ed
8a7135cf41 support fat32 time precision, avoiding rescans
posted from warzaw airport otw to japan
2022-12-20 22:19:32 +01:00
ed
b4c7282956 password from file 2022-12-20 13:28:48 +00:00
ed
8491a40a04 Create SECURITY.md 2022-12-19 21:18:27 +00:00
ed
343d38b693 extend image-viewer with modern formats 2022-12-15 22:38:33 +00:00
ed
6cf53d7364 try next thumbnailer if one fails;
libvips assumes imagemagick was built with avif
2022-12-15 22:34:51 +00:00
ed
b070d44de7 libvips logging + raise codec errors 2022-12-15 22:22:04 +00:00
ed
79aa40fdea cosmetic fixes 2022-12-14 23:12:51 +00:00
87 changed files with 4283 additions and 800 deletions

3
.gitignore vendored
View File

@@ -25,6 +25,9 @@ copyparty.egg-info/
copyparty/res/COPYING.txt copyparty/res/COPYING.txt
copyparty/web/deps/ copyparty/web/deps/
srv/ srv/
scripts/docker/i/
contrib/package/arch/pkg/
contrib/package/arch/src/
# state/logs # state/logs
up.*.txt up.*.txt

1
.vscode/launch.json vendored
View File

@@ -8,6 +8,7 @@
"module": "copyparty", "module": "copyparty",
"console": "integratedTerminal", "console": "integratedTerminal",
"cwd": "${workspaceFolder}", "cwd": "${workspaceFolder}",
"justMyCode": false,
"args": [ "args": [
//"-nw", //"-nw",
"-ed", "-ed",

View File

@@ -52,9 +52,11 @@
"--disable=missing-module-docstring", "--disable=missing-module-docstring",
"--disable=missing-class-docstring", "--disable=missing-class-docstring",
"--disable=missing-function-docstring", "--disable=missing-function-docstring",
"--disable=import-outside-toplevel",
"--disable=wrong-import-position", "--disable=wrong-import-position",
"--disable=raise-missing-from", "--disable=raise-missing-from",
"--disable=bare-except", "--disable=bare-except",
"--disable=broad-except",
"--disable=invalid-name", "--disable=invalid-name",
"--disable=line-too-long", "--disable=line-too-long",
"--disable=consider-using-f-string" "--disable=consider-using-f-string"
@@ -64,6 +66,7 @@
"editor.formatOnSave": true, "editor.formatOnSave": true,
"[html]": { "[html]": {
"editor.formatOnSave": false, "editor.formatOnSave": false,
"editor.autoIndent": "keep",
}, },
"[css]": { "[css]": {
"editor.formatOnSave": false, "editor.formatOnSave": false,

105
README.md
View File

@@ -1,6 +1,6 @@
# ⇆🎉 copyparty # ⇆🎉 copyparty
* http file sharing hub (py2/py3) [(on PyPI)](https://pypi.org/project/copyparty/) * portable file sharing hub (py2/py3) [(on PyPI)](https://pypi.org/project/copyparty/)
* MIT-Licensed, 2019-05-26, ed @ irc.rizon.net * MIT-Licensed, 2019-05-26, ed @ irc.rizon.net
@@ -57,7 +57,7 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
* [other tricks](#other-tricks) * [other tricks](#other-tricks)
* [searching](#searching) - search by size, date, path/name, mp3-tags, ... * [searching](#searching) - search by size, date, path/name, mp3-tags, ...
* [server config](#server-config) - using arguments or config files, or a mix of both * [server config](#server-config) - using arguments or config files, or a mix of both
* [zeroconf](#zeroconf) - announce enabled services on the LAN * [zeroconf](#zeroconf) - announce enabled services on the LAN ([pic](https://user-images.githubusercontent.com/241032/215344737-0eae8d98-9496-4256-9aa8-cd2f6971810d.png))
* [mdns](#mdns) - LAN domain-name and feature announcer * [mdns](#mdns) - LAN domain-name and feature announcer
* [ssdp](#ssdp) - windows-explorer announcer * [ssdp](#ssdp) - windows-explorer announcer
* [qr-code](#qr-code) - print a qr-code [(screenshot)](https://user-images.githubusercontent.com/241032/194728533-6f00849b-c6ac-43c6-9359-83e454d11e00.png) for quick access * [qr-code](#qr-code) - print a qr-code [(screenshot)](https://user-images.githubusercontent.com/241032/194728533-6f00849b-c6ac-43c6-9359-83e454d11e00.png) for quick access
@@ -75,7 +75,8 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
* [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else * [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else
* [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload * [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload
* [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags * [file parser plugins](#file-parser-plugins) - provide custom parsers to index additional tags
* [upload events](#upload-events) - trigger a script/program on each upload * [event hooks](#event-hooks) - trigger a program on uploads, renames etc ([examples](./bin/hooks/))
* [upload events](#upload-events) - the older, more powerful approach ([examples](./bin/mtag/))
* [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed * [hiding from google](#hiding-from-google) - tell search engines you dont wanna be indexed
* [themes](#themes) * [themes](#themes)
* [complete examples](#complete-examples) * [complete examples](#complete-examples)
@@ -87,6 +88,7 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
* [client-side](#client-side) - when uploading files * [client-side](#client-side) - when uploading files
* [security](#security) - some notes on hardening * [security](#security) - some notes on hardening
* [gotchas](#gotchas) - behavior that might be unexpected * [gotchas](#gotchas) - behavior that might be unexpected
* [cors](#cors) - cross-site request config
* [recovering from crashes](#recovering-from-crashes) * [recovering from crashes](#recovering-from-crashes)
* [client crashes](#client-crashes) * [client crashes](#client-crashes)
* [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads * [frefox wsod](#frefox-wsod) - firefox 87 can crash during uploads
@@ -106,7 +108,9 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set! download **[copyparty-sfx.py](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py)** and you're all set!
if you cannot install python, you can use [copyparty.exe](#copypartyexe) instead * or install through pypi (python3 only): `python3 -m pip install --user -U copyparty`
* or if you cannot install python, you can use [copyparty.exe](#copypartyexe) instead
* or if you prefer to [use docker](./scripts/docker/) 🐋 you can do that too
running the sfx without arguments (for example doubleclicking it on Windows) will give everyone read/write access to the current folder; you may want [accounts and volumes](#accounts-and-volumes) running the sfx without arguments (for example doubleclicking it on Windows) will give everyone read/write access to the current folder; you may want [accounts and volumes](#accounts-and-volumes)
@@ -163,6 +167,7 @@ recommended additional steps on debian which enable audio metadata and thumbnai
* upload * upload
* ☑ basic: plain multipart, ie6 support * ☑ basic: plain multipart, ie6 support
* ☑ [up2k](#uploading): js, resumable, multithreaded * ☑ [up2k](#uploading): js, resumable, multithreaded
* unaffected by cloudflare's max-upload-size (100 MiB)
* ☑ stash: simple PUT filedropper * ☑ stash: simple PUT filedropper
* ☑ [unpost](#unpost): undo/delete accidental uploads * ☑ [unpost](#unpost): undo/delete accidental uploads
* ☑ [self-destruct](#self-destruct) (specified server-side or client-side) * ☑ [self-destruct](#self-destruct) (specified server-side or client-side)
@@ -174,7 +179,7 @@ recommended additional steps on debian which enable audio metadata and thumbnai
* browser * browser
* ☑ [navpane](#navpane) (directory tree sidebar) * ☑ [navpane](#navpane) (directory tree sidebar)
* ☑ file manager (cut/paste, delete, [batch-rename](#batch-rename)) * ☑ file manager (cut/paste, delete, [batch-rename](#batch-rename))
* ☑ audio player (with OS media controls and opus transcoding) * ☑ audio player (with [OS media controls](https://user-images.githubusercontent.com/241032/215347492-b4250797-6c90-4e09-9a4c-721edf2fb15c.png) and opus transcoding)
* ☑ image gallery with webm player * ☑ image gallery with webm player
* ☑ textfile browser with syntax hilighting * ☑ textfile browser with syntax hilighting
* ☑ [thumbnails](#thumbnails) * ☑ [thumbnails](#thumbnails)
@@ -196,7 +201,7 @@ recommended additional steps on debian which enable audio metadata and thumbnai
small collection of user feedback small collection of user feedback
`good enough`, `surprisingly correct`, `certified good software`, `just works`, `why` `good enough`, `surprisingly correct`, `certified good software`, `just works`, `why`, `wow this is better than nextcloud`
# motivations # motivations
@@ -205,8 +210,7 @@ project goals / philosophy
* inverse linux philosophy -- do all the things, and do an *okay* job * inverse linux philosophy -- do all the things, and do an *okay* job
* quick drop-in service to get a lot of features in a pinch * quick drop-in service to get a lot of features in a pinch
* there are probably [better alternatives](https://github.com/awesome-selfhosted/awesome-selfhosted) if you have specific/long-term needs * some of [the alternatives](./docs/versus.md) might be a better fit for you
* but the resumable multithreaded uploads are p slick ngl
* run anywhere, support everything * run anywhere, support everything
* as many web-browsers and python versions as possible * as many web-browsers and python versions as possible
* every browser should at least be able to browse, download, upload files * every browser should at least be able to browse, download, upload files
@@ -230,7 +234,7 @@ browser-specific:
* Android-Chrome: increase "parallel uploads" for higher speed (android bug) * Android-Chrome: increase "parallel uploads" for higher speed (android bug)
* Android-Firefox: takes a while to select files (their fix for ☝️) * Android-Firefox: takes a while to select files (their fix for ☝️)
* Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now* * Desktop-Firefox: ~~may use gigabytes of RAM if your files are massive~~ *seems to be OK now*
* Desktop-Firefox: may stop you from deleting files you've uploaded until you visit `about:memory` and click `Minimize memory usage` * Desktop-Firefox: [may stop you from unplugging USB flashdrives](https://bugzilla.mozilla.org/show_bug.cgi?id=1792598) until you visit `about:memory` and click `Minimize memory usage`
server-os-specific: server-os-specific:
* RHEL8 / Rocky8: you can run copyparty using `/usr/libexec/platform-python` * RHEL8 / Rocky8: you can run copyparty using `/usr/libexec/platform-python`
@@ -248,23 +252,15 @@ server-os-specific:
* Windows: if the `up2k.db` (filesystem index) is on a samba-share or network disk, you'll get unpredictable behavior if the share is disconnected for a bit * Windows: if the `up2k.db` (filesystem index) is on a samba-share or network disk, you'll get unpredictable behavior if the share is disconnected for a bit
* use `--hist` or the `hist` volflag (`-v [...]:c,hist=/tmp/foo`) to place the db on a local disk instead * use `--hist` or the `hist` volflag (`-v [...]:c,hist=/tmp/foo`) to place the db on a local disk instead
* all volumes must exist / be available on startup; up2k (mtp especially) gets funky otherwise * all volumes must exist / be available on startup; up2k (mtp especially) gets funky otherwise
* [the database can get stuck](https://github.com/9001/copyparty/issues/10)
* has only happened once but that is once too many
* luckily not dangerous for file integrity and doesn't really stop uploads or anything like that
* but would really appreciate some logs if anyone ever runs into it again
* probably more, pls let me know * probably more, pls let me know
## not my bugs ## not my bugs
* [Chrome issue 1317069](https://bugs.chromium.org/p/chromium/issues/detail?id=1317069) -- if you try to upload a folder which contains symlinks by dragging it into the browser, the symlinked files will not get uploaded * [Chrome issue 1317069](https://bugs.chromium.org/p/chromium/issues/detail?id=1317069) -- if you try to upload a folder which contains symlinks by dragging it into the browser, the symlinked files will not get uploaded
* [Chrome issue 1354816](https://bugs.chromium.org/p/chromium/issues/detail?id=1354816) -- chrome may eat all RAM uploading over plaintext http with `mt` enabled * [Chrome issue 1352210](https://bugs.chromium.org/p/chromium/issues/detail?id=1352210) -- plaintext http may be faster at filehashing than https (but also extremely CPU-intensive)
* more amusingly, [Chrome issue 1354800](https://bugs.chromium.org/p/chromium/issues/detail?id=1354800) -- chrome may eat all RAM uploading in general (altho you probably won't run into this one) * [Firefox issue 1790500](https://bugzilla.mozilla.org/show_bug.cgi?id=1790500) -- entire browser can crash after uploading ~4000 small files
* [Chrome issue 1352210](https://bugs.chromium.org/p/chromium/issues/detail?id=1352210) -- plaintext http may be faster at filehashing than https (but also extremely CPU-intensive and likely to run into the above gc bugs)
* [Firefox issue 1790500](https://bugzilla.mozilla.org/show_bug.cgi?id=1790500) -- sometimes forgets to close filedescriptors during upload so the browser can crash after ~4000 files
* iPhones: the volume control doesn't work because [apple doesn't want it to](https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/Using_HTML5_Audio_Video/Device-SpecificConsiderations/Device-SpecificConsiderations.html#//apple_ref/doc/uid/TP40009523-CH5-SW11) * iPhones: the volume control doesn't work because [apple doesn't want it to](https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/Using_HTML5_Audio_Video/Device-SpecificConsiderations/Device-SpecificConsiderations.html#//apple_ref/doc/uid/TP40009523-CH5-SW11)
* *future workaround:* enable the equalizer, make it all-zero, and set a negative boost to reduce the volume * *future workaround:* enable the equalizer, make it all-zero, and set a negative boost to reduce the volume
@@ -287,6 +283,9 @@ server-os-specific:
upgrade notes upgrade notes
* `1.6.0` (2023-01-29):
* http-api: delete/move is now `POST` instead of `GET`
* everything other than `GET` and `HEAD` must pass [cors validation](#cors)
* `1.5.0` (2022-12-03): [new chunksize formula](https://github.com/9001/copyparty/commit/54e1c8d261df) for files larger than 128 GiB * `1.5.0` (2022-12-03): [new chunksize formula](https://github.com/9001/copyparty/commit/54e1c8d261df) for files larger than 128 GiB
* **users:** upgrade to the latest [cli uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) if you use that * **users:** upgrade to the latest [cli uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) if you use that
* **devs:** update third-party up2k clients (if those even exist) * **devs:** update third-party up2k clients (if those even exist)
@@ -301,13 +300,14 @@ upgrade notes
* you can also do this with linux filesystem permissions; `chmod 111 music` will make it possible to access files and folders inside the `music` folder but not list the immediate contents -- also works with other software, not just copyparty * you can also do this with linux filesystem permissions; `chmod 111 music` will make it possible to access files and folders inside the `music` folder but not list the immediate contents -- also works with other software, not just copyparty
* can I make copyparty download a file to my server if I give it a URL? * can I make copyparty download a file to my server if I give it a URL?
* not really, but there is a [terrible hack](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/wget.py) which makes it possible * yes, using [hooks](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py)
# accounts and volumes # accounts and volumes
per-folder, per-user permissions - if your setup is getting complex, consider making a [config file](./docs/example.conf) instead of using arguments per-folder, per-user permissions - if your setup is getting complex, consider making a [config file](./docs/example.conf) instead of using arguments
* much easier to manage, and you can modify the config at runtime with `systemctl reload copyparty` or more conveniently using the `[reload cfg]` button in the control-panel (if logged in as admin) * much easier to manage, and you can modify the config at runtime with `systemctl reload copyparty` or more conveniently using the `[reload cfg]` button in the control-panel (if logged in as admin)
* changes to the `[global]` config section requires a restart to take effect
a quick summary can be seen using `--help-accounts` a quick summary can be seen using `--help-accounts`
@@ -692,11 +692,12 @@ using arguments or config files, or a mix of both:
* config files (`-c some.conf`) can set additional commandline arguments; see [./docs/example.conf](docs/example.conf) and [./docs/example2.conf](docs/example2.conf) * config files (`-c some.conf`) can set additional commandline arguments; see [./docs/example.conf](docs/example.conf) and [./docs/example2.conf](docs/example2.conf)
* `kill -s USR1` (same as `systemctl reload copyparty`) to reload accounts and volumes from config files without restarting * `kill -s USR1` (same as `systemctl reload copyparty`) to reload accounts and volumes from config files without restarting
* or click the `[reload cfg]` button in the control-panel when logged in as admin * or click the `[reload cfg]` button in the control-panel when logged in as admin
* changes to the `[global]` config section requires a restart to take effect
## zeroconf ## zeroconf
announce enabled services on the LAN if you specify the `-z` option, which enables [mdns](#mdns) and [ssdp](#ssdp) announce enabled services on the LAN ([pic](https://user-images.githubusercontent.com/241032/215344737-0eae8d98-9496-4256-9aa8-cd2f6971810d.png)) -- `-z` enables both [mdns](#mdns) and [ssdp](#ssdp)
* `--z-on` / `--z-off`' limits the feature to certain networks * `--z-on` / `--z-off`' limits the feature to certain networks
@@ -802,7 +803,7 @@ some **BIG WARNINGS** specific to SMB/CIFS, in decreasing importance:
and some minor issues, and some minor issues,
* clients only see the first ~400 files in big folders; [impacket#1433](https://github.com/SecureAuthCorp/impacket/issues/1433) * clients only see the first ~400 files in big folders; [impacket#1433](https://github.com/SecureAuthCorp/impacket/issues/1433)
* hot-reload of server config (`/?reload=cfg`) only works for volumes, not account passwords * hot-reload of server config (`/?reload=cfg`) does not include the `[global]` section (commandline args)
* listens on the first IPv4 `-i` interface only (default = :: = 0.0.0.0 = all) * listens on the first IPv4 `-i` interface only (default = :: = 0.0.0.0 = all)
* login doesn't work on winxp, but anonymous access is ok -- remove all accounts from copyparty config for that to work * login doesn't work on winxp, but anonymous access is ok -- remove all accounts from copyparty config for that to work
* win10 onwards does not allow connecting anonymously / without accounts * win10 onwards does not allow connecting anonymously / without accounts
@@ -932,6 +933,8 @@ some examples,
## other flags ## other flags
* `:c,magic` enables filetype detection for nameless uploads, same as `--magic` * `:c,magic` enables filetype detection for nameless uploads, same as `--magic`
* needs https://pypi.org/project/python-magic/ `python3 -m pip install --user -U python-magic`
* on windows grab this instead `python3 -m pip install --user -U python-magic-bin`
## database location ## database location
@@ -1000,9 +1003,18 @@ copyparty can invoke external programs to collect additional metadata for files
if something doesn't work, try `--mtag-v` for verbose error messages if something doesn't work, try `--mtag-v` for verbose error messages
## upload events ## event hooks
trigger a script/program on each upload like so: trigger a program on uploads, renames etc ([examples](./bin/hooks/))
you can set hooks before and/or after an event happens, and currently you can hook uploads, moves/renames, and deletes
there's a bunch of flags and stuff, see `--help-hooks`
### upload events
the older, more powerful approach ([examples](./bin/mtag/)):
``` ```
-v /mnt/inc:inc:w:c,mte=+x1:c,mtp=x1=ad,kn,/usr/bin/notify-send -v /mnt/inc:inc:w:c,mte=+x1:c,mtp=x1=ad,kn,/usr/bin/notify-send
@@ -1012,11 +1024,12 @@ so filesystem location `/mnt/inc` shared at `/inc`, write-only for everyone, app
that'll run the command `notify-send` with the path to the uploaded file as the first and only argument (so on linux it'll show a notification on-screen) that'll run the command `notify-send` with the path to the uploaded file as the first and only argument (so on linux it'll show a notification on-screen)
note that it will only trigger on new unique files, not dupes note that this is way more complicated than the new [event hooks](#event-hooks) but this approach has the following advantages:
* non-blocking and multithreaded; doesn't hold other uploads back
* you get access to tags from FFmpeg and other mtp parsers
* only trigger on new unique files, not dupes
and it will occupy the parsing threads, so fork anything expensive (or set `kn` to have copyparty fork it for you) -- otoh if you want to intentionally queue/singlethread you can combine it with `--mtag-mt 1` note that it will occupy the parsing threads, so fork anything expensive (or set `kn` to have copyparty fork it for you) -- otoh if you want to intentionally queue/singlethread you can combine it with `--mtag-mt 1`
if this becomes popular maybe there should be a less janky way to do it actually
## hiding from google ## hiding from google
@@ -1142,11 +1155,11 @@ interact with copyparty using non-browser clients
* curl/wget: upload some files (post=file, chunk=stdin) * curl/wget: upload some files (post=file, chunk=stdin)
* `post(){ curl -F act=bput -F f=@"$1" http://127.0.0.1:3923/?pw=wark;}` * `post(){ curl -F act=bput -F f=@"$1" http://127.0.0.1:3923/?pw=wark;}`
`post movie.mkv` `post movie.mkv`
* `post(){ curl -b cppwd=wark -H rand:8 -T "$1" http://127.0.0.1:3923/;}` * `post(){ curl -H pw:wark -H rand:8 -T "$1" http://127.0.0.1:3923/;}`
`post movie.mkv` `post movie.mkv`
* `post(){ wget --header='Cookie: cppwd=wark' --post-file="$1" -O- http://127.0.0.1:3923/?raw;}` * `post(){ wget --header='pw: wark' --post-file="$1" -O- http://127.0.0.1:3923/?raw;}`
`post movie.mkv` `post movie.mkv`
* `chunk(){ curl -b cppwd=wark -T- http://127.0.0.1:3923/;}` * `chunk(){ curl -H pw:wark -T- http://127.0.0.1:3923/;}`
`chunk <movie.mkv` `chunk <movie.mkv`
* bash: when curl and wget is not available or too boring * bash: when curl and wget is not available or too boring
@@ -1170,7 +1183,7 @@ copyparty returns a truncated sha512sum of your PUT/POST as base64; you can gene
b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|tr '+/' '-_'|head -c44;} b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|tr '+/' '-_'|head -c44;}
b512 <movie.mkv b512 <movie.mkv
you can provide passwords using cookie `cppwd=hunter2`, as a url-param `?pw=hunter2`, or with basic-authentication (either as the username or password) you can provide passwords using header `PW: hunter2`, cookie `cppwd=hunter2`, url-param `?pw=hunter2`, or with basic-authentication (either as the username or password)
NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename NOTE: curl will not send the original filename if you use `-T` combined with url-params! Also, make sure to always leave a trailing slash in URLs unless you want to override the filename
@@ -1206,7 +1219,7 @@ below are some tweaks roughly ordered by usefulness:
* `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger) * `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger)
* `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example: * `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example:
* huge amount of short-lived connections * huge amount of short-lived connections
* really heavy traffic (downloads/uploads) * simultaneous downloads and uploads saturating a 20gbps connection
...however it adds an overhead to internal communication so it might be a net loss, see if it works 4 u ...however it adds an overhead to internal communication so it might be a net loss, see if it works 4 u
@@ -1231,6 +1244,11 @@ when uploading files,
some notes on hardening some notes on hardening
* set `--rproxy 0` if your copyparty is directly facing the internet (not through a reverse-proxy)
* cors doesn't work right otherwise
safety profiles:
* option `-s` is a shortcut to set the following options: * option `-s` is a shortcut to set the following options:
* `--no-thumb` disables thumbnails and audio transcoding to stop copyparty from running `FFmpeg`/`Pillow`/`VIPS` on uploaded files, which is a [good idea](https://www.cvedetails.com/vulnerability-list.php?vendor_id=3611) if anonymous upload is enabled * `--no-thumb` disables thumbnails and audio transcoding to stop copyparty from running `FFmpeg`/`Pillow`/`VIPS` on uploaded files, which is a [good idea](https://www.cvedetails.com/vulnerability-list.php?vendor_id=3611) if anonymous upload is enabled
* `--no-mtag-ff` uses `mutagen` to grab music tags instead of `FFmpeg`, which is safer and faster but less accurate * `--no-mtag-ff` uses `mutagen` to grab music tags instead of `FFmpeg`, which is safer and faster but less accurate
@@ -1238,7 +1256,6 @@ some notes on hardening
* `--no-robots` and `--force-js` makes life harder for crawlers, see [hiding from google](#hiding-from-google) * `--no-robots` and `--force-js` makes life harder for crawlers, see [hiding from google](#hiding-from-google)
* option `-ss` is a shortcut for the above plus: * option `-ss` is a shortcut for the above plus:
* `--no-logues` and `--no-readme` disables support for readme's and prologues / epilogues in directory listings, which otherwise lets people upload arbitrary `<script>` tags
* `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support * `--unpost 0`, `--no-del`, `--no-mv` disables all move/delete support
* `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance * `--hardlink` creates hardlinks instead of symlinks when deduplicating uploads, which is less maintenance
* however note if you edit one file it will also affect the other copies * however note if you edit one file it will also affect the other copies
@@ -1249,6 +1266,7 @@ some notes on hardening
* option `-sss` is a shortcut for the above plus: * option `-sss` is a shortcut for the above plus:
* `--no-dav` disables webdav support * `--no-dav` disables webdav support
* `--no-logues` and `--no-readme` disables support for readme's and prologues / epilogues in directory listings, which otherwise lets people upload arbitrary (but sandboxed) `<script>` tags
* `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk * `-lo cpp-%Y-%m%d-%H%M%S.txt.xz` enables logging to disk
* `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks * `-ls **,*,ln,p,r` does a scan on startup for any dangerous symlinks
@@ -1256,6 +1274,7 @@ other misc notes:
* you can disable directory listings by giving permission `g` instead of `r`, only accepting direct URLs to files * you can disable directory listings by giving permission `g` instead of `r`, only accepting direct URLs to files
* combine this with volflag `c,fk` to generate filekeys (per-file accesskeys); users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404 * combine this with volflag `c,fk` to generate filekeys (per-file accesskeys); users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404
* the default filekey entropy is fairly small so give `--fk-salt` around 30 characters if you want filekeys longer than 16 chars
* permissions `wG` lets users upload files and receive their own filekeys, still without being able to see other uploads * permissions `wG` lets users upload files and receive their own filekeys, still without being able to see other uploads
@@ -1264,6 +1283,22 @@ other misc notes:
behavior that might be unexpected behavior that might be unexpected
* users without read-access to a folder can still see the `.prologue.html` / `.epilogue.html` / `README.md` contents, for the purpose of showing a description on how to use the uploader for example * users without read-access to a folder can still see the `.prologue.html` / `.epilogue.html` / `README.md` contents, for the purpose of showing a description on how to use the uploader for example
* users can submit `<script>`s which autorun for other visitors in a few ways;
* uploading a `README.md` -- avoid with `--no-readme`
* renaming `some.html` to `.epilogue.html` -- avoid with either `--no-logues` or `--no-dot-ren`
* the directory-listing embed is sandboxed (so any malicious scripts can't do any damage) but the markdown editor is not
## cors
cross-site request config
by default, except for `GET` and `HEAD` operations, all requests must either:
* not contain an `Origin` header at all
* or have an `Origin` matching the server domain
* or the header `PW` with your password as value
cors can be configured with `--acao` and `--acam`, or the protections entirely disabled with `--allow-csrf`
# recovering from crashes # recovering from crashes
@@ -1316,7 +1351,7 @@ enable [thumbnails](#thumbnails) of...
* **AVIF pictures:** `pyvips` or `ffmpeg` or `pillow-avif-plugin` * **AVIF pictures:** `pyvips` or `ffmpeg` or `pillow-avif-plugin`
* **JPEG XL pictures:** `pyvips` or `ffmpeg` * **JPEG XL pictures:** `pyvips` or `ffmpeg`
enable [smb](#smb-server) support: enable [smb](#smb-server) support (**not** recommended):
* `impacket==0.10.0` * `impacket==0.10.0`
`pyvips` gives higher quality thumbnails than `Pillow` and is 320% faster, using 270% more ram: `sudo apt install libvips42 && python3 -m pip install --user -U pyvips` `pyvips` gives higher quality thumbnails than `Pillow` and is 320% faster, using 270% more ram: `sudo apt install libvips42 && python3 -m pip install --user -U pyvips`

9
SECURITY.md Normal file
View File

@@ -0,0 +1,9 @@
# Security Policy
if you hit something extra juicy pls let me know on either of the following
* email -- `copyparty@ocv.ze` except `ze` should be `me`
* [mastodon dm](https://layer8.space/@tripflag) -- `@tripflag@layer8.space`
* [github private vulnerability report](https://github.com/9001/copyparty/security/advisories/new), wow that form is complicated
* [twitter dm](https://twitter.com/tripflag) (if im somehow not banned yet)
no bug bounties sorry! all i can offer is greetz in the release notes

19
bin/hooks/README.md Normal file
View File

@@ -0,0 +1,19 @@
standalone programs which are executed by copyparty when an event happens (upload, file rename, delete, ...)
these programs either take zero arguments, or a filepath (the affected file), or a json message with filepath + additional info
> **note:** in addition to event hooks (the stuff described here), copyparty has another api to run your programs/scripts while providing way more information such as audio tags / video codecs / etc and optionally daisychaining data between scripts in a processing pipeline; if that's what you want then see [mtp plugins](../mtag/) instead
# after upload
* [notify.py](notify.py) shows a desktop notification ([example](https://user-images.githubusercontent.com/241032/215335767-9c91ed24-d36e-4b6b-9766-fb95d12d163f.png))
* [discord-announce.py](discord-announce.py) announces new uploads on discord using webhooks ([example](https://user-images.githubusercontent.com/241032/215304439-1c1cb3c8-ec6f-4c17-9f27-81f969b1811a.png))
* [reject-mimetype.py](reject-mimetype.py) rejects uploads unless the mimetype is acceptable
# before upload
* [reject-extension.py](reject-extension.py) rejects uploads if they match a list of file extensions
# on message
* [wget.py](wget.py) lets you download files by POSTing URLs to copyparty

61
bin/hooks/discord-announce.py Executable file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
import sys
import json
import requests
from copyparty.util import humansize, quotep
_ = r"""
announces a new upload on discord
example usage as global config:
--xau f,t5,j,bin/hooks/discord-announce.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xau=f,t5,j,bin/hooks/discord-announce.py
parameters explained,
f = fork; don't wait for it to finish
t5 = timeout if it's still running after 5 sec
j = provide upload information as json; not just the filename
replace "xau" with "xbu" to announce Before upload starts instead of After completion
# how to discord:
first create the webhook url; https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
then use this to design your message: https://discohook.org/
"""
def main():
    """Announce a finished (or starting) upload on discord via a webhook."""
    WEBHOOK = "https://discord.com/api/webhooks/1234/base64"

    # upload metadata handed over by copyparty (json mode, the "j" flag)
    info = json.loads(sys.argv[1])
    vpath = info["vp"]
    url = f"https://{info['host']}/{quotep(vpath)}"

    # build the discord embed; all fields render side by side (inline)
    embed = {
        "title": vpath.split("/")[-1],
        "url": url,
        "description": url.rsplit("/", 1)[0],
        "color": 0x449900,
        "fields": [
            {"name": "Size", "value": humansize(info["sz"]), "inline": True},
            {"name": "User", "value": info["user"], "inline": True},
            {"name": "IP", "value": info["ip"], "inline": True},
        ],
    }

    resp = requests.post(WEBHOOK, json={"embeds": [embed]})
    print(f"discord: {resp}\n", end="")


if __name__ == "__main__":
    main()

62
bin/hooks/notify.py Executable file
View File

@@ -0,0 +1,62 @@
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
from plyer import notification
_ = r"""
show os notification on upload; works on windows, linux, macos, android
depdencies:
windows: python3 -m pip install --user -U plyer
linux: python3 -m pip install --user -U plyer
macos: python3 -m pip install --user -U plyer pyobjus
android: just termux and termux-api
example usages; either as global config (all volumes) or as volflag:
--xau f,bin/hooks/notify.py
-v srv/inc:inc:c,xau=f,bin/hooks/notify.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^
parameters explained,
xau = execute after upload
f = fork so it doesn't block uploads
"""
# prefer copyparty's pretty-printer for filesizes;
# fall back to echoing the raw byte-count if copyparty is not importable
try:
    from copyparty.util import humansize
except:
    def humansize(n):
        return n
def main():
    """Show a desktop notification for the uploaded file given as argv[1]."""
    path = sys.argv[1]
    folder, name = os.path.split(path)

    # best-effort filesize; the file may already have been moved/deleted
    try:
        size = humansize(os.path.getsize(path))
    except:
        size = "?"

    title = "File received"
    body = "{} ({})\n📁 {}".format(name, size, folder)

    # android: use the termux-api notifier instead of plyer
    if "com.termux" in sys.executable:
        sp.run(["termux-notification", "-t", title, "-c", body])
        return

    if sys.platform == "linux":
        icon = "emblem-documents-symbolic"
    else:
        icon = ""

    notification.notify(
        title=title,
        message=body,
        app_icon=icon,
        timeout=10,
    )


if __name__ == "__main__":
    main()

30
bin/hooks/reject-extension.py Executable file
View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
import sys
_ = r"""
reject file uploads by file extension
example usage as global config:
--xbu c,bin/hooks/reject-extension.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xbu=c,bin/hooks/reject-extension.py
parameters explained,
xbu = execute before upload
c = check result, reject upload if error
"""
def main():
    """Exit 1 (reject the upload) if the file extension is blacklisted.

    copyparty runs this as an xbu hook with the filepath as argv[1];
    with the "c" flag, a nonzero exit status rejects the upload.
    """
    bad = "exe scr com pif bat ps1 jar msi"

    # compare case-insensitively; "EVIL.EXE" must not slip past the filter
    ext = sys.argv[1].lower().split(".")[-1]
    sys.exit(1 if ext in bad.split() else 0)


if __name__ == "__main__":
    main()

39
bin/hooks/reject-mimetype.py Executable file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/env python3
import sys
import magic
_ = r"""
reject file uploads by mimetype
dependencies (linux, macos):
python3 -m pip install --user -U python-magic
dependencies (windows):
python3 -m pip install --user -U python-magic-bin
example usage as global config:
--xau c,bin/hooks/reject-mimetype.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xau=c,bin/hooks/reject-mimetype.py
parameters explained,
xau = execute after upload
c = check result, reject upload if error
"""
def main():
    """Exit nonzero (reject the upload) unless the detected mimetype is allowed."""
    allowed = ["image/jpeg", "image/png"]

    # sniff the actual file content; don't trust the file extension
    mimetype = magic.from_file(sys.argv[1], mime=True)
    print(mimetype)

    if mimetype in allowed:
        sys.exit(0)
    sys.exit(1)


if __name__ == "__main__":
    main()

54
bin/hooks/wget.py Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
import os
import sys
import json
import subprocess as sp
_ = r"""
use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the
message/pager function on the website)
example usage as global config:
--xm f,j,t3600,bin/hooks/wget.py
example usage as a volflag (per-volume config):
-v srv/inc:inc:c,xm=f,j,t3600,bin/hooks/wget.py
parameters explained,
f = fork so it doesn't block uploads
j = provide message information as json; not just the text
c3 = mute all output
t3600 = timeout and kill download after 1 hour
"""
def main():
    """Download the URL posted via copyparty's message/pager function.

    argv[1] is a json object from copyparty (the "j" flag) containing the
    message text ("txt") and the absolute path of the volume ("ap");
    marker files show download progress/failure in the folder listing.
    """
    inf = json.loads(sys.argv[1])
    url = inf["txt"]
    if "://" not in url:
        url = "https://" + url

    # download into the volume the message was posted to
    os.chdir(inf["ap"])
    name = url.split("?")[0].split("/")[-1]

    # drop a marker file so the folder shows a download is in progress
    tfn = "-- DOWNLOADING " + name
    print(f"{tfn}\n", end="")
    open(tfn, "wb").close()

    # "--" terminates option parsing so a hostile url can't inject wget flags
    cmd = ["wget", "--trust-server-names", "-nv", "--", url]
    try:
        sp.check_call(cmd)
    except:
        # best-effort: wget missing or download failed; leave a marker and move on
        # (fixed typo: the marker used to say "DONWLOAD")
        t = "-- FAILED TO DOWNLOAD " + name
        print(f"{t}\n", end="")
        open(t, "wb").close()

    os.unlink(tfn)


if __name__ == "__main__":
    main()

View File

@@ -1,5 +1,9 @@
standalone programs which take an audio file as argument standalone programs which take an audio file as argument
you may want to forget about all this fancy complicated stuff and just use [event hooks](../hooks/) instead (which doesn't need `-e2ts` or ffmpeg)
----
**NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen` **NOTE:** these all require `-e2ts` to be functional, meaning you need to do at least one of these: `apt install ffmpeg` or `pip3 install mutagen`
some of these rely on libraries which are not MIT-compatible some of these rely on libraries which are not MIT-compatible
@@ -17,6 +21,7 @@ these do not have any problematic dependencies at all:
* [cksum.py](./cksum.py) computes various checksums * [cksum.py](./cksum.py) computes various checksums
* [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser) * [exe.py](./exe.py) grabs metadata from .exe and .dll files (example for retrieving multiple tags with one parser)
* [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty * [wget.py](./wget.py) lets you download files by POSTing URLs to copyparty
* also available as an [event hook](../hooks/wget.py)
# dependencies # dependencies

View File

@@ -61,7 +61,7 @@ def main():
os.chdir(cwd) os.chdir(cwd)
f1 = fsenc(fn) f1 = fsenc(fn)
f2 = os.path.join(b"noexif", f1) f2 = fsenc(os.path.join(b"noexif", fn))
cmd = [ cmd = [
b"exiftool", b"exiftool",
b"-exif:all=", b"-exif:all=",

View File

@@ -57,6 +57,7 @@ hash -r
command -v python3 && pybin=python3 || pybin=python command -v python3 && pybin=python3 || pybin=python
} }
$pybin -c 'import numpy' ||
$pybin -m pip install --user numpy $pybin -m pip install --user numpy

View File

@@ -1,6 +1,11 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
DEPRECATED -- replaced by event hooks;
https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py
---
use copyparty as a file downloader by POSTing URLs as use copyparty as a file downloader by POSTing URLs as
application/x-www-form-urlencoded (for example using the application/x-www-form-urlencoded (for example using the
message/pager function on the website) message/pager function on the website)

View File

@@ -997,7 +997,7 @@ def main():
ap.add_argument( ap.add_argument(
"-cf", metavar="NUM_BLOCKS", type=int, default=nf, help="file cache" "-cf", metavar="NUM_BLOCKS", type=int, default=nf, help="file cache"
) )
ap.add_argument("-a", metavar="PASSWORD", help="password") ap.add_argument("-a", metavar="PASSWORD", help="password or $filepath")
ap.add_argument("-d", action="store_true", help="enable debug") ap.add_argument("-d", action="store_true", help="enable debug")
ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify") ap.add_argument("-te", metavar="PEM_FILE", help="certificate to expect/verify")
ap.add_argument("-td", action="store_true", help="disable certificate check") ap.add_argument("-td", action="store_true", help="disable certificate check")

View File

@@ -4,7 +4,7 @@ set -e
# runs copyparty (or any other program really) in a chroot # runs copyparty (or any other program really) in a chroot
# #
# assumption: these directories, and everything within, are owned by root # assumption: these directories, and everything within, are owned by root
sysdirs=( /bin /lib /lib32 /lib64 /sbin /usr ) sysdirs=( /bin /lib /lib32 /lib64 /sbin /usr /etc/alternatives )
# error-handler # error-handler
@@ -97,9 +97,11 @@ done
cln() { cln() {
rv=$? rv=$?
# cleanup if not in use wait -f -p rv $p || true
lsof "$jail" | grep -qF "$jail" && cd /
echo "chroot is in use, will not cleanup" || echo "stopping chroot..."
lsof "$jail" | grep -F "$jail" &&
echo "chroot is in use; will not unmount" ||
{ {
mount | grep -F " on $jail" | mount | grep -F " on $jail" |
awk '{sub(/ type .*/,"");sub(/.* on /,"");print}' | awk '{sub(/ type .*/,"");sub(/.* on /,"");print}' |
@@ -124,5 +126,6 @@ export LOGNAME="$USER"
#echo "cpp [$cpp]" #echo "cpp [$cpp]"
chroot --userspec=$uid:$gid "$jail" "$pybin" $pyarg "$cpp" "$@" & chroot --userspec=$uid:$gid "$jail" "$pybin" $pyarg "$cpp" "$@" &
p=$! p=$!
trap 'kill -USR1 $p' USR1
trap 'kill $p' INT TERM trap 'kill $p' INT TERM
wait wait

View File

@@ -3,7 +3,7 @@ from __future__ import print_function, unicode_literals
""" """
up2k.py: upload to copyparty up2k.py: upload to copyparty
2022-12-13, v1.1, ed <irc.rizon.net>, MIT-Licensed 2023-01-13, v1.2, ed <irc.rizon.net>, MIT-Licensed
https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py
- dependencies: requests - dependencies: requests
@@ -506,25 +506,31 @@ def handshake(ar, file, search):
url += quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8", "replace") url += quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8", "replace")
while True: while True:
sc = 600
txt = ""
try: try:
r = req_ses.post(url, headers=headers, json=req) r = req_ses.post(url, headers=headers, json=req)
break sc = r.status_code
txt = r.text
if sc < 400:
break
raise Exception("http {0}: {1}".format(sc, txt))
except Exception as ex: except Exception as ex:
em = str(ex).split("SSLError(")[-1] em = str(ex).split("SSLError(")[-1].split("\nURL: ")[0].strip()
if sc == 422 or "<pre>partial upload exists at a different" in txt:
file.recheck = True
return [], False
elif sc == 409 or "<pre>upload rejected, file already exists" in txt:
return [], False
elif "<pre>you don't have " in txt:
raise
eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em)) eprint("handshake failed, retrying: {0}\n {1}\n\n".format(file.name, em))
time.sleep(1) time.sleep(1)
sc = r.status_code
if sc >= 400:
txt = r.text
if sc == 422 or "<pre>partial upload exists at a different" in txt:
file.recheck = True
return [], False
elif sc == 409 or "<pre>upload rejected, file already exists" in txt:
return [], False
raise Exception("http {0}: {1}".format(sc, txt))
try: try:
r = r.json() r = r.json()
except: except:
@@ -701,7 +707,7 @@ class Ctl(object):
handshake(self.ar, file, search) handshake(self.ar, file, search)
def _fancy(self): def _fancy(self):
if VT100: if VT100 and not self.ar.ns:
atexit.register(self.cleanup_vt100) atexit.register(self.cleanup_vt100)
ss.scroll_region(3) ss.scroll_region(3)
@@ -725,7 +731,7 @@ class Ctl(object):
else: else:
idles = 0 idles = 0
if VT100: if VT100 and not self.ar.ns:
maxlen = ss.w - len(str(self.nfiles)) - 14 maxlen = ss.w - len(str(self.nfiles)) - 14
txt = "\033[s\033[{0}H".format(ss.g) txt = "\033[s\033[{0}H".format(ss.g)
for y, k, st, f in [ for y, k, st, f in [
@@ -765,7 +771,7 @@ class Ctl(object):
eta = str(datetime.timedelta(seconds=int(eta))) eta = str(datetime.timedelta(seconds=int(eta)))
sleft = humansize(self.nbytes - self.up_b) sleft = humansize(self.nbytes - self.up_b)
nleft = self.nfiles - self.up_f nleft = self.nfiles - self.up_f
tail = "\033[K\033[u" if VT100 else "\r" tail = "\033[K\033[u" if VT100 and not self.ar.ns else "\r"
t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft) t = "{0} eta @ {1}/s, {2}, {3}# left".format(eta, spd, sleft, nleft)
eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail)) eprint(txt + "\033]0;{0}\033\\\r{0}{1}".format(t, tail))
@@ -994,10 +1000,10 @@ source file/folder selection uses rsync syntax, meaning that:
ap.add_argument("url", type=unicode, help="server url, including destination folder") ap.add_argument("url", type=unicode, help="server url, including destination folder")
ap.add_argument("files", type=unicode, nargs="+", help="files and/or folders to process") ap.add_argument("files", type=unicode, nargs="+", help="files and/or folders to process")
ap.add_argument("-v", action="store_true", help="verbose") ap.add_argument("-v", action="store_true", help="verbose")
ap.add_argument("-a", metavar="PASSWORD", help="password") ap.add_argument("-a", metavar="PASSWORD", help="password or $filepath")
ap.add_argument("-s", action="store_true", help="file-search (disables upload)") ap.add_argument("-s", action="store_true", help="file-search (disables upload)")
ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible") ap.add_argument("--ok", action="store_true", help="continue even if some local files are inaccessible")
ap = app.add_argument_group("compatibility") ap = app.add_argument_group("compatibility")
ap.add_argument("--cls", action="store_true", help="clear screen before start") ap.add_argument("--cls", action="store_true", help="clear screen before start")
ap.add_argument("--ws", action="store_true", help="copyparty is running on windows; wait before deleting files after uploading") ap.add_argument("--ws", action="store_true", help="copyparty is running on windows; wait before deleting files after uploading")
@@ -1011,6 +1017,7 @@ source file/folder selection uses rsync syntax, meaning that:
ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections") ap.add_argument("-j", type=int, metavar="THREADS", default=4, help="parallel connections")
ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing") ap.add_argument("-J", type=int, metavar="THREADS", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading") ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
ap.add_argument("-ns", action="store_true", help="no status panel (for slow consoles)")
ap.add_argument("--safe", action="store_true", help="use simple fallback approach") ap.add_argument("--safe", action="store_true", help="use simple fallback approach")
ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)") ap.add_argument("-z", action="store_true", help="ZOOMIN' (skip uploading files if they exist at the destination with the ~same last-modified timestamp, so same as yolo / turbo with date-chk but even faster)")
@@ -1041,6 +1048,12 @@ source file/folder selection uses rsync syntax, meaning that:
if "://" not in ar.url: if "://" not in ar.url:
ar.url = "http://" + ar.url ar.url = "http://" + ar.url
if ar.a and ar.a.startswith("$"):
fn = ar.a[1:]
print("reading password from file [{}]".format(fn))
with open(fn, "rb") as f:
ar.a = f.read().decode("utf-8").strip()
if ar.cls: if ar.cls:
print("\x1b\x5b\x48\x1b\x5b\x32\x4a\x1b\x5b\x33\x4a", end="") print("\x1b\x5b\x48\x1b\x5b\x32\x4a\x1b\x5b\x33\x4a", end="")

View File

@@ -29,11 +29,11 @@ however if your copyparty is behind a reverse-proxy, you may want to use [`share
* disables thumbnails and folder-type detection in windows explorer * disables thumbnails and folder-type detection in windows explorer
* makes it way faster (especially for slow/networked locations (such as partyfuse)) * makes it way faster (especially for slow/networked locations (such as partyfuse))
### [`webdav-basicauth.reg`](webdav-basicauth.reg) ### [`webdav-cfg.reg`](webdav-cfg.bat)
* enables webdav basic-auth over plaintext http; takes effect after a reboot OR after running `webdav-unlimit.bat` * improves the native webdav support in windows;
* removes the 47.6 MiB filesize limit when downloading from webdav
### [`webdav-unlimit.bat`](webdav-unlimit.bat) * optionally enables webdav basic-auth over plaintext http
* removes the 47.6 MiB filesize limit when downloading from webdav * optionally helps disable wpad, removing the 10sec latency
### [`cfssl.sh`](cfssl.sh) ### [`cfssl.sh`](cfssl.sh)
* creates CA and server certificates using cfssl * creates CA and server certificates using cfssl

View File

@@ -14,5 +14,5 @@ name="$SVCNAME"
command_background=true command_background=true
pidfile="/var/run/$SVCNAME.pid" pidfile="/var/run/$SVCNAME.pid"
command="/usr/bin/python /usr/local/bin/copyparty-sfx.py" command="/usr/bin/python3 /usr/local/bin/copyparty-sfx.py"
command_args="-q -v /mnt::rw" command_args="-q -v /mnt::rw"

View File

@@ -0,0 +1,57 @@
# Maintainer: icxes <dev.null@need.moe>
pkgname=copyparty
pkgver="1.6.4"
pkgrel=1
pkgdesc="Portable file sharing hub"
arch=("any")
url="https://github.com/9001/${pkgname}"
license=('MIT')
depends=("python" "lsof")
optdepends=("ffmpeg: thumbnails for videos, images (slower) and audio, music tags"
"python-jinja: faster html generator"
"python-mutagen: music tags (alternative)"
"python-pillow: thumbnails for images"
"python-pyvips: thumbnails for images (higher quality, faster, uses more ram)"
"libkeyfinder-git: detection of musical keys"
"qm-vamp-plugins: BPM detection"
"python-pyopenssl: ftps functionality"
"python-impacket-git: smb support (bad idea)"
)
source=("${url}/releases/download/v${pkgver}/${pkgname}-sfx.py"
"${pkgname}.conf"
"${pkgname}.service"
"prisonparty.service"
"index.md"
"https://raw.githubusercontent.com/9001/${pkgname}/v${pkgver}/bin/prisonparty.sh"
"https://raw.githubusercontent.com/9001/${pkgname}/v${pkgver}/LICENSE"
)
backup=("etc/${pkgname}.d/init" )
sha256sums=("d0447c7a8c4738d2f910f0287c66c70f48c6fae4cf941fb7227504e646fe3e78"
"b8565eba5e64dedba1cf6c7aac7e31c5a731ed7153d6810288a28f00a36c28b2"
"f65c207e0670f9d78ad2e399bda18d5502ff30d2ac79e0e7fc48e7fbdc39afdc"
"c4f396b083c9ec02ad50b52412c84d2a82be7f079b2d016e1c9fad22d68285ff"
"dba701de9fd584405917e923ea1e59dbb249b96ef23bad479cf4e42740b774c8"
"746971e95817c54445ce7f9c8406822dffc814cd5eb8113abd36dd472fd677d7"
"cb2ce3d6277bf2f5a82ecf336cc44963bc6490bcf496ffbd75fc9e21abaa75f3"
)
package() {
cd "${srcdir}/"
install -dm755 "${pkgdir}/etc/${pkgname}.d"
install -Dm755 "${pkgname}-sfx.py" "${pkgdir}/usr/bin/${pkgname}"
install -Dm755 "prisonparty.sh" "${pkgdir}/usr/bin/prisonparty"
install -Dm644 "${pkgname}.conf" "${pkgdir}/etc/${pkgname}.d/init"
install -Dm644 "${pkgname}.service" "${pkgdir}/usr/lib/systemd/system/${pkgname}.service"
install -Dm644 "prisonparty.service" "${pkgdir}/usr/lib/systemd/system/prisonparty.service"
install -Dm644 "index.md" "${pkgdir}/var/lib/${pkgname}-jail/README.md"
install -Dm644 "LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
find /etc/${pkgname}.d -iname '*.conf' 2>/dev/null | grep -qE . && return
echo "┏━━━━━━━━━━━━━━━──-"
echo "┃ Configure ${pkgname} by adding .conf files into /etc/${pkgname}.d/"
echo "┃ and maybe copy+edit one of the following to /etc/systemd/system/:"
echo "┣━♦ /usr/lib/systemd/system/${pkgname}.service (standard)"
echo "┣━♦ /usr/lib/systemd/system/prisonparty.service (chroot)"
echo "┗━━━━━━━━━━━━━━━──-"
}

View File

@@ -0,0 +1,7 @@
## import all *.conf files from the current folder (/etc/copyparty.d)
% ./
# add additional .conf files to this folder;
# see example config files for reference:
# https://github.com/9001/copyparty/blob/hovudstraum/docs/example.conf
# https://github.com/9001/copyparty/tree/hovudstraum/docs/copyparty.d

View File

@@ -0,0 +1,32 @@
# this will start `/usr/bin/copyparty-sfx.py`
# and read config from `/etc/copyparty.d/*.conf`
#
# you probably want to:
# change "User=cpp" and "/home/cpp/" to another user
#
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
[Unit]
Description=copyparty file server
[Service]
Type=notify
SyslogIdentifier=copyparty
Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# user to run as + where the TLS certificate is (if any)
User=cpp
Environment=XDG_CONFIG_HOME=/home/cpp/.config
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/usr/bin/python3 /usr/bin/copyparty -c /etc/copyparty.d/init
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,3 @@
this is `/var/lib/copyparty-jail`, the fallback webroot when copyparty has not yet been configured
please add some `*.conf` files to `/etc/copyparty.d/`

View File

@@ -0,0 +1,31 @@
# this will start `/usr/bin/copyparty-sfx.py`
# in a chroot, preventing accidental access elsewhere
# and read config from `/etc/copyparty.d/*.conf`
#
# expose additional filesystem locations to copyparty
# by listing them between the last `1000` and `--`
#
# `1000 1000` = what user to run copyparty as
#
# unless you add -q to disable logging, you may want to remove the
# following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x
[Unit]
Description=copyparty file server
[Service]
SyslogIdentifier=prisonparty
Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/bin/bash /usr/bin/prisonparty /var/lib/copyparty-jail 1000 1000 /etc/copyparty.d -- \
/usr/bin/python3 /usr/bin/copyparty -c /etc/copyparty.d/init
[Install]
WantedBy=multi-user.target

View File

@@ -1,13 +1,22 @@
<!-- <!--
NOTE: DEPRECATED; please use the javascript version instead:
https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/minimal-up2k.js
----
save this as .epilogue.html inside a write-only folder to declutter the UI, makes it look like save this as .epilogue.html inside a write-only folder to declutter the UI, makes it look like
https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png https://user-images.githubusercontent.com/241032/118311195-dd6ca380-b4ef-11eb-86f3-75a3ff2e1332.png
only works if you disable the prologue/epilogue sandbox with --no-sb-lg
which should probably be combined with --no-dot-ren to prevent damage
(`no_sb_lg` can also be set per-volume with volflags)
--> -->
<style> <style>
/* make the up2k ui REALLY minimal by hiding a bunch of stuff: */ /* make the up2k ui REALLY minimal by hiding a bunch of stuff: */
#ops, #tree, #path, #epi+h2, /* main tabs and navigators (tree/breadcrumbs) */ #ops, #tree, #path, #wfp, /* main tabs and navigators (tree/breadcrumbs) */
#u2conf tr:first-child>td[rowspan]:not(#u2btn_cw), /* most of the config options */ #u2conf tr:first-child>td[rowspan]:not(#u2btn_cw), /* most of the config options */

View File

@@ -17,7 +17,7 @@ almost the same as minimal-up2k.html except this one...:
var u2min = ` var u2min = `
<style> <style>
#ops, #path, #tree, #files, #epi+div+h2, #ops, #path, #tree, #files, #wfp,
#u2conf td.c+.c, #u2cards, #srch_dz, #srch_zd { #u2conf td.c+.c, #u2cards, #srch_dz, #srch_zd {
display: none !important; display: none !important;
} }
@@ -55,5 +55,5 @@ var u2min = `
if (!has(perms, 'read')) { if (!has(perms, 'read')) {
var e2 = mknod('div'); var e2 = mknod('div');
e2.innerHTML = u2min; e2.innerHTML = u2min;
ebi('wrap').insertBefore(e2, QS('#epi+h2')); ebi('wrap').insertBefore(e2, QS('#wfp'));
} }

View File

@@ -6,12 +6,17 @@
# 1) put copyparty-sfx.py and prisonparty.sh in /usr/local/bin # 1) put copyparty-sfx.py and prisonparty.sh in /usr/local/bin
# 2) cp -pv prisonparty.service /etc/systemd/system && systemctl enable --now prisonparty # 2) cp -pv prisonparty.service /etc/systemd/system && systemctl enable --now prisonparty
# #
# expose additional filesystem locations to copyparty
# by listing them between the last `1000` and `--`
#
# `1000 1000` = what user to run copyparty as
#
# you may want to: # you may want to:
# change '/mnt::rw' to another location or permission-set # change '/mnt::rw' to another location or permission-set
# (remember to change the '/mnt' chroot arg too) # (remember to change the '/mnt' chroot arg too)
# #
# enable line-buffering for realtime logging (slight performance cost): # unless you add -q to disable logging, you may want to remove the
# inside the [Service] block, add the following line: # following line to allow buffering (slightly better performance):
# Environment=PYTHONUNBUFFERED=x # Environment=PYTHONUNBUFFERED=x
[Unit] [Unit]
@@ -19,7 +24,14 @@ Description=copyparty file server
[Service] [Service]
SyslogIdentifier=prisonparty SyslogIdentifier=prisonparty
WorkingDirectory=/usr/local/bin Environment=PYTHONUNBUFFERED=x
WorkingDirectory=/var/lib/copyparty-jail
ExecReload=/bin/kill -s USR1 $MAINPID
# stop systemd-tmpfiles-clean.timer from deleting copyparty while it's running
ExecStartPre=+/bin/bash -c 'mkdir -p /run/tmpfiles.d/ && echo "x /tmp/pe-copyparty*" > /run/tmpfiles.d/copyparty.conf'
# run copyparty
ExecStart=/bin/bash /usr/local/bin/prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt -- \ ExecStart=/bin/bash /usr/local/bin/prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt -- \
/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -v /mnt::rw /usr/bin/python3 /usr/local/bin/copyparty-sfx.py -q -v /mnt::rw

View File

@@ -1,7 +1,7 @@
@echo off @echo off
rem removes the 47.6 MiB filesize limit when downloading from webdav rem removes the 47.6 MiB filesize limit when downloading from webdav
rem + optionally allows/enables password-auth over plaintext http rem + optionally allows/enables password-auth over plaintext http
rem + optionally helps disable wpad rem + optionally helps disable wpad, removing the 10sec latency
setlocal enabledelayedexpansion setlocal enabledelayedexpansion

View File

@@ -25,7 +25,8 @@ from textwrap import dedent
from .__init__ import ANYWIN, CORES, PY2, VT100, WINDOWS, E, EnvParams, unicode from .__init__ import ANYWIN, CORES, PY2, VT100, WINDOWS, E, EnvParams, unicode
from .__version__ import CODENAME, S_BUILD_DT, S_VERSION from .__version__ import CODENAME, S_BUILD_DT, S_VERSION
from .authsrv import expand_config_file, re_vol from .authsrv import expand_config_file, re_vol, split_cfg_ln, upgrade_cfg_fmt
from .cfg import flagcats, onedash
from .svchub import SvcHub from .svchub import SvcHub
from .util import ( from .util import (
IMPLICATIONS, IMPLICATIONS,
@@ -35,8 +36,10 @@ from .util import (
UNPLICATIONS, UNPLICATIONS,
align_tab, align_tab,
ansi_re, ansi_re,
is_exe,
min_ex, min_ex,
py_desc, py_desc,
pybin,
termsize, termsize,
wrap, wrap,
) )
@@ -53,8 +56,9 @@ try:
except: except:
HAVE_SSL = False HAVE_SSL = False
printed: list[str] = []
u = unicode u = unicode
printed: list[str] = []
zsid = uuid.uuid4().urn[4:]
class RiceFormatter(argparse.HelpFormatter): class RiceFormatter(argparse.HelpFormatter):
@@ -229,9 +233,10 @@ def get_srvname() -> str:
ret = f.read().decode("utf-8", "replace").strip() ret = f.read().decode("utf-8", "replace").strip()
except: except:
ret = "" ret = ""
while len(ret) < 7: namelen = 5
while len(ret) < namelen:
ret += base64.b32encode(os.urandom(4))[:7].decode("utf-8").lower() ret += base64.b32encode(os.urandom(4))[:7].decode("utf-8").lower()
ret = re.sub("[234567=]", "", ret)[:7] ret = re.sub("[234567=]", "", ret)[:namelen]
with open(fp, "wb") as f: with open(fp, "wb") as f:
f.write(ret.encode("utf-8") + b"\n") f.write(ret.encode("utf-8") + b"\n")
@@ -253,7 +258,7 @@ def ensure_locale() -> None:
except: except:
continue continue
t = "setlocale {} failed,\n sorting and dates will be funky" t = "setlocale {} failed,\n sorting and dates might get funky\n"
warn(t.format(safe)) warn(t.format(safe))
@@ -353,27 +358,28 @@ def configure_ssl_ciphers(al: argparse.Namespace) -> None:
def args_from_cfg(cfg_path: str) -> list[str]: def args_from_cfg(cfg_path: str) -> list[str]:
lines: list[str] = [] lines: list[str] = []
expand_config_file(lines, cfg_path, "") expand_config_file(lines, cfg_path, "")
lines = upgrade_cfg_fmt(None, argparse.Namespace(vc=False), lines, "")
ret: list[str] = [] ret: list[str] = []
skip = False skip = True
for ln in lines: for ln in lines:
if not ln: sn = ln.split(" #")[0].strip()
if sn.startswith("["):
skip = True
if sn.startswith("[global]"):
skip = False skip = False
continue continue
if skip or not sn.split("#")[0].strip():
if ln.startswith("#"):
continue continue
for k, v in split_cfg_ln(sn).items():
if not ln.startswith("-"): k = k.lstrip("-")
continue if not k:
continue
if skip: prefix = "-" if k in onedash else "--"
continue if v is True:
ret.append(prefix + k)
try: else:
ret.extend(ln.split(" ", 1)) ret.append(prefix + k + "=" + v)
except:
ret.append(ln)
return ret return ret
@@ -467,7 +473,7 @@ def get_sects():
"g" (get): download files, but cannot see folder contents "g" (get): download files, but cannot see folder contents
"G" (upget): "get", but can see filekeys of their own uploads "G" (upget): "get", but can see filekeys of their own uploads
too many volflags to list here, see the other sections too many volflags to list here, see --help-flags
example:\033[35m example:\033[35m
-a ed:hunter2 -v .::r:rw,ed -v ../inc:dump:w:rw,ed:c,nodupe \033[36m -a ed:hunter2 -v .::r:rw,ed -v ../inc:dump:w:rw,ed:c,nodupe \033[36m
@@ -494,65 +500,53 @@ def get_sects():
""" """
volflags are appended to volume definitions, for example, volflags are appended to volume definitions, for example,
to create a write-only volume with the \033[33mnodupe\033[0m and \033[32mnosub\033[0m flags: to create a write-only volume with the \033[33mnodupe\033[0m and \033[32mnosub\033[0m flags:
\033[35m-v /mnt/inc:/inc:w\033[33m:c,nodupe\033[32m:c,nosub \033[35m-v /mnt/inc:/inc:w\033[33m:c,nodupe\033[32m:c,nosub"""
)
+ build_flags_desc(),
],
[
"hooks",
"execute commands before/after various events",
dedent(
"""
execute a command (a program or script) before or after various events;
\033[36mxbu\033[35m executes CMD before a file upload starts
\033[36mxau\033[35m executes CMD after a file upload finishes
\033[36mxbr\033[35m executes CMD before a file rename/move
\033[36mxar\033[35m executes CMD after a file rename/move
\033[36mxbd\033[35m executes CMD before a file delete
\033[36mxad\033[35m executes CMD after a file delete
\033[36mxm\033[35m executes CMD on message
\033[0m
can be defined as --args or volflags; for example \033[36m
--xau notify-send
-v .::r:c,xau=notify-send
\033[0m
commands specified as --args are appended to volflags;
each --arg and volflag can be specified multiple times,
each command will execute in order unless one returns non-zero
\033[0muploads, general: optionally prefix the command with comma-sep. flags similar to -mtp:
\033[36mnodupe\033[35m rejects existing files (instead of symlinking them)
\033[36mnosub\033[35m forces all uploads into the top folder of the vfs
\033[36mmagic$\033[35m enables filetype detection for nameless uploads
\033[36mgz\033[35m allows server-side gzip of uploads with ?gz (also c,xz)
\033[36mpk\033[35m forces server-side compression, optional arg: xz,9
\033[0mupload rules: \033[36mf\033[35m forks the process, doesn't wait for completion
\033[36mmaxn=250,600\033[35m max 250 uploads over 15min \033[36mc\033[35m checks return code, blocks the action if non-zero
\033[36mmaxb=1g,300\033[35m max 1 GiB over 5min (suffixes: b, k, m, g) \033[36mj\033[35m provides json with info as 1st arg instead of filepath
\033[36msz=1k-3m\033[35m allow filesizes between 1 KiB and 3MiB \033[36mwN\033[35m waits N sec after command has been started before continuing
\033[36mdf=1g\033[35m ensure 1 GiB free disk space \033[36mtN\033[35m sets an N sec timeout before the command is abandoned
\033[0mupload rotation: \033[36mkt\033[35m kills the entire process tree on timeout (default),
(moves all uploads into the specified folder structure) \033[36mkm\033[35m kills just the main process
\033[36mrotn=100,3\033[35m 3 levels of subfolders with 100 entries in each \033[36mkn\033[35m lets it continue running until copyparty is terminated
\033[36mrotf=%Y-%m/%d-%H\033[35m date-formatted organizing
\033[36mlifetime=3600\033[35m uploads are deleted after 1 hour
\033[0mdatabase, general: \033[36mc0\033[35m show all process output (default)
\033[36me2d\033[35m sets -e2d (all -e2* args can be set using ce2* volflags) \033[36mc1\033[35m show only stderr
\033[36md2ts\033[35m disables metadata collection for existing files \033[36mc2\033[35m show only stdout
\033[36md2ds\033[35m disables onboot indexing, overrides -e2ds* \033[36mc3\033[35m mute all process otput
\033[36md2t\033[35m disables metadata collection, overrides -e2t* \033[0m
\033[36md2v\033[35m disables file verification, overrides -e2v* except for \033[36mxm\033[0m, only one hook / one action can run at a time,
\033[36md2d\033[35m disables all database stuff, overrides -e2* so it's recommended to use the \033[36mf\033[0m flag unless you really need
\033[36mhist=/tmp/cdb\033[35m puts thumbnails and indexes at that location to wait for the hook to finish before continuing (without \033[36mf\033[0m
\033[36mscan=60\033[35m scan for new files every 60sec, same as --re-maxage the upload speed can easily drop to 10% for small files)"""
\033[36mnohash=\\.iso$\033[35m skips hashing file contents if path matches *.iso
\033[36mnoidx=\\.iso$\033[35m fully ignores the contents at paths matching *.iso
\033[36mnoforget$\033[35m don't forget files when deleted from disk
\033[36mdbd=[acid|swal|wal|yolo]\033[35m database speed-durability tradeoff
\033[36mxlink$\033[35m cross-volume dupe detection / linking
\033[36mxdev\033[35m do not descend into other filesystems
\033[36mxvol\033[35m skip symlinks leaving the volume root
\033[0mdatabase, audio tags:
"mte", "mth", "mtp", "mtm" all work the same as -mte, -mth, ...
\033[36mmtp=.bpm=f,audio-bpm.py\033[35m uses the "audio-bpm.py" program to
generate ".bpm" tags from uploads (f = overwrite tags)
\033[36mmtp=ahash,vhash=media-hash.py\033[35m collects two tags at once
\033[0mthumbnails:
\033[36mdthumb\033[35m disables all thumbnails
\033[36mdvthumb\033[35m disables video thumbnails
\033[36mdathumb\033[35m disables audio thumbnails (spectrograms)
\033[36mdithumb\033[35m disables image thumbnails
\033[0mclient and ux:
\033[36mhtml_head=TXT\033[35m includes TXT in the <head>
\033[36mrobots\033[35m allows indexing by search engines (default)
\033[36mnorobots\033[35m kindly asks search engines to leave
\033[0mothers:
\033[36mfk=8\033[35m generates per-file accesskeys,
which will then be required at the "g" permission
\033[0m"""
), ),
], ],
[ [
@@ -610,6 +604,17 @@ def get_sects():
] ]
def build_flags_desc():
ret = ""
for grp, flags in flagcats.items():
ret += "\n\n\033[0m" + grp
for k, v in flags.items():
v = v.replace("\n", "\n ")
ret += "\n \033[36m{}\033[35m {}".format(k, v)
return ret + "\033[0m"
# fmt: off # fmt: off
@@ -650,11 +655,13 @@ def add_upload(ap):
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600") ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without -e2d; roughly 1 MiB RAM per 600")
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (very slow on windows)") ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (very slow on windows)")
ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even when it might be dangerous (multiprocessing, filesystems lacking sparse-files support, ...)") ap2.add_argument("--use-fpool", action="store_true", help="force file-handle pooling, even when it might be dangerous (multiprocessing, filesystems lacking sparse-files support, ...)")
ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem)") ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem) (volflag=hardlink)")
ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made") ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made (volflag=neversymlink)")
ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead") ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead (volflag=copydupes)")
ap2.add_argument("--no-dupe", action="store_true", help="reject duplicate files during upload; only matches within the same volume (volflag=nodupe)") ap2.add_argument("--no-dupe", action="store_true", help="reject duplicate files during upload; only matches within the same volume (volflag=nodupe)")
ap2.add_argument("--no-snap", action="store_true", help="disable snapshots -- forget unfinished uploads on shutdown; don't create .hist/up2k.snap files -- abandoned/interrupted uploads must be cleaned up manually") ap2.add_argument("--no-snap", action="store_true", help="disable snapshots -- forget unfinished uploads on shutdown; don't create .hist/up2k.snap files -- abandoned/interrupted uploads must be cleaned up manually")
ap2.add_argument("--rand", action="store_true", help="force randomized filenames, --nrand chars long (volflag=rand)")
ap2.add_argument("--nrand", metavar="NUM", type=int, default=9, help="randomized filenames length (volflag=nrand)")
ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads (volflag=magic)") ap2.add_argument("--magic", action="store_true", help="enable filetype detection on nameless uploads (volflag=magic)")
ap2.add_argument("--df", metavar="GiB", type=float, default=0, help="ensure GiB free disk space by rejecting upload requests") ap2.add_argument("--df", metavar="GiB", type=float, default=0, help="ensure GiB free disk space by rejecting upload requests")
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files") ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
@@ -672,6 +679,8 @@ def add_network(ap):
ap2.add_argument("--rp-loc", metavar="PATH", type=u, default="", help="if reverse-proxying on a location instead of a dedicated domain/subdomain, provide the base location here (eg. /foo/bar)") ap2.add_argument("--rp-loc", metavar="PATH", type=u, default="", help="if reverse-proxying on a location instead of a dedicated domain/subdomain, provide the base location here (eg. /foo/bar)")
if ANYWIN: if ANYWIN:
ap2.add_argument("--reuseaddr", action="store_true", help="set reuseaddr on listening sockets on windows; allows rapid restart of copyparty at the expense of being able to accidentally start multiple instances") ap2.add_argument("--reuseaddr", action="store_true", help="set reuseaddr on listening sockets on windows; allows rapid restart of copyparty at the expense of being able to accidentally start multiple instances")
else:
ap2.add_argument("--freebind", action="store_true", help="allow listening on IPs which do not yet exist, for example if the network interfaces haven't finished going up. Only makes sense for IPs other than '0.0.0.0', '127.0.0.1', '::', and '::1'. May require running as root (unless net.ipv6.ip_nonlocal_bind)")
ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes") ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes")
ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds") ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds")
ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds") ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds")
@@ -692,12 +701,13 @@ def add_zeroconf(ap):
ap2.add_argument("-z", action="store_true", help="enable all zeroconf backends (mdns, ssdp)") ap2.add_argument("-z", action="store_true", help="enable all zeroconf backends (mdns, ssdp)")
ap2.add_argument("--z-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes\n └─example: \033[32meth0, wlo1, virhost0, 192.168.123.0/24, fd00:fda::/96\033[0m") ap2.add_argument("--z-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes\n └─example: \033[32meth0, wlo1, virhost0, 192.168.123.0/24, fd00:fda::/96\033[0m")
ap2.add_argument("--z-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--z-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--z-chk", metavar="SEC", type=int, default=10, help="check for network changes every SEC seconds (0=disable)")
ap2.add_argument("-zv", action="store_true", help="verbose all zeroconf backends") ap2.add_argument("-zv", action="store_true", help="verbose all zeroconf backends")
ap2.add_argument("--mc-hop", metavar="SEC", type=int, default=0, help="rejoin multicast groups every SEC seconds (workaround for some switches/routers which cause mDNS to suddenly stop working after some time); try [\033[32m300\033[0m] or [\033[32m180\033[0m]") ap2.add_argument("--mc-hop", metavar="SEC", type=int, default=0, help="rejoin multicast groups every SEC seconds (workaround for some switches/routers which cause mDNS to suddenly stop working after some time); try [\033[32m300\033[0m] or [\033[32m180\033[0m]")
def add_zc_mdns(ap): def add_zc_mdns(ap):
ap2 = ap.add_argument_group("Zeroconf-mDNS options:") ap2 = ap.add_argument_group("Zeroconf-mDNS options")
ap2.add_argument("--zm", action="store_true", help="announce the enabled protocols over mDNS (multicast DNS-SD) -- compatible with KDE, gnome, macOS, ...") ap2.add_argument("--zm", action="store_true", help="announce the enabled protocols over mDNS (multicast DNS-SD) -- compatible with KDE, gnome, macOS, ...")
ap2.add_argument("--zm-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zm-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--zm-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zm-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
@@ -716,13 +726,13 @@ def add_zc_mdns(ap):
def add_zc_ssdp(ap): def add_zc_ssdp(ap):
ap2 = ap.add_argument_group("Zeroconf-SSDP options:") ap2 = ap.add_argument_group("Zeroconf-SSDP options")
ap2.add_argument("--zs", action="store_true", help="announce the enabled protocols over SSDP -- compatible with Windows") ap2.add_argument("--zs", action="store_true", help="announce the enabled protocols over SSDP -- compatible with Windows")
ap2.add_argument("--zs-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zs-on", metavar="NETS", type=u, default="", help="enable zeroconf ONLY on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--zs-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes") ap2.add_argument("--zs-off", metavar="NETS", type=u, default="", help="disable zeroconf on the comma-separated list of subnets and/or interface names/indexes")
ap2.add_argument("--zsv", action="store_true", help="verbose SSDP") ap2.add_argument("--zsv", action="store_true", help="verbose SSDP")
ap2.add_argument("--zsl", metavar="PATH", type=u, default="/?hc", help="location to include in the url (or a complete external URL), for example [\033[32mpriv/?pw=hunter2\033[0m] (goes directly to /priv/ with password hunter2) or [\033[32m?hc=priv&pw=hunter2\033[0m] (shows mounting options for /priv/ with password)") ap2.add_argument("--zsl", metavar="PATH", type=u, default="/?hc", help="location to include in the url (or a complete external URL), for example [\033[32mpriv/?pw=hunter2\033[0m] (goes directly to /priv/ with password hunter2) or [\033[32m?hc=priv&pw=hunter2\033[0m] (shows mounting options for /priv/ with password)")
ap2.add_argument("--zsid", metavar="UUID", type=u, default=uuid.uuid4().urn[4:], help="USN (device identifier) to announce") ap2.add_argument("--zsid", metavar="UUID", type=u, default=zsid, help="USN (device identifier) to announce")
def add_ftp(ap): def add_ftp(ap):
@@ -755,6 +765,23 @@ def add_smb(ap):
ap2.add_argument("--smbvvv", action="store_true", help="verbosest") ap2.add_argument("--smbvvv", action="store_true", help="verbosest")
def add_hooks(ap):
ap2 = ap.add_argument_group('event hooks (see --help-hooks)')
ap2.add_argument("--xbu", metavar="CMD", type=u, action="append", help="execute CMD before a file upload starts")
ap2.add_argument("--xau", metavar="CMD", type=u, action="append", help="execute CMD after a file upload finishes")
ap2.add_argument("--xbr", metavar="CMD", type=u, action="append", help="execute CMD before a file move/rename")
ap2.add_argument("--xar", metavar="CMD", type=u, action="append", help="execute CMD after a file move/rename")
ap2.add_argument("--xbd", metavar="CMD", type=u, action="append", help="execute CMD before a file delete")
ap2.add_argument("--xad", metavar="CMD", type=u, action="append", help="execute CMD after a file delete")
ap2.add_argument("--xm", metavar="CMD", type=u, action="append", help="execute CMD on message")
def add_yolo(ap):
ap2 = ap.add_argument_group('yolo options')
ap2.add_argument("--allow-csrf", action="store_true", help="disable csrf protections; let other domains/sites impersonate you through cross-site requests")
ap2.add_argument("--getmod", action="store_true", help="permit ?move=[...] and ?delete as GET")
def add_optouts(ap): def add_optouts(ap):
ap2 = ap.add_argument_group('opt-outs') ap2 = ap.add_argument_group('opt-outs')
ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)") ap2.add_argument("-nw", action="store_true", help="never write anything to disk (debug/benchmark)")
@@ -764,6 +791,7 @@ def add_optouts(ap):
ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations") ap2.add_argument("--no-mv", action="store_true", help="disable move/rename operations")
ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI") ap2.add_argument("-nih", action="store_true", help="no info hostname -- don't show in UI")
ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI") ap2.add_argument("-nid", action="store_true", help="no info disk-usage -- don't show in UI")
ap2.add_argument("-nb", action="store_true", help="no powered-by-copyparty branding in UI")
ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar") ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar")
ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (as specified by the 'lifetime' volflag)") ap2.add_argument("--no-lifetime", action="store_true", help="disable automatic deletion of uploads after a certain time (as specified by the 'lifetime' volflag)")
@@ -771,8 +799,8 @@ def add_optouts(ap):
def add_safety(ap, fk_salt): def add_safety(ap, fk_salt):
ap2 = ap.add_argument_group('safety options') ap2 = ap.add_argument_group('safety options')
ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js") ap2.add_argument("-s", action="count", default=0, help="increase safety: Disable thumbnails / potentially dangerous software (ffmpeg/pillow/vips), hide partial uploads, avoid crawlers.\n └─Alias of\033[32m --dotpart --no-thumb --no-mtag-ff --no-robots --force-js")
ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, webdav, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --no-dot-mv --no-dot-ren --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih") ap2.add_argument("-ss", action="store_true", help="further increase safety: Prevent js-injection, accidental move/delete, broken symlinks, webdav, 404 on 403, ban on excessive 404s.\n └─Alias of\033[32m -s --unpost=0 --no-del --no-mv --hardlink --vague-403 --ban-404=50,60,1440 -nih")
ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss --no-dav -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r") ap2.add_argument("-sss", action="store_true", help="further increase safety: Enable logging to disk, scan for dangerous symlinks.\n └─Alias of\033[32m -ss --no-dav --no-logues --no-readme -lo=cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz --ls=**,*,ln,p,r")
ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments \033[33mUSER\033[0m,\033[33mVOL\033[0m,\033[33mFLAGS\033[0m; example [\033[32m**,*,ln,p,r\033[0m]") ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="do a sanity/safety check of all volumes on startup; arguments \033[33mUSER\033[0m,\033[33mVOL\033[0m,\033[33mFLAGS\033[0m; example [\033[32m**,*,ln,p,r\033[0m]")
ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter") ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt; used to generate unpredictable internal identifiers for uploads -- doesn't really matter")
ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter") ap2.add_argument("--fk-salt", metavar="SALT", type=u, default=fk_salt, help="per-file accesskey salt; used to generate unpredictable URLs for hidden files -- this one DOES matter")
@@ -788,13 +816,15 @@ def add_safety(ap, fk_salt):
ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than \033[33mN\033[0m 404's in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes (disabled by default since turbo-up2k counts as 404s)") ap2.add_argument("--ban-404", metavar="N,W,B", type=u, default="no", help="hitting more than \033[33mN\033[0m 404's in \033[33mW\033[0m minutes = ban for \033[33mB\033[0m minutes (disabled by default since turbo-up2k counts as 404s)")
ap2.add_argument("--aclose", metavar="MIN", type=int, default=10, help="if a client maxes out the server connection limit, downgrade it from connection:keep-alive to connection:close for MIN minutes (and also kill its active connections) -- disable with 0") ap2.add_argument("--aclose", metavar="MIN", type=int, default=10, help="if a client maxes out the server connection limit, downgrade it from connection:keep-alive to connection:close for MIN minutes (and also kill its active connections) -- disable with 0")
ap2.add_argument("--loris", metavar="B", type=int, default=60, help="if a client maxes out the server connection limit without sending headers, ban it for B minutes; disable with [\033[32m0\033[0m]") ap2.add_argument("--loris", metavar="B", type=int, default=60, help="if a client maxes out the server connection limit without sending headers, ban it for B minutes; disable with [\033[32m0\033[0m]")
ap2.add_argument("--acao", metavar="V[,V]", type=u, default="*", help="Access-Control-Allow-Origin; list of origins (domains/IPs without port) to accept requests from; [\033[32mhttps://1.2.3.4\033[0m]. Default [\033[32m*\033[0m] allows requests from all sites but removes cookies and http-auth; only ?pw=hunter2 survives")
ap2.add_argument("--acam", metavar="V[,V]", type=u, default="GET,HEAD", help="Access-Control-Allow-Methods; list of methods to accept from offsite ('*' behaves like described in --acao)")
def add_shutdown(ap): def add_shutdown(ap):
ap2 = ap.add_argument_group('shutdown options') ap2 = ap.add_argument_group('shutdown options')
ap2.add_argument("--ign-ebind", action="store_true", help="continue running even if it's impossible to listen on some of the requested endpoints") ap2.add_argument("--ign-ebind", action="store_true", help="continue running even if it's impossible to listen on some of the requested endpoints")
ap2.add_argument("--ign-ebind-all", action="store_true", help="continue running even if it's impossible to receive connections at all") ap2.add_argument("--ign-ebind-all", action="store_true", help="continue running even if it's impossible to receive connections at all")
ap2.add_argument("--exit", metavar="WHEN", type=u, default="", help="shutdown after WHEN has finished; for example [\033[32midx\033[0m] will do volume indexing + metadata analysis") ap2.add_argument("--exit", metavar="WHEN", type=u, default="", help="shutdown after WHEN has finished; [\033[32mcfg\033[0m] config parsing, [\033[32midx\033[0m] volscan + multimedia indexing")
def add_logging(ap): def add_logging(ap):
@@ -836,11 +866,11 @@ def add_thumbnail(ap):
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html # https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
# https://github.com/libvips/libvips # https://github.com/libvips/libvips
# ffmpeg -hide_banner -demuxers | awk '/^ D /{print$2}' | while IFS= read -r x; do ffmpeg -hide_banner -h demuxer=$x; done | grep -E '^Demuxer |extensions:' # ffmpeg -hide_banner -demuxers | awk '/^ D /{print$2}' | while IFS= read -r x; do ffmpeg -hide_banner -h demuxer=$x; done | grep -E '^Demuxer |extensions:'
ap2.add_argument("--th-r-pil", metavar="T,T", type=u, default="bmp,dib,gif,icns,ico,jpg,jpeg,jp2,jpx,pcx,png,pbm,pgm,ppm,pnm,sgi,tga,tif,tiff,webp,xbm,dds,xpm,heif,heifs,heic,heics,avif,avifs", help="image formats to decode using pillow") ap2.add_argument("--th-r-pil", metavar="T,T", type=u, default="avif,avifs,blp,bmp,dcx,dds,dib,emf,eps,fits,flc,fli,fpx,gif,heic,heics,heif,heifs,icns,ico,im,j2p,j2k,jp2,jpeg,jpg,jpx,pbm,pcx,pgm,png,pnm,ppm,psd,sgi,spi,tga,tif,tiff,webp,wmf,xbm,xpm", help="image formats to decode using pillow")
ap2.add_argument("--th-r-vips", metavar="T,T", type=u, default="jpg,jpeg,jp2,jpx,jxl,tif,tiff,png,webp,heic,avif,fit,fits,fts,exr,svg,hdr,ppm,pgm,pfm,gif,nii", help="image formats to decode using pyvips") ap2.add_argument("--th-r-vips", metavar="T,T", type=u, default="avif,exr,fit,fits,fts,gif,hdr,heic,jp2,jpeg,jpg,jpx,jxl,nii,pfm,pgm,png,ppm,svg,tif,tiff,webp", help="image formats to decode using pyvips")
ap2.add_argument("--th-r-ffi", metavar="T,T", type=u, default="apng,avif,avifs,bmp,dds,dib,fit,fits,fts,gif,heic,heics,heif,heifs,icns,ico,jp2,jpeg,jpg,jpx,jxl,pbm,pcx,pfm,pgm,png,pnm,ppm,psd,sgi,tga,tif,tiff,webp,xbm,xpm", help="image formats to decode using ffmpeg") ap2.add_argument("--th-r-ffi", metavar="T,T", type=u, default="apng,avif,avifs,bmp,dds,dib,fit,fits,fts,gif,hdr,heic,heics,heif,heifs,icns,ico,jp2,jpeg,jpg,jpx,jxl,pbm,pcx,pfm,pgm,png,pnm,ppm,psd,sgi,tga,tif,tiff,webp,xbm,xpm", help="image formats to decode using ffmpeg")
ap2.add_argument("--th-r-ffv", metavar="T,T", type=u, default="av1,asf,avi,flv,m4v,mkv,mjpeg,mjpg,mpg,mpeg,mpg2,mpeg2,h264,avc,mts,h265,hevc,mov,3gp,mp4,ts,mpegts,nut,ogv,ogm,rm,vob,webm,wmv", help="video formats to decode using ffmpeg") ap2.add_argument("--th-r-ffv", metavar="T,T", type=u, default="3gp,asf,av1,avc,avi,flv,h264,h265,hevc,m4v,mjpeg,mjpg,mkv,mov,mp4,mpeg,mpeg2,mpegts,mpg,mpg2,mts,nut,ogm,ogv,rm,ts,vob,webm,wmv", help="video formats to decode using ffmpeg")
ap2.add_argument("--th-r-ffa", metavar="T,T", type=u, default="aac,m4a,ogg,opus,flac,alac,mp3,mp2,ac3,dts,wma,ra,wav,aif,aiff,au,alaw,ulaw,mulaw,amr,gsm,ape,tak,tta,wv,mpc", help="audio formats to decode using ffmpeg") ap2.add_argument("--th-r-ffa", metavar="T,T", type=u, default="aac,ac3,aif,aiff,alac,alaw,amr,apac,ape,au,bonk,dfpwm,dts,flac,gsm,ilbc,it,m4a,mo3,mod,mp2,mp3,mpc,mptm,mt2,mulaw,ogg,okt,opus,ra,s3m,tak,tta,ulaw,wav,wma,wv,xm,xpk", help="audio formats to decode using ffmpeg")
def add_transcoding(ap): def add_transcoding(ap):
@@ -851,7 +881,7 @@ def add_transcoding(ap):
def add_db_general(ap, hcores): def add_db_general(ap, hcores):
ap2 = ap.add_argument_group('general db options') ap2 = ap.add_argument_group('general db options')
ap2.add_argument("-e2d", action="store_true", help="enable up2k database, making files searchable + enables upload deduplocation") ap2.add_argument("-e2d", action="store_true", help="enable up2k database, making files searchable + enables upload deduplication")
ap2.add_argument("-e2ds", action="store_true", help="scan writable folders for new files on startup; sets -e2d") ap2.add_argument("-e2ds", action="store_true", help="scan writable folders for new files on startup; sets -e2d")
ap2.add_argument("-e2dsa", action="store_true", help="scans all folders on startup; sets -e2ds") ap2.add_argument("-e2dsa", action="store_true", help="scans all folders on startup; sets -e2ds")
ap2.add_argument("-e2v", action="store_true", help="verify file integrity; rehash all files and compare with db") ap2.add_argument("-e2v", action="store_true", help="verify file integrity; rehash all files and compare with db")
@@ -872,6 +902,7 @@ def add_db_general(ap, hcores):
ap2.add_argument("--db-act", metavar="SEC", type=float, default=10, help="defer any scheduled volume reindexing until SEC seconds after last db write (uploads, renames, ...)") ap2.add_argument("--db-act", metavar="SEC", type=float, default=10, help="defer any scheduled volume reindexing until SEC seconds after last db write (uploads, renames, ...)")
ap2.add_argument("--srch-time", metavar="SEC", type=int, default=45, help="search deadline -- terminate searches running for more than SEC seconds") ap2.add_argument("--srch-time", metavar="SEC", type=int, default=45, help="search deadline -- terminate searches running for more than SEC seconds")
ap2.add_argument("--srch-hits", metavar="N", type=int, default=7999, help="max search results to allow clients to fetch; 125 results will be shown initially") ap2.add_argument("--srch-hits", metavar="N", type=int, default=7999, help="max search results to allow clients to fetch; 125 results will be shown initially")
ap2.add_argument("--dotsrch", action="store_true", help="show dotfiles in search results (volflags: dotsrch | nodotsrch)")
def add_db_metadata(ap): def add_db_metadata(ap):
@@ -906,10 +937,18 @@ def add_ui(ap, retry):
ap2.add_argument("--textfiles", metavar="CSV", type=u, default="txt,nfo,diz,cue,readme", help="file extensions to present as plaintext") ap2.add_argument("--textfiles", metavar="CSV", type=u, default="txt,nfo,diz,cue,readme", help="file extensions to present as plaintext")
ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)") ap2.add_argument("--txt-max", metavar="KiB", type=int, default=64, help="max size of embedded textfiles on ?doc= (anything bigger will be lazy-loaded by JS)")
ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents") ap2.add_argument("--doctitle", metavar="TXT", type=u, default="copyparty", help="title / service-name to show in html documents")
ap2.add_argument("--pb-url", metavar="URL", type=u, default="https://github.com/9001/copyparty", help="powered-by link; disable with -np")
ap2.add_argument("--ver", action="store_true", help="show version on the control panel (incompatible by -np)")
ap2.add_argument("--md-sbf", metavar="FLAGS", type=u, default="downloads forms popups scripts top-navigation-by-user-activation", help="list of capabilities to ALLOW for README.md docs (volflag=md_sbf); see https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe#attr-sandbox")
ap2.add_argument("--lg-sbf", metavar="FLAGS", type=u, default="downloads forms popups scripts top-navigation-by-user-activation", help="list of capabilities to ALLOW for prologue/epilogue docs (volflag=lg_sbf)")
ap2.add_argument("--no-sb-md", action="store_true", help="don't sandbox README.md documents (volflags: no_sb_md | sb_md)")
ap2.add_argument("--no-sb-lg", action="store_true", help="don't sandbox prologue/epilogue docs (volflags: no_sb_lg | sb_lg); enables non-js support")
def add_debug(ap): def add_debug(ap):
ap2 = ap.add_argument_group('debug options') ap2 = ap.add_argument_group('debug options')
ap2.add_argument("--vc", action="store_true", help="verbose config file parser (explain config)")
ap2.add_argument("--cgen", action="store_true", help="generate config file from current config (best-effort; probably buggy)")
ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile; instead using a traditional file read loop") ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile; instead using a traditional file read loop")
ap2.add_argument("--no-scandir", action="store_true", help="disable scandir; instead using listdir + stat on each file") ap2.add_argument("--no-scandir", action="store_true", help="disable scandir; instead using listdir + stat on each file")
ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing before starting the httpd") ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing before starting the httpd")
@@ -964,6 +1003,8 @@ def run_argparse(
add_safety(ap, fk_salt) add_safety(ap, fk_salt)
add_optouts(ap) add_optouts(ap)
add_shutdown(ap) add_shutdown(ap)
add_yolo(ap)
add_hooks(ap)
add_ui(ap, retry) add_ui(ap, retry)
add_admin(ap) add_admin(ap)
add_logging(ap) add_logging(ap)
@@ -1026,6 +1067,9 @@ def main(argv: Optional[list[str]] = None) -> None:
showlic() showlic()
sys.exit(0) sys.exit(0)
if is_exe:
print("pybin: {}\n".format(pybin), end="")
ensure_locale() ensure_locale()
if HAVE_SSL: if HAVE_SSL:
ensure_cert() ensure_cert()
@@ -1058,7 +1102,8 @@ def main(argv: Optional[list[str]] = None) -> None:
if da: if da:
argv.extend(["--qr"]) argv.extend(["--qr"])
if ANYWIN or not os.geteuid(): if ANYWIN or not os.geteuid():
argv.extend(["-p80,443,3923", "--ign-ebind"]) # win10 allows symlinks if admin; can be unexpected
argv.extend(["-p80,443,3923", "--ign-ebind", "--no-dedup"])
except: except:
pass pass
@@ -1080,6 +1125,7 @@ def main(argv: Optional[list[str]] = None) -> None:
for fmtr in [RiceFormatter, RiceFormatter, Dodge11874, BasicDodge11874]: for fmtr in [RiceFormatter, RiceFormatter, Dodge11874, BasicDodge11874]:
try: try:
al = run_argparse(argv, fmtr, retry, nc) al = run_argparse(argv, fmtr, retry, nc)
dal = run_argparse([], fmtr, retry, nc)
break break
except SystemExit: except SystemExit:
raise raise
@@ -1089,6 +1135,7 @@ def main(argv: Optional[list[str]] = None) -> None:
try: try:
assert al # type: ignore assert al # type: ignore
assert dal # type: ignore
al.E = E # __init__ is not shared when oxidized al.E = E # __init__ is not shared when oxidized
except: except:
sys.exit(1) sys.exit(1)
@@ -1194,7 +1241,7 @@ def main(argv: Optional[list[str]] = None) -> None:
# signal.signal(signal.SIGINT, sighandler) # signal.signal(signal.SIGINT, sighandler)
SvcHub(al, argv, "".join(printed)).run() SvcHub(al, dal, argv, "".join(printed)).run()
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -1,8 +1,8 @@
# coding: utf-8 # coding: utf-8
VERSION = (1, 5, 3) VERSION = (1, 6, 5)
CODENAME = "babel" CODENAME = "cors k"
BUILD_DT = (2022, 12, 13) BUILD_DT = (2023, 2, 12)
S_VERSION = ".".join(map(str, VERSION)) S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT) S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

View File

@@ -14,6 +14,7 @@ from datetime import datetime
from .__init__ import ANYWIN, TYPE_CHECKING, WINDOWS from .__init__ import ANYWIN, TYPE_CHECKING, WINDOWS
from .bos import bos from .bos import bos
from .cfg import flagdescs, permdescs, vf_bmap, vf_cmap, vf_vmap
from .util import ( from .util import (
IMPLICATIONS, IMPLICATIONS,
META_NOBOTS, META_NOBOTS,
@@ -21,7 +22,7 @@ from .util import (
UNPLICATIONS, UNPLICATIONS,
Pebkac, Pebkac,
absreal, absreal,
fsenc, afsenc,
get_df, get_df,
humansize, humansize,
relchk, relchk,
@@ -36,7 +37,7 @@ if True: # pylint: disable=using-constant-test
from typing import Any, Generator, Optional, Union from typing import Any, Generator, Optional, Union
from .util import RootLogger from .util import NamedLogger, RootLogger
if TYPE_CHECKING: if TYPE_CHECKING:
pass pass
@@ -587,7 +588,7 @@ class VFS(object):
# if multiselect: add all items to archive root # if multiselect: add all items to archive root
# if single folder: the folder itself is the top-level item # if single folder: the folder itself is the top-level item
folder = "" if flt or not wrap else (vrem.split("/")[-1] or "top") folder = "" if flt or not wrap else (vrem.split("/")[-1].lstrip(".") or "top")
g = self.walk(folder, vrem, [], uname, [[True, False]], dots, scandir, False) g = self.walk(folder, vrem, [], uname, [[True, False]], dots, scandir, False)
for _, _, vpath, apath, files, rd, vd in g: for _, _, vpath, apath, files, rd, vd in g:
@@ -653,11 +654,15 @@ class AuthSrv(object):
args: argparse.Namespace, args: argparse.Namespace,
log_func: Optional["RootLogger"], log_func: Optional["RootLogger"],
warn_anonwrite: bool = True, warn_anonwrite: bool = True,
dargs: Optional[argparse.Namespace] = None,
) -> None: ) -> None:
self.args = args self.args = args
self.dargs = dargs or args
self.log_func = log_func self.log_func = log_func
self.warn_anonwrite = warn_anonwrite self.warn_anonwrite = warn_anonwrite
self.line_ctr = 0 self.line_ctr = 0
self.indent = ""
self.desc = []
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.reload() self.reload()
@@ -690,17 +695,47 @@ class AuthSrv(object):
raise Exception("invalid config") raise Exception("invalid config")
if src in mount.values(): if src in mount.values():
t = "warning: filesystem-path [{}] mounted in multiple locations:" t = "filesystem-path [{}] mounted in multiple locations:"
t = t.format(src) t = t.format(src)
for v in [k for k, v in mount.items() if v == src] + [dst]: for v in [k for k, v in mount.items() if v == src] + [dst]:
t += "\n /{}".format(v) t += "\n /{}".format(v)
self.log(t, c=3) self.log(t, c=3)
raise Exception("invalid config")
if not bos.path.isdir(src):
self.log("warning: filesystem-path does not exist: {}".format(src), 3)
mount[dst] = src mount[dst] = src
daxs[dst] = AXS() daxs[dst] = AXS()
mflags[dst] = {} mflags[dst] = {}
def _e(self, desc: Optional[str] = None) -> None:
if not self.args.vc or not self.line_ctr:
return
if not desc and not self.indent:
self.log("")
return
desc = desc or ""
desc = desc.replace("[", "[\033[0m").replace("]", "\033[90m]")
self.log(" >>> {}{}".format(self.indent, desc), "90")
def _l(self, ln: str, c: int, desc: str) -> None:
if not self.args.vc or not self.line_ctr:
return
if c < 10:
c += 30
t = "\033[97m{:4} \033[{}m{}{}"
if desc:
t += " \033[0;90m# {}\033[0m"
desc = desc.replace("[", "[\033[0m").replace("]", "\033[90m]")
self.log(t.format(self.line_ctr, c, self.indent, ln, desc))
def _parse_config_file( def _parse_config_file(
self, self,
fp: str, fp: str,
@@ -710,61 +745,140 @@ class AuthSrv(object):
mflags: dict[str, dict[str, Any]], mflags: dict[str, dict[str, Any]],
mount: dict[str, str], mount: dict[str, str],
) -> None: ) -> None:
skip = False self.desc = []
vol_src = None
vol_dst = None
self.line_ctr = 0 self.line_ctr = 0
expand_config_file(cfg_lines, fp, "") expand_config_file(cfg_lines, fp, "")
if self.args.vc:
lns = ["{:4}: {}".format(n, s) for n, s in enumerate(cfg_lines, 1)]
self.log("expanded config file (unprocessed):\n" + "\n".join(lns))
cfg_lines = upgrade_cfg_fmt(self.log, self.args, cfg_lines, fp)
cat = ""
catg = "[global]"
cata = "[accounts]"
catx = "accs:"
catf = "flags:"
ap: Optional[str] = None
vp: Optional[str] = None
for ln in cfg_lines: for ln in cfg_lines:
self.line_ctr += 1 self.line_ctr += 1
if not ln and vol_src is not None: ln = ln.split(" #")[0].strip()
vol_src = None if not ln.split("#")[0].strip():
vol_dst = None
if skip:
if not ln:
skip = False
continue continue
if not ln or ln.startswith("#"): subsection = ln in (catx, catf)
continue if ln.startswith("[") or subsection:
self._e()
if ap is None and vp is not None:
t = "the first line after [/{}] must be a filesystem path to share on that volume"
raise Exception(t.format(vp))
if vol_src is None: cat = ln
if ln.startswith("u "): if not subsection:
u, p = ln[2:].split(":", 1) ap = vp = None
acct[u] = p self.indent = ""
elif ln.startswith("-"):
skip = True # argv
else: else:
vol_src = ln self.indent = " "
if ln == catg:
t = "begin commandline-arguments (anything from --help; dashes are optional)"
self._l(ln, 6, t)
elif ln == cata:
self._l(ln, 5, "begin user-accounts section")
elif ln.startswith("[/"):
vp = ln[1:-1].strip("/")
self._l(ln, 2, "define volume at URL [/{}]".format(vp))
elif subsection:
if ln == catx:
self._l(ln, 5, "volume access config:")
else:
t = "volume-specific config (anything from --help-flags)"
self._l(ln, 6, t)
else:
raise Exception("invalid section header")
self.indent = " " if subsection else " "
continue continue
if vol_src and vol_dst is None: if cat == catg:
vol_dst = ln self._l(ln, 6, "")
if not vol_dst.startswith("/"): zt = split_cfg_ln(ln)
raise Exception('invalid mountpoint "{}"'.format(vol_dst)) for zs, za in zt.items():
zs = zs.lstrip("-")
if vol_src.startswith("~"): if za is True:
vol_src = os.path.expanduser(vol_src) self._e("└─argument [{}]".format(zs))
else:
# cfg files override arguments and previous files self._e("└─argument [{}] with value [{}]".format(zs, za))
vol_src = absreal(vol_src)
vol_dst = vol_dst.strip("/")
self._map_volume(vol_src, vol_dst, mount, daxs, mflags)
continue continue
try: if cat == cata:
lvl, uname = ln.split(" ", 1) try:
except: u, p = [zs.strip() for zs in ln.split(":", 1)]
lvl = ln self._l(ln, 5, "account [{}], password [{}]".format(u, p))
uname = "*" acct[u] = p
except:
t = 'lines inside the [accounts] section must be "username: password"'
raise Exception(t)
continue
if lvl == "a": if vp is not None and ap is None:
t = "WARNING (config-file): permission flag 'a' is deprecated; please use 'rw' instead" ap = ln
self.log(t, 1) if ap.startswith("~"):
ap = os.path.expanduser(ap)
assert vol_dst is not None ap = absreal(ap)
self._read_vol_str(lvl, uname, daxs[vol_dst], mflags[vol_dst]) self._l(ln, 2, "bound to filesystem-path [{}]".format(ap))
self._map_volume(ap, vp, mount, daxs, mflags)
continue
if cat == catx:
err = ""
try:
self._l(ln, 5, "volume access config:")
sk, sv = ln.split(":")
if re.sub("[rwmdgG]", "", sk) or not sk:
err = "invalid accs permissions list; "
raise Exception(err)
if " " in re.sub(", *", "", sv).strip():
err = "list of users is not comma-separated; "
raise Exception(err)
self._read_vol_str(sk, sv.replace(" ", ""), daxs[vp], mflags[vp])
continue
except:
err += "accs entries must be 'rwmdgG: user1, user2, ...'"
raise Exception(err)
if cat == catf:
err = ""
try:
self._l(ln, 6, "volume-specific config:")
zd = split_cfg_ln(ln)
fstr = ""
for sk, sv in zd.items():
bad = re.sub(r"[a-z0-9_]", "", sk)
if bad:
err = "bad characters [{}] in volflag name [{}]; "
err = err.format(bad, sk)
raise Exception(err)
if sv is True:
fstr += "," + sk
else:
fstr += ",{}={}".format(sk, sv)
self._read_vol_str("c", fstr[1:], daxs[vp], mflags[vp])
fstr = ""
if fstr:
self._read_vol_str("c", fstr[1:], daxs[vp], mflags[vp])
continue
except:
err += "flags entries (volflags) must be one of the following:\n 'flag1, flag2, ...'\n 'key: value'\n 'flag1, flag2, key: value'"
raise Exception(err)
raise Exception("unprocessable line in config")
self._e()
self.line_ctr = 0
def _read_vol_str( def _read_vol_str(
self, lvl: str, uname: str, axs: AXS, flags: dict[str, Any] self, lvl: str, uname: str, axs: AXS, flags: dict[str, Any]
@@ -803,6 +917,13 @@ class AuthSrv(object):
("G", axs.upget), ("G", axs.upget),
]: # b bb bbb ]: # b bb bbb
if ch in lvl: if ch in lvl:
if un == "*":
t = "└─add permission [{0}] for [everyone] -- {2}"
else:
t = "└─add permission [{0}] for user [{1}] -- {2}"
desc = permdescs.get(ch, "?")
self._e(t.format(ch, un, desc))
al.add(un) al.add(un)
def _read_volflag( def _read_volflag(
@@ -812,7 +933,13 @@ class AuthSrv(object):
value: Union[str, bool, list[str]], value: Union[str, bool, list[str]],
is_list: bool, is_list: bool,
) -> None: ) -> None:
if name not in ["mtp"]: desc = flagdescs.get(name, "?").replace("\n", " ")
if name not in ["mtp", "xbu", "xau", "xbr", "xar", "xbd", "xad", "xm"]:
if value is True:
t = "└─add volflag [{}] = {} ({})"
else:
t = "└─add volflag [{}] = [{}] ({})"
self._e(t.format(name, value, desc))
flags[name] = value flags[name] = value
return return
@@ -825,6 +952,7 @@ class AuthSrv(object):
vals += [value] vals += [value]
flags[name] = vals flags[name] = vals
self._e("volflag [{}] += {} ({})".format(name, vals, desc))
def reload(self) -> None: def reload(self) -> None:
""" """
@@ -875,6 +1003,18 @@ class AuthSrv(object):
lns: list[str] = [] lns: list[str] = []
try: try:
self._parse_config_file(cfg_fn, lns, acct, daxs, mflags, mount) self._parse_config_file(cfg_fn, lns, acct, daxs, mflags, mount)
zs = "#\033[36m cfg files in "
zst = [x[len(zs) :] for x in lns if x.startswith(zs)]
for zs in list(set(zst)):
self.log("discovered config files in " + zs, 6)
zs = "#\033[36m opening cfg file"
zstt = [x.split(" -> ") for x in lns if x.startswith(zs)]
zst = [(max(0, len(x) - 2) * " ") + "" + x[-1] for x in zstt]
t = "loaded {} config files:\n{}"
self.log(t.format(len(zst), "\n".join(zst)))
except: except:
lns = lns[: self.line_ctr] lns = lns[: self.line_ctr]
slns = ["{:4}: {}".format(n, s) for n, s in enumerate(lns, 1)] slns = ["{:4}: {}".format(n, s) for n, s in enumerate(lns, 1)]
@@ -955,7 +1095,7 @@ class AuthSrv(object):
promote = [] promote = []
demote = [] demote = []
for vol in vfs.all_vols.values(): for vol in vfs.all_vols.values():
zb = hashlib.sha512(fsenc(vol.realpath)).digest() zb = hashlib.sha512(afsenc(vol.realpath)).digest()
hid = base64.b32encode(zb).decode("ascii").lower() hid = base64.b32encode(zb).decode("ascii").lower()
vflag = vol.flags.get("hist") vflag = vol.flags.get("hist")
if vflag == "-": if vflag == "-":
@@ -974,7 +1114,7 @@ class AuthSrv(object):
except: except:
owner = None owner = None
me = fsenc(vol.realpath).rstrip() me = afsenc(vol.realpath).rstrip()
if owner not in [None, me]: if owner not in [None, me]:
continue continue
@@ -1114,19 +1254,30 @@ class AuthSrv(object):
if ptn: if ptn:
vol.flags[vf] = re.compile(ptn) vol.flags[vf] = re.compile(ptn)
for k in ["e2t", "e2ts", "e2tsr", "e2v", "e2vu", "e2vp", "xdev", "xvol"]: for ga, vf in vf_bmap().items():
if getattr(self.args, k):
vol.flags[k] = True
for ga, vf in (
("no_forget", "noforget"),
("no_dupe", "nodupe"),
("magic", "magic"),
("xlink", "xlink"),
):
if getattr(self.args, ga): if getattr(self.args, ga):
vol.flags[vf] = True vol.flags[vf] = True
for ve, vd in (
("nodotsrch", "dotsrch"),
("sb_lg", "no_sb_lg"),
("sb_md", "no_sb_md"),
):
if ve in vol.flags:
vol.flags.pop(vd, None)
for ga, vf in vf_vmap().items():
if vf not in vol.flags:
vol.flags[vf] = getattr(self.args, ga)
for k in ("nrand",):
if k not in vol.flags:
vol.flags[k] = getattr(self.args, k)
for k in ("nrand",):
if k in vol.flags:
vol.flags[k] = int(vol.flags[k])
for k1, k2 in IMPLICATIONS: for k1, k2 in IMPLICATIONS:
if k1 in vol.flags: if k1 in vol.flags:
vol.flags[k2] = True vol.flags[k2] = True
@@ -1151,8 +1302,32 @@ class AuthSrv(object):
if "mth" not in vol.flags: if "mth" not in vol.flags:
vol.flags["mth"] = self.args.mth vol.flags["mth"] = self.args.mth
# append parsers from argv to volflags # append additive args from argv to volflags
self._read_volflag(vol.flags, "mtp", self.args.mtp, True) hooks = "xbu xau xbr xar xbd xad xm".split()
for name in ["mtp"] + hooks:
self._read_volflag(vol.flags, name, getattr(self.args, name), True)
for hn in hooks:
cmds = vol.flags.get(hn)
if not cmds:
continue
ncmds = []
for cmd in cmds:
hfs = []
ocmd = cmd
while "," in cmd[:6]:
zs, cmd = cmd.split(",", 1)
hfs.append(zs)
if "c" in hfs and "f" in hfs:
t = "cannot combine flags c and f; removing f from eventhook [{}]"
self.log(t.format(ocmd), 1)
hfs = [x for x in hfs if x != "f"]
ocmd = ",".join(hfs + [cmd])
ncmds.append(ocmd)
vol.flags[hn] = ncmds
# d2d drops all database features for a volume # d2d drops all database features for a volume
for grp, rm in [["d2d", "e2d"], ["d2t", "e2t"], ["d2d", "e2v"]]: for grp, rm in [["d2d", "e2d"], ["d2t", "e2t"], ["d2d", "e2v"]]:
@@ -1193,6 +1368,9 @@ class AuthSrv(object):
self.log(t.format(vol.vpath), 1) self.log(t.format(vol.vpath), 1)
del vol.flags["lifetime"] del vol.flags["lifetime"]
if vol.flags.get("neversymlink") and not vol.flags.get("hardlink"):
vol.flags["copydupes"] = True
# verify tags mentioned by -mt[mp] are used by -mte # verify tags mentioned by -mt[mp] are used by -mte
local_mtp = {} local_mtp = {}
local_only_mtp = {} local_only_mtp = {}
@@ -1437,32 +1615,293 @@ class AuthSrv(object):
if not flag_r: if not flag_r:
sys.exit(0) sys.exit(0)
def cgen(self) -> None:
ret = [
"## WARNING:",
"## there will probably be mistakes in",
"## commandline-args (and maybe volflags)",
"",
]
csv = set("i p".split())
lst = set("c ihead mtm mtp xad xar xau xbd xbr xbu xm".split())
askip = set("a v c vc cgen".split())
# keymap from argv to vflag
amap = vf_bmap()
amap.update(vf_vmap())
amap.update(vf_cmap())
vmap = {v: k for k, v in amap.items()}
args = {k: v for k, v in vars(self.args).items()}
pops = []
for k1, k2 in IMPLICATIONS:
if args.get(k1):
pops.append(k2)
for pop in pops:
args.pop(pop, None)
if args:
ret.append("[global]")
for k, v in args.items():
if k in askip:
continue
if k in csv:
v = ", ".join([str(za) for za in v])
try:
v2 = getattr(self.dargs, k)
if v == v2:
continue
except:
continue
dk = " " + k.replace("_", "-")
if k in lst:
for ve in v:
ret.append("{}: {}".format(dk, ve))
else:
if v is True:
ret.append(dk)
elif v not in (False, None, ""):
ret.append("{}: {}".format(dk, v))
ret.append("")
if self.acct:
ret.append("[accounts]")
for u, p in self.acct.items():
ret.append(" {}: {}".format(u, p))
ret.append("")
for vol in self.vfs.all_vols.values():
ret.append("[/{}]".format(vol.vpath))
ret.append(" " + vol.realpath)
ret.append(" accs:")
perms = {
"r": "uread",
"w": "uwrite",
"m": "umove",
"d": "udel",
"g": "uget",
"G": "upget",
}
users = {}
for pkey in perms.values():
for uname in getattr(vol.axs, pkey):
try:
users[uname] += 1
except:
users[uname] = 1
lusers = [(v, k) for k, v in users.items()]
vperms = {}
for _, uname in sorted(lusers):
pstr = ""
for pchar, pkey in perms.items():
if uname in getattr(vol.axs, pkey):
pstr += pchar
if "g" in pstr and "G" in pstr:
pstr = pstr.replace("g", "")
try:
vperms[pstr].append(uname)
except:
vperms[pstr] = [uname]
for pstr, uname in vperms.items():
ret.append(" {}: {}".format(pstr, ", ".join(uname)))
trues = []
vals = []
for k, v in sorted(vol.flags.items()):
try:
ak = vmap[k]
if getattr(self.args, ak) is v:
continue
except:
pass
if k in lst:
for ve in v:
vals.append("{}: {}".format(k, ve))
elif v is True:
trues.append(k)
elif v is not False:
vals.append("{}: {}".format(k, v))
pops = []
for k1, k2 in IMPLICATIONS:
if k1 in trues:
pops.append(k2)
trues = [x for x in trues if x not in pops]
if trues:
vals.append(", ".join(trues))
if vals:
ret.append(" flags:")
for zs in vals:
ret.append(" " + zs)
ret.append("")
self.log("generated config:\n\n" + "\n".join(ret))
def split_cfg_ln(ln: str) -> dict[str, Any]:
# "a, b, c: 3" => {a:true, b:true, c:3}
ret = {}
while True:
ln = ln.strip()
if not ln:
break
ofs_sep = ln.find(",") + 1
ofs_var = ln.find(":") + 1
if not ofs_sep and not ofs_var:
ret[ln] = True
break
if ofs_sep and (ofs_sep < ofs_var or not ofs_var):
k, ln = ln.split(",", 1)
ret[k.strip()] = True
else:
k, ln = ln.split(":", 1)
ret[k.strip()] = ln.strip()
break
return ret
def expand_config_file(ret: list[str], fp: str, ipath: str) -> None: def expand_config_file(ret: list[str], fp: str, ipath: str) -> None:
"""expand all % file includes""" """expand all % file includes"""
fp = absreal(fp) fp = absreal(fp)
ipath += " -> " + fp
ret.append("#\033[36m opening cfg file{}\033[0m".format(ipath))
if len(ipath.split(" -> ")) > 64: if len(ipath.split(" -> ")) > 64:
raise Exception("hit max depth of 64 includes") raise Exception("hit max depth of 64 includes")
if os.path.isdir(fp): if os.path.isdir(fp):
for fn in sorted(os.listdir(fp)): names = os.listdir(fp)
ret.append("#\033[36m cfg files in {} => {}\033[0m".format(fp, names))
for fn in sorted(names):
fp2 = os.path.join(fp, fn) fp2 = os.path.join(fp, fn)
if not os.path.isfile(fp2): if not fp2.endswith(".conf") or fp2 in ipath:
continue # dont recurse continue
expand_config_file(ret, fp2, ipath) expand_config_file(ret, fp2, ipath)
return return
ipath += " -> " + fp
ret.append("#\033[36m opening cfg file{}\033[0m".format(ipath))
with open(fp, "rb") as f: with open(fp, "rb") as f:
for ln in [x.decode("utf-8").strip() for x in f]: for oln in [x.decode("utf-8").rstrip() for x in f]:
ln = oln.split(" #")[0].strip()
if ln.startswith("% "): if ln.startswith("% "):
pad = " " * len(oln.split("%")[0])
fp2 = ln[1:].strip() fp2 = ln[1:].strip()
fp2 = os.path.join(os.path.dirname(fp), fp2) fp2 = os.path.join(os.path.dirname(fp), fp2)
ofs = len(ret)
expand_config_file(ret, fp2, ipath) expand_config_file(ret, fp2, ipath)
for n in range(ofs, len(ret)):
ret[n] = pad + ret[n]
continue continue
ret.append(ln) ret.append(oln)
ret.append("#\033[36m closed{}\033[0m".format(ipath)) ret.append("#\033[36m closed{}\033[0m".format(ipath))
def upgrade_cfg_fmt(
    log: Optional["NamedLogger"], args: argparse.Namespace, orig: list[str], cfg_fp: str
) -> list[str]:
    """convert from v1 to v2 format"""
    # if any v2-only marker is present (section headers, accs:/flags: keys,
    # volume sections "[/...]"), or the file is nothing but % includes,
    # it is already v2 -- return it unmodified
    zst = [x.split("#")[0].strip() for x in orig]
    zst = [x for x in zst if x]
    if (
        "[global]" in zst
        or "[accounts]" in zst
        or "accs:" in zst
        or "flags:" in zst
        or [x for x in zst if x.startswith("[/")]
        or len(zst) == len([x for x in zst if x.startswith("%")])
    ):
        return orig

    # detect whether this config was pulled in through a % include
    # (expand_config_file injects exactly one "opening cfg file" banner per file)
    zst = [x for x in orig if "#\033[36m opening cfg file" not in x]
    incl = len(zst) != len(orig) - 1
    t = "upgrading config file [{}] from v1 to v2"
    if not args.vc:
        t += ". Run with argument '--vc' to see the converted config if you want to upgrade"
    if incl:
        t += ". Please don't include v1 configs from v2 files or vice versa! Upgrade all of them at the same time."
    if log:
        log(t.format(cfg_fp), 3)

    # state machine: cat is the v2 section currently being emitted,
    # ap/vp are the pending abspath/vpath of the volume being collected
    ret = []
    vp = ""
    ap = ""
    cat = ""
    catg = "[global]"
    cata = "[accounts]"
    catx = " accs:"
    catf = " flags:"
    for ln in orig:
        sn = ln.strip()
        if not sn:
            # blank line terminates the current volume definition
            cat = vp = ap = ""

        if not sn.split("#")[0]:
            # blank or comment-only line: pass through unchanged
            ret.append(ln)
        elif sn.startswith("-") and cat in ("", catg):
            # v1 global option ("-flag [value]") -> [global] section entry
            if cat != catg:
                cat = catg
                ret.append(cat)
            sn = sn.lstrip("-")
            zst = sn.split(" ", 1)
            if len(zst) > 1:
                sn = "{}: {}".format(zst[0], zst[1].strip())
            ret.append(" " + sn)
        elif sn.startswith("u ") and cat in ("", catg, cata):
            # v1 account ("u name:password") -> [accounts] section entry
            if cat != cata:
                cat = cata
                ret.append(cat)
            s1, s2 = sn[1:].split(":", 1)
            ret.append(" {}: {}".format(s1.strip(), s2.strip()))
        elif not ap:
            # first bare line of a volume block is the filesystem abspath
            ap = sn
        elif not vp:
            # second bare line is the vpath; emit the "[/vpath]" section now
            vp = "/" + sn.strip("/")
            cat = "[{}]".format(vp)
            ret.append(cat)
            ret.append(" " + ap)
        elif sn.startswith("c "):
            # v1 volflag ("c flag[=value]") -> flags: subsection
            if cat != catf:
                cat = catf
                ret.append(cat)
            sn = sn[1:].strip()
            if "=" in sn:
                zst = sn.split("=", 1)
                sn = zst[0].replace(",", ", ")
                sn += ": " + zst[1]
            else:
                sn = sn.replace(",", ", ")
            ret.append(" " + sn)
        elif sn[:1] in "rwmdgG":
            # v1 permission line ("rw user1 user2") -> accs: subsection;
            # a perm with no users applies to everyone ("*")
            if cat != catx:
                cat = catx
                ret.append(cat)
            zst = sn.split(" ")
            zst = [x for x in zst if x]
            if len(zst) == 1:
                zst.append("*")
            ret.append(" {}: {}".format(zst[0], ", ".join(zst[1:])))
        else:
            # unparseable line: log the whole config with line numbers and bail
            # NOTE(review): the raised message (t1) keeps the "{}" placeholder unformatted
            t = "did not understand line {} in the config"
            t1 = t
            n = 0
            for ln in orig:
                n += 1
                t += "\n{:4} {}".format(n, ln)
            if log:
                log(t, 1)
            else:
                print("\033[31m" + t)
            raise Exception(t1)

    if args.vc and log:
        # show the converted config so the user can copy/paste it
        t = "new config syntax (copy/paste this to upgrade your config):\n"
        t += "\n# ======================[ begin upgraded config ]======================\n\n"
        for ln in ret:
            t += ln + "\n"
        t += "\n# ======================[ end of upgraded config ]======================\n"
        log(t)

    return ret

149
copyparty/cfg.py Normal file
View File

@@ -0,0 +1,149 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
# awk -F\" '/add_argument\("-[^-]/{print(substr($2,2))}' copyparty/__main__.py | sort | tr '\n' ' '
zs = "a c e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vp e2vu ed emp i j lo mcr mte mth mtm mtp nb nc nid nih nw p q s ss sss v z zv"
# every argv flag which takes a single dash (for arg-parsing/config purposes)
onedash = {flag for flag in zs.split()}
def vf_bmap() -> dict[str, str]:
    """argv-to-volflag: simple bools"""
    # options whose volflag name differs from the argv name
    ret = {
        "never_symlink": "neversymlink",
        "no_dedup": "copydupes",
        "no_dupe": "nodupe",
        "no_forget": "noforget",
    }
    # options mapped 1:1 to an identically-named volflag
    same = (
        "dotsrch",
        "e2t",
        "e2ts",
        "e2tsr",
        "e2v",
        "e2vu",
        "e2vp",
        "hardlink",
        "magic",
        "no_sb_md",
        "no_sb_lg",
        "rand",
        "xdev",
        "xlink",
        "xvol",
    )
    ret.update((k, k) for k in same)
    return ret
def vf_vmap() -> dict[str, str]:
    """argv-to-volflag: simple values"""
    # both options map 1:1 to an identically-named volflag
    return {k: k for k in ("lg_sbf", "md_sbf")}
def vf_cmap() -> dict[str, str]:
    """argv-to-volflag: complex/lists"""
    # each option maps 1:1 to an identically-named volflag
    return {k: k for k in ("dbd", "html_head", "mte", "mth", "nrand")}
# single-letter volume permission -> human-readable description
# (shown by the --help text and the config explainer/generator)
permdescs = {
    "r": "read; list folder contents, download files",
    "w": 'write; upload files; need "r" to see the uploads',
    "m": 'move; move files and folders; need "w" at destination',
    "d": "delete; permanently delete files and folders",
    "g": "get; download files, but cannot see folder contents",
    "G": 'upget; same as "g" but can see filekeys of their own uploads',
}
# volflag documentation grouped by category: {category-title: {flag: description}};
# keys may carry an "=example" suffix which is stripped when building flagdescs
flagcats = {
    "uploads, general": {
        "nodupe": "rejects existing files (instead of symlinking them)",
        "hardlink": "does dedup with hardlinks instead of symlinks",
        "neversymlink": "disables symlink fallback; full copy instead",
        "copydupes": "disables dedup, always saves full copies of dupes",
        "daw": "enable full WebDAV write support (dangerous);\nPUT-operations will now \033[1;31mOVERWRITE\033[0;35m existing files",
        "nosub": "forces all uploads into the top folder of the vfs",
        "magic": "enables filetype detection for nameless uploads",
        "gz": "allows server-side gzip of uploads with ?gz (also c,xz)",
        "pk": "forces server-side compression, optional arg: xz,9",
    },
    "upload rules": {
        "maxn=250,600": "max 250 uploads over 15min",
        "maxb=1g,300": "max 1 GiB over 5min (suffixes: b, k, m, g)",
        "rand": "force randomized filenames, 9 chars long by default",
        "nrand=N": "randomized filenames are N chars long",
        "sz=1k-3m": "allow filesizes between 1 KiB and 3MiB",
        "df=1g": "ensure 1 GiB free disk space",
    },
    "upload rotation\n(moves all uploads into the specified folder structure)": {
        "rotn=100,3": "3 levels of subfolders with 100 entries in each",
        "rotf=%Y-%m/%d-%H": "date-formatted organizing",
        "lifetime=3600": "uploads are deleted after 1 hour",
    },
    "database, general": {
        "e2d": "enable database; makes files searchable + enables upload dedup",
        "e2ds": "scan writable folders for new files on startup; also sets -e2d",
        "e2dsa": "scans all folders for new files on startup; also sets -e2d",
        "e2t": "enable multimedia indexing; makes it possible to search for tags",
        "e2ts": "scan existing files for tags on startup; also sets -e2t",
        "e2tsa": "delete all metadata from DB (full rescan); also sets -e2ts",
        "d2ts": "disables metadata collection for existing files",
        "d2ds": "disables onboot indexing, overrides -e2ds*",
        "d2t": "disables metadata collection, overrides -e2t*",
        "d2v": "disables file verification, overrides -e2v*",
        "d2d": "disables all database stuff, overrides -e2*",
        "hist=/tmp/cdb": "puts thumbnails and indexes at that location",
        "scan=60": "scan for new files every 60sec, same as --re-maxage",
        "nohash=\\.iso$": "skips hashing file contents if path matches *.iso",
        "noidx=\\.iso$": "fully ignores the contents at paths matching *.iso",
        "noforget": "don't forget files when deleted from disk",
        "dbd=[acid|swal|wal|yolo]": "database speed-durability tradeoff",
        "xlink": "cross-volume dupe detection / linking",
        "xdev": "do not descend into other filesystems",
        "xvol": "skip symlinks leaving the volume root",
        "dotsrch": "show dotfiles in search results",
        "nodotsrch": "hide dotfiles in search results (default)",
    },
    'database, audio tags\n"mte", "mth", "mtp", "mtm" all work the same as -mte, -mth, ...': {
        "mtp=.bpm=f,audio-bpm.py": 'uses the "audio-bpm.py" program to\ngenerate ".bpm" tags from uploads (f = overwrite tags)',
        "mtp=ahash,vhash=media-hash.py": "collects two tags at once",
    },
    "thumbnails": {
        "dthumb": "disables all thumbnails",
        "dvthumb": "disables video thumbnails",
        "dathumb": "disables audio thumbnails (spectrograms)",
        "dithumb": "disables image thumbnails",
    },
    "event hooks\n(better explained in --help-hooks)": {
        "xbu=CMD": "execute CMD before a file upload starts",
        "xau=CMD": "execute CMD after a file upload finishes",
        "xbr=CMD": "execute CMD before a file rename/move",
        "xar=CMD": "execute CMD after a file rename/move",
        "xbd=CMD": "execute CMD before a file delete",
        "xad=CMD": "execute CMD after a file delete",
        "xm=CMD": "execute CMD on message",
    },
    "client and ux": {
        "html_head=TXT": "includes TXT in the <head>",
        "robots": "allows indexing by search engines (default)",
        "norobots": "kindly asks search engines to leave",
        "no_sb_md": "disable js sandbox for markdown files",
        "no_sb_lg": "disable js sandbox for prologue/epilogue",
        "sb_md": "enable js sandbox for markdown files (default)",
        "sb_lg": "enable js sandbox for prologue/epilogue (default)",
        "md_sbf": "list of markdown-sandbox safeguards to disable",
        "lg_sbf": "list of *logue-sandbox safeguards to disable",
    },
    "others": {
        "fk=8": 'generates per-file accesskeys,\nwhich will then be required at the "g" permission'
    },
}
flagdescs = {k.split("=")[0]: v for tab in flagcats.values() for k, v in tab.items()}

View File

@@ -13,9 +13,19 @@ from pyftpdlib.filesystems import AbstractedFS, FilesystemError
from pyftpdlib.handlers import FTPHandler from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer from pyftpdlib.servers import FTPServer
from .__init__ import PY2, TYPE_CHECKING, E from .__init__ import ANYWIN, PY2, TYPE_CHECKING, E
from .bos import bos from .bos import bos
from .util import Daemon, Pebkac, exclude_dotfiles, fsenc, ipnorm from .util import (
Daemon,
Pebkac,
exclude_dotfiles,
fsenc,
ipnorm,
pybin,
relchk,
sanitize_fn,
vjoin,
)
try: try:
from pyftpdlib.ioloop import IOLoop from pyftpdlib.ioloop import IOLoop
@@ -125,6 +135,13 @@ class FtpFs(AbstractedFS):
) -> str: ) -> str:
try: try:
vpath = vpath.replace("\\", "/").lstrip("/") vpath = vpath.replace("\\", "/").lstrip("/")
rd, fn = os.path.split(vpath)
if ANYWIN and relchk(rd):
logging.warning("malicious vpath: %s", vpath)
raise FilesystemError("unsupported characters in filepath")
fn = sanitize_fn(fn or "", "", [".prologue.html", ".epilogue.html"])
vpath = vjoin(rd, fn)
vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, r, w, m, d) vfs, rem = self.hub.asrv.vfs.get(vpath, self.uname, r, w, m, d)
if not vfs.realpath: if not vfs.realpath:
raise FilesystemError("no filesystem mounted at this path") raise FilesystemError("no filesystem mounted at this path")
@@ -402,7 +419,7 @@ class Ftpd(object):
h1 = SftpHandler h1 = SftpHandler
except: except:
t = "\nftps requires pyopenssl;\nplease run the following:\n\n {} -m pip install --user pyopenssl\n" t = "\nftps requires pyopenssl;\nplease run the following:\n\n {} -m pip install --user pyopenssl\n"
print(t.format(sys.executable)) print(t.format(pybin))
sys.exit(1) sys.exit(1)
h1.certfile = os.path.join(self.args.E.cfg, "cert.pem") h1.certfile = os.path.join(self.args.E.cfg, "cert.pem")
@@ -435,10 +452,18 @@ class Ftpd(object):
lgr = logging.getLogger("pyftpdlib") lgr = logging.getLogger("pyftpdlib")
lgr.setLevel(logging.DEBUG if self.args.ftpv else logging.INFO) lgr.setLevel(logging.DEBUG if self.args.ftpv else logging.INFO)
ips = self.args.i
if "::" in ips:
ips.append("0.0.0.0")
ioloop = IOLoop() ioloop = IOLoop()
for ip in self.args.i: for ip in ips:
for h, lp in hs: for h, lp in hs:
FTPServer((ip, int(lp)), h, ioloop) try:
FTPServer((ip, int(lp)), h, ioloop)
except:
if ip != "0.0.0.0" or "::" not in ips:
raise
Daemon(ioloop.loop, "ftp") Daemon(ioloop.loop, "ftp")

View File

@@ -28,6 +28,7 @@ except:
pass pass
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, EnvParams, unicode from .__init__ import ANYWIN, PY2, TYPE_CHECKING, EnvParams, unicode
from .__version__ import S_VERSION
from .authsrv import VFS # typechk from .authsrv import VFS # typechk
from .bos import bos from .bos import bos
from .star import StreamTar from .star import StreamTar
@@ -51,19 +52,21 @@ from .util import (
guess_mime, guess_mime,
gzip_orig_sz, gzip_orig_sz,
hashcopy, hashcopy,
hidedir,
html_bescape, html_bescape,
html_escape, html_escape,
humansize, humansize,
ipnorm, ipnorm,
min_ex, min_ex,
quotep, quotep,
rand_name,
read_header, read_header,
read_socket, read_socket,
read_socket_chunked, read_socket_chunked,
read_socket_unbounded, read_socket_unbounded,
relchk, relchk,
ren_open, ren_open,
hidedir, runhook,
s3enc, s3enc,
sanitize_fn, sanitize_fn,
sendfile_kern, sendfile_kern,
@@ -125,9 +128,9 @@ class HttpCli(object):
self.mode = " " self.mode = " "
self.req = " " self.req = " "
self.http_ver = " " self.http_ver = " "
self.host = " "
self.ua = " " self.ua = " "
self.is_rclone = False self.is_rclone = False
self.is_ancient = False
self.ouparam: dict[str, str] = {} self.ouparam: dict[str, str] = {}
self.uparam: dict[str, str] = {} self.uparam: dict[str, str] = {}
self.cookies: dict[str, str] = {} self.cookies: dict[str, str] = {}
@@ -156,8 +159,8 @@ class HttpCli(object):
self.trailing_slash = True self.trailing_slash = True
self.out_headerlist: list[tuple[str, str]] = [] self.out_headerlist: list[tuple[str, str]] = []
self.out_headers = { self.out_headers = {
"Access-Control-Allow-Origin": "*", "Vary": "Origin, PW, Cookie",
"Cache-Control": "no-store; max-age=0", "Cache-Control": "no-store, max-age=0",
} }
h = self.args.html_head h = self.args.html_head
if self.args.no_robots: if self.args.no_robots:
@@ -252,7 +255,6 @@ class HttpCli(object):
self.ua = self.headers.get("user-agent", "") self.ua = self.headers.get("user-agent", "")
self.is_rclone = self.ua.startswith("rclone/") self.is_rclone = self.ua.startswith("rclone/")
self.is_ancient = self.ua.startswith("Mozilla/4.")
zs = self.headers.get("connection", "").lower() zs = self.headers.get("connection", "").lower()
self.keepalive = "close" not in zs and ( self.keepalive = "close" not in zs and (
@@ -261,6 +263,9 @@ class HttpCli(object):
self.is_https = ( self.is_https = (
self.headers.get("x-forwarded-proto", "").lower() == "https" or self.tls self.headers.get("x-forwarded-proto", "").lower() == "https" or self.tls
) )
self.host = self.headers.get("host") or "{}:{}".format(
*list(self.s.getsockname()[:2])
)
n = self.args.rproxy n = self.args.rproxy
if n: if n:
@@ -279,6 +284,7 @@ class HttpCli(object):
self.log_src = self.conn.set_rproxy(self.ip) self.log_src = self.conn.set_rproxy(self.ip)
self.is_vproxied = bool(self.args.R) self.is_vproxied = bool(self.args.R)
self.host = self.headers.get("x-forwarded-host") or self.host
if self.is_banned(): if self.is_banned():
return False return False
@@ -294,7 +300,10 @@ class HttpCli(object):
else: else:
self.keepalive = False self.keepalive = False
if self.args.ihead: ptn: Optional[Pattern[str]] = self.conn.lf_url # mypy404
self.do_log = not ptn or not ptn.search(self.req)
if self.args.ihead and self.do_log:
keys = self.args.ihead keys = self.args.ihead
if "*" in keys: if "*" in keys:
keys = list(sorted(self.headers.keys())) keys = list(sorted(self.headers.keys()))
@@ -339,11 +348,12 @@ class HttpCli(object):
if zso: if zso:
zsll = [x.split("=", 1) for x in zso.split(";") if "=" in x] zsll = [x.split("=", 1) for x in zso.split(";") if "=" in x]
cookies = {k.strip(): unescape_cookie(zs) for k, zs in zsll} cookies = {k.strip(): unescape_cookie(zs) for k, zs in zsll}
for kc, ku in [["cppwd", "pw"], ["b", "b"]]: cookie_pw = cookies.get("cppws") or cookies.get("cppwd") or ""
if kc in cookies and ku not in uparam: if "b" in cookies and "b" not in uparam:
uparam[ku] = cookies[kc] uparam["b"] = cookies["b"]
else: else:
cookies = {} cookies = {}
cookie_pw = ""
if len(uparam) > 10 or len(cookies) > 50: if len(uparam) > 10 or len(cookies) > 50:
raise Pebkac(400, "u wot m8") raise Pebkac(400, "u wot m8")
@@ -356,25 +366,24 @@ class HttpCli(object):
if ANYWIN: if ANYWIN:
ok = ok and not relchk(self.vpath) ok = ok and not relchk(self.vpath)
if not ok: if not ok and (self.vpath != "*" or self.mode != "OPTIONS"):
self.log("invalid relpath [{}]".format(self.vpath)) self.log("invalid relpath [{}]".format(self.vpath))
return self.tx_404() and self.keepalive return self.tx_404() and self.keepalive
pwd = ""
zso = self.headers.get("authorization") zso = self.headers.get("authorization")
bauth = ""
if zso: if zso:
try: try:
zb = zso.split(" ")[1].encode("ascii") zb = zso.split(" ")[1].encode("ascii")
zs = base64.b64decode(zb).decode("utf-8") zs = base64.b64decode(zb).decode("utf-8")
# try "pwd", "x:pwd", "pwd:x" # try "pwd", "x:pwd", "pwd:x"
for zs in [zs] + zs.split(":", 1)[::-1]: for bauth in [zs] + zs.split(":", 1)[::-1]:
if self.asrv.iacct.get(zs): if self.asrv.iacct.get(bauth):
pwd = zs
break break
except: except:
pass pass
self.pw = uparam.get("pw") or pwd self.pw = uparam.get("pw") or self.headers.get("pw") or bauth or cookie_pw
self.uname = self.asrv.iacct.get(self.pw) or "*" self.uname = self.asrv.iacct.get(self.pw) or "*"
self.rvol = self.asrv.vfs.aread[self.uname] self.rvol = self.asrv.vfs.aread[self.uname]
self.wvol = self.asrv.vfs.awrite[self.uname] self.wvol = self.asrv.vfs.awrite[self.uname]
@@ -383,17 +392,17 @@ class HttpCli(object):
self.gvol = self.asrv.vfs.aget[self.uname] self.gvol = self.asrv.vfs.aget[self.uname]
self.upvol = self.asrv.vfs.apget[self.uname] self.upvol = self.asrv.vfs.apget[self.uname]
if self.pw: if self.pw and (
self.out_headerlist.append(("Set-Cookie", self.get_pwd_cookie(self.pw)[0])) self.pw != cookie_pw or self.conn.freshen_pwd + 30 < time.time()
):
self.conn.freshen_pwd = time.time()
self.get_pwd_cookie(self.pw)
if self.is_rclone: if self.is_rclone:
uparam["dots"] = "" uparam["dots"] = ""
uparam["b"] = "" uparam["b"] = ""
cookies["b"] = "" cookies["b"] = ""
ptn: Optional[Pattern[str]] = self.conn.lf_url # mypy404
self.do_log = not ptn or not ptn.search(self.req)
( (
self.can_read, self.can_read,
self.can_write, self.can_write,
@@ -404,15 +413,22 @@ class HttpCli(object):
) = self.asrv.vfs.can_access(self.vpath, self.uname) ) = self.asrv.vfs.can_access(self.vpath, self.uname)
try: try:
# getattr(self.mode) is not yet faster than this cors_k = self._cors()
if self.mode in ["GET", "HEAD"]: if self.mode in ("GET", "HEAD"):
return self.handle_get() and self.keepalive return self.handle_get() and self.keepalive
elif self.mode == "POST": if self.mode == "OPTIONS":
return self.handle_options() and self.keepalive
if not cors_k:
origin = self.headers.get("origin", "<?>")
self.log("cors-reject {} from {}".format(self.mode, origin), 3)
raise Pebkac(403, "no surfing")
# getattr(self.mode) is not yet faster than this
if self.mode == "POST":
return self.handle_post() and self.keepalive return self.handle_post() and self.keepalive
elif self.mode == "PUT": elif self.mode == "PUT":
return self.handle_put() and self.keepalive return self.handle_put() and self.keepalive
elif self.mode == "OPTIONS":
return self.handle_options() and self.keepalive
elif self.mode == "PROPFIND": elif self.mode == "PROPFIND":
return self.handle_propfind() and self.keepalive return self.handle_propfind() and self.keepalive
elif self.mode == "DELETE": elif self.mode == "DELETE":
@@ -591,12 +607,12 @@ class HttpCli(object):
if self.is_rclone: if self.is_rclone:
return "" return ""
cmap = {"pw": "cppwd"} kv = {k: zs for k, zs in self.uparam.items() if k not in rm}
kv = { if "pw" in kv:
k: zs pw = self.cookies.get("cppws") or self.cookies.get("cppwd")
for k, zs in self.uparam.items() if kv["pw"] == pw:
if k not in rm and self.cookies.get(cmap.get(k, k)) != zs del kv["pw"]
}
kv.update(add) kv.update(add)
if not kv: if not kv:
return "" return ""
@@ -631,6 +647,63 @@ class HttpCli(object):
return True return True
def _cors(self) -> bool:
    """CORS gatekeeper; sets the Access-Control-* response headers and
    returns True if the request origin is trusted (or absent),
    False if the request should be rejected outright
    """
    ih = self.headers
    origin = ih.get("origin")
    if not origin:
        # no Origin header; a cross-site sandboxed iframe still counts as foreign
        sfsite = ih.get("sec-fetch-site")
        if sfsite and sfsite.lower().startswith("cross"):
            origin = ":|"  # sandboxed iframe
        else:
            # same-origin / non-browser request; nothing to do
            return True

    oh = self.out_headers
    origin = origin.lower()
    # --acao values plus this server's own scheme://host (port stripped below)
    good_origins = self.args.acao + [
        "{}://{}".format(
            "https" if self.is_https else "http",
            self.host.lower().split(":")[0],
        )
    ]
    # compare origin with any ":port" and trailing "/" removed
    if re.sub(r"(:[0-9]{1,5})?/?$", "", origin) in good_origins:
        good_origin = True
        bad_hdrs = ("",)
    else:
        good_origin = False
        bad_hdrs = ("", "pw")  # untrusted origins may not send the pw header

    # '*' blocks all credentials (cookies, http-auth);
    # exact-match for Origin is necessary to unlock those,
    # however yolo-requests (?pw=) are always allowed
    acah = ih.get("access-control-request-headers", "")
    acao = (origin if good_origin else None) or (
        "*" if "*" in good_origins else None
    )
    if self.args.allow_csrf:
        acao = origin or acao or "*"  # explicitly permit impersonation
        acam = ", ".join(self.conn.hsrv.mallow)  # and all methods + headers
        oh["Access-Control-Allow-Credentials"] = "true"
        good_origin = True
    else:
        acam = ", ".join(self.args.acam)
        # wash client-requested headers and roll with that
        if "range" not in acah.lower():
            acah += ",Range"  # firefox
        req_h = acah.split(",")
        req_h = [x.strip() for x in req_h]
        req_h = [x for x in req_h if x.lower() not in bad_hdrs]
        acah = ", ".join(req_h)

    if not acao:
        # no acceptable origin to echo back; reject
        return False

    oh["Access-Control-Allow-Origin"] = acao
    oh["Access-Control-Allow-Methods"] = acam.upper()
    if acah:
        oh["Access-Control-Allow-Headers"] = acah

    return good_origin
def handle_get(self) -> bool: def handle_get(self) -> bool:
if self.do_log: if self.do_log:
logmsg = "{:4} {}".format(self.mode, self.req) logmsg = "{:4} {}".format(self.mode, self.req)
@@ -679,15 +752,16 @@ class HttpCli(object):
if "tree" in self.uparam: if "tree" in self.uparam:
return self.tx_tree() return self.tx_tree()
if "delete" in self.uparam:
return self.handle_rm([])
if "move" in self.uparam:
return self.handle_mv()
if "scan" in self.uparam: if "scan" in self.uparam:
return self.scanvol() return self.scanvol()
if self.args.getmod:
if "delete" in self.uparam:
return self.handle_rm([])
if "move" in self.uparam:
return self.handle_mv()
if not self.vpath: if not self.vpath:
if "reload" in self.uparam: if "reload" in self.uparam:
return self.handle_reload() return self.handle_reload()
@@ -835,7 +909,7 @@ class HttpCli(object):
raise Pebkac(404) raise Pebkac(404)
fgen = itertools.chain([topdir], fgen) # type: ignore fgen = itertools.chain([topdir], fgen) # type: ignore
vtop = vjoin(vn.vpath, rem) vtop = vjoin(self.args.R, vjoin(vn.vpath, rem))
chunksz = 0x7FF8 # preferred by nginx or cf (dunno which) chunksz = 0x7FF8 # preferred by nginx or cf (dunno which)
@@ -935,7 +1009,7 @@ class HttpCli(object):
el = xroot.find(r"./{DAV:}response") el = xroot.find(r"./{DAV:}response")
assert el assert el
e2 = mktnod("D:href", quotep("/" + self.vpath)) e2 = mktnod("D:href", quotep(self.args.SRS + self.vpath))
el.insert(0, e2) el.insert(0, e2)
el = xroot.find(r"./{DAV:}response/{DAV:}propstat") el = xroot.find(r"./{DAV:}response/{DAV:}propstat")
@@ -985,12 +1059,17 @@ class HttpCli(object):
lk = parse_xml(txt) lk = parse_xml(txt)
assert lk.tag == "{DAV:}lockinfo" assert lk.tag == "{DAV:}lockinfo"
if not lk.find(r"./{DAV:}depth"): token = str(uuid.uuid4())
lk.append(mktnod("D:depth", "infinity"))
lk.append(mkenod("D:timeout", mktnod("D:href", "Second-3310"))) if not lk.find(r"./{DAV:}depth"):
lk.append(mkenod("D:locktoken", mktnod("D:href", uuid.uuid4().urn))) depth = self.headers.get("depth", "infinity")
lk.append(mkenod("D:lockroot", mktnod("D:href", "/" + quotep(self.vpath)))) lk.append(mktnod("D:depth", depth))
lk.append(mktnod("D:timeout", "Second-3310"))
lk.append(mkenod("D:locktoken", mktnod("D:href", token)))
lk.append(
mkenod("D:lockroot", mktnod("D:href", quotep(self.args.SRS + self.vpath)))
)
lk2 = mkenod("D:activelock") lk2 = mkenod("D:activelock")
xroot = mkenod("D:prop", mkenod("D:lockdiscovery", lk2)) xroot = mkenod("D:prop", mkenod("D:lockdiscovery", lk2))
@@ -1000,11 +1079,13 @@ class HttpCli(object):
ret = '<?xml version="1.0" encoding="{}"?>\n'.format(uenc) ret = '<?xml version="1.0" encoding="{}"?>\n'.format(uenc)
ret += ET.tostring(xroot).decode("utf-8") ret += ET.tostring(xroot).decode("utf-8")
rc = 200
if self.can_write and not bos.path.isfile(abspath): if self.can_write and not bos.path.isfile(abspath):
with open(fsenc(abspath), "wb") as _: with open(fsenc(abspath), "wb") as _:
pass rc = 201
self.reply(ret.encode(enc, "replace"), 200, "text/xml; charset=" + enc) self.out_headers["Lock-Token"] = "<{}>".format(token)
self.reply(ret.encode(enc, "replace"), rc, "text/xml; charset=" + enc)
return True return True
def handle_unlock(self) -> bool: def handle_unlock(self) -> bool:
@@ -1082,26 +1163,16 @@ class HttpCli(object):
if self.do_log: if self.do_log:
self.log("OPTIONS " + self.req) self.log("OPTIONS " + self.req)
ret = { oh = self.out_headers
"Allow": "GET, HEAD, POST, PUT, OPTIONS", oh["Allow"] = ", ".join(self.conn.hsrv.mallow)
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "*",
"Access-Control-Allow-Headers": "*",
}
wd = {
"Dav": "1, 2",
"Ms-Author-Via": "DAV",
}
if not self.args.no_dav: if not self.args.no_dav:
# PROPPATCH, LOCK, UNLOCK, COPY: noop (spec-must) # PROPPATCH, LOCK, UNLOCK, COPY: noop (spec-must)
zs = ", PROPFIND, PROPPATCH, LOCK, UNLOCK, MKCOL, COPY, MOVE, DELETE" oh["Dav"] = "1, 2"
ret["Allow"] += zs oh["Ms-Author-Via"] = "DAV"
ret.update(wd)
# winxp-webdav doesnt know what 204 is # winxp-webdav doesnt know what 204 is
self.send_headers(0, 200, headers=ret) self.send_headers(0, 200)
return True return True
def handle_delete(self) -> bool: def handle_delete(self) -> bool:
@@ -1141,8 +1212,6 @@ class HttpCli(object):
return self.handle_stash(False) return self.handle_stash(False)
ctype = self.headers.get("content-type", "").lower() ctype = self.headers.get("content-type", "").lower()
if not ctype:
raise Pebkac(400, "you can't post without a content-type header")
if "multipart/form-data" in ctype: if "multipart/form-data" in ctype:
return self.handle_post_multipart() return self.handle_post_multipart()
@@ -1154,6 +1223,12 @@ class HttpCli(object):
): ):
return self.handle_post_json() return self.handle_post_json()
if "move" in self.uparam:
return self.handle_mv()
if "delete" in self.uparam:
return self.handle_rm([])
if "application/octet-stream" in ctype: if "application/octet-stream" in ctype:
return self.handle_post_binary() return self.handle_post_binary()
@@ -1182,9 +1257,27 @@ class HttpCli(object):
plain = zb.decode("utf-8", "replace") plain = zb.decode("utf-8", "replace")
if buf.startswith(b"msg="): if buf.startswith(b"msg="):
plain = plain[4:] plain = plain[4:]
vfs, rem = self.asrv.vfs.get(
self.vpath, self.uname, False, False
)
xm = vfs.flags.get("xm")
if xm:
runhook(
self.log,
xm,
vfs.canonical(rem),
self.vpath,
self.host,
self.uname,
self.ip,
time.time(),
len(xm),
plain,
)
t = "urlform_dec {} @ {}\n {}\n" t = "urlform_dec {} @ {}\n {}\n"
self.log(t.format(len(plain), self.vpath, plain)) self.log(t.format(len(plain), self.vpath, plain))
except Exception as ex: except Exception as ex:
self.log(repr(ex)) self.log(repr(ex))
@@ -1225,7 +1318,7 @@ class HttpCli(object):
# post_sz, sha_hex, sha_b64, remains, path, url # post_sz, sha_hex, sha_b64, remains, path, url
reader, remains = self.get_body_reader() reader, remains = self.get_body_reader()
vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True) vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True)
rnd, _, lifetime = self.upload_flags(vfs) rnd, _, lifetime, xbu, xau = self.upload_flags(vfs)
lim = vfs.get_dbv(rem)[0].lim lim = vfs.get_dbv(rem)[0].lim
fdir = vfs.canonical(rem) fdir = vfs.canonical(rem)
if lim: if lim:
@@ -1302,8 +1395,31 @@ class HttpCli(object):
params.update(open_ka) params.update(open_ka)
assert fn assert fn
if rnd and not self.args.nw: if not self.args.nw:
fn = self.rand_name(fdir, fn, rnd) if rnd:
fn = rand_name(fdir, fn, rnd)
fn = sanitize_fn(fn or "", "", [".prologue.html", ".epilogue.html"])
path = os.path.join(fdir, fn)
if xbu:
at = time.time() - lifetime
if not runhook(
self.log,
xbu,
path,
self.vpath,
self.host,
self.uname,
self.ip,
at,
remains,
"",
):
t = "upload denied by xbu"
self.log(t, 1)
raise Pebkac(403, t)
if is_put and not self.args.no_dav: if is_put and not self.args.no_dav:
# allow overwrite if... # allow overwrite if...
@@ -1313,7 +1429,6 @@ class HttpCli(object):
# * file exists and is empty # * file exists and is empty
# * and there is no .PARTIAL # * and there is no .PARTIAL
path = os.path.join(fdir, fn)
tnam = fn + ".PARTIAL" tnam = fn + ".PARTIAL"
if self.args.dotpart: if self.args.dotpart:
tnam = "." + tnam tnam = "." + tnam
@@ -1323,7 +1438,8 @@ class HttpCli(object):
and bos.path.exists(path) and bos.path.exists(path)
and not bos.path.getsize(path) and not bos.path.getsize(path)
): ):
params["overwrite"] = "a" # small toctou, but better than clobbering a hardlink
bos.unlink(path)
with ren_open(fn, *open_a, **params) as zfw: with ren_open(fn, *open_a, **params) as zfw:
f, fn = zfw["orz"] f, fn = zfw["orz"]
@@ -1351,7 +1467,7 @@ class HttpCli(object):
if ext: if ext:
if rnd: if rnd:
fn2 = self.rand_name(fdir, "a." + ext, rnd) fn2 = rand_name(fdir, "a." + ext, rnd)
else: else:
fn2 = fn.rsplit(".", 1)[0] + "." + ext fn2 = fn.rsplit(".", 1)[0] + "." + ext
@@ -1364,6 +1480,24 @@ class HttpCli(object):
fn = fn2 fn = fn2
path = path2 path = path2
at = time.time() - lifetime
if xau and not runhook(
self.log,
xau,
path,
self.vpath,
self.host,
self.uname,
self.ip,
at,
post_sz,
"",
):
t = "upload denied by xau"
self.log(t, 1)
os.unlink(path)
raise Pebkac(403, t)
vfs, rem = vfs.get_dbv(rem) vfs, rem = vfs.get_dbv(rem)
self.conn.hsrv.broker.say( self.conn.hsrv.broker.say(
"up2k.hash_file", "up2k.hash_file",
@@ -1372,7 +1506,7 @@ class HttpCli(object):
rem, rem,
fn, fn,
self.ip, self.ip,
time.time() - lifetime, at,
) )
vsuf = "" vsuf = ""
@@ -1389,7 +1523,7 @@ class HttpCli(object):
url = "{}://{}/{}".format( url = "{}://{}/{}".format(
"https" if self.is_https else "http", "https" if self.is_https else "http",
self.headers.get("host") or "{}:{}".format(*list(self.s.getsockname()[:2])), self.host,
self.args.RS + vpath + vsuf, self.args.RS + vpath + vsuf,
) )
@@ -1445,27 +1579,6 @@ class HttpCli(object):
else: else:
self.log("bakflip ok", 2) self.log("bakflip ok", 2)
def rand_name(self, fdir: str, fn: str, rnd: int) -> str:
    """return a random filename (rnd chars + original extension)
    which does not yet exist inside folder fdir
    """
    ok = False
    try:
        # keep the original file extension, if any
        ext = "." + fn.rsplit(".", 1)[1]
    except:
        ext = ""

    # retry up to 16 times per length; after each batch of 16 collisions,
    # grow the name by one char (nc = rnd + extra) to reduce collision odds
    for extra in range(16):
        for _ in range(16):
            if ok:
                break

            nc = rnd + extra
            # nb bytes of urandom yields at least nc urlsafe-base64 chars
            nb = int((6 + 6 * nc) / 8)
            zb = os.urandom(nb)
            zb = base64.urlsafe_b64encode(zb)
            fn = zb[:nc].decode("utf-8") + ext
            ok = not bos.path.exists(os.path.join(fdir, fn))

    # NOTE(review): if every attempt collided, the last candidate is
    # returned anyways -- presumably acceptable; confirm with callers
    return fn
def _spd(self, nbytes: int, add: bool = True) -> str: def _spd(self, nbytes: int, add: bool = True) -> str:
if add: if add:
self.conn.nbyte += nbytes self.conn.nbyte += nbytes
@@ -1565,6 +1678,8 @@ class HttpCli(object):
body["vtop"] = dbv.vpath body["vtop"] = dbv.vpath
body["ptop"] = dbv.realpath body["ptop"] = dbv.realpath
body["prel"] = vrem body["prel"] = vrem
body["host"] = self.host
body["user"] = self.uname
body["addr"] = self.ip body["addr"] = self.ip
body["vcfg"] = dbv.flags body["vcfg"] = dbv.flags
@@ -1784,21 +1899,19 @@ class HttpCli(object):
self.parser.drop() self.parser.drop()
self.out_headerlist = [ self.out_headerlist = [
x x for x in self.out_headerlist if x[0] != "Set-Cookie" or "cppw" != x[1][:4]
for x in self.out_headerlist
if x[0] != "Set-Cookie" or "cppwd=" not in x[1]
] ]
dst = "/" dst = self.args.SRS
if self.vpath: if self.vpath:
dst += quotep(self.vpath) dst += quotep(self.vpath)
ck, msg = self.get_pwd_cookie(pwd) msg = self.get_pwd_cookie(pwd)
html = self.j2s("msg", h1=msg, h2='<a href="' + dst + '">ack</a>', redir=dst) html = self.j2s("msg", h1=msg, h2='<a href="' + dst + '">ack</a>', redir=dst)
self.reply(html.encode("utf-8"), headers={"Set-Cookie": ck}) self.reply(html.encode("utf-8"))
return True return True
def get_pwd_cookie(self, pwd: str) -> tuple[str, str]: def get_pwd_cookie(self, pwd: str) -> str:
if pwd in self.asrv.iacct: if pwd in self.asrv.iacct:
msg = "login ok" msg = "login ok"
dur = int(60 * 60 * self.args.logout) dur = int(60 * 60 * self.args.logout)
@@ -1815,11 +1928,18 @@ class HttpCli(object):
pwd = "x" # nosec pwd = "x" # nosec
dur = None dur = None
r = gencookie("cppwd", pwd, dur) if pwd == "x":
if self.is_ancient: # reset both plaintext and tls
r = r.rsplit(" ", 1)[0] # (only affects active tls cookies when tls)
for k in ("cppwd", "cppws") if self.is_https else ("cppwd",):
ck = gencookie(k, pwd, self.args.R, False, dur)
self.out_headerlist.append(("Set-Cookie", ck))
else:
k = "cppws" if self.is_https else "cppwd"
ck = gencookie(k, pwd, self.args.R, self.is_https, dur)
self.out_headerlist.append(("Set-Cookie", ck))
return r, msg return msg
def handle_mkdir(self) -> bool: def handle_mkdir(self) -> bool:
assert self.parser assert self.parser
@@ -1886,9 +2006,14 @@ class HttpCli(object):
self.redirect(vpath, "?edit") self.redirect(vpath, "?edit")
return True return True
def upload_flags(self, vfs: VFS) -> tuple[int, bool, int]: def upload_flags(self, vfs: VFS) -> tuple[int, bool, int, list[str], list[str]]:
srnd = self.uparam.get("rand", self.headers.get("rand", "")) if self.args.nw:
rnd = int(srnd) if srnd and not self.args.nw else 0 rnd = 0
else:
rnd = int(self.uparam.get("rand") or self.headers.get("rand") or 0)
if vfs.flags.get("rand"): # force-enable
rnd = max(rnd, vfs.flags["nrand"])
ac = self.uparam.get( ac = self.uparam.get(
"want", self.headers.get("accept", "").lower().split(";")[-1] "want", self.headers.get("accept", "").lower().split(";")[-1]
) )
@@ -1900,7 +2025,13 @@ class HttpCli(object):
else: else:
lifetime = 0 lifetime = 0
return rnd, want_url, lifetime return (
rnd,
want_url,
lifetime,
vfs.flags.get("xbu") or [],
vfs.flags.get("xau") or [],
)
def handle_plain_upload(self) -> bool: def handle_plain_upload(self) -> bool:
assert self.parser assert self.parser
@@ -1917,7 +2048,7 @@ class HttpCli(object):
if not nullwrite: if not nullwrite:
bos.makedirs(fdir_base) bos.makedirs(fdir_base)
rnd, want_url, lifetime = self.upload_flags(vfs) rnd, want_url, lifetime, xbu, xau = self.upload_flags(vfs)
files: list[tuple[int, str, str, str, str, str]] = [] files: list[tuple[int, str, str, str, str, str]] = []
# sz, sha_hex, sha_b64, p_file, fname, abspath # sz, sha_hex, sha_b64, p_file, fname, abspath
@@ -1937,7 +2068,7 @@ class HttpCli(object):
) )
if p_file and not nullwrite: if p_file and not nullwrite:
if rnd: if rnd:
fname = self.rand_name(fdir, fname, rnd) fname = rand_name(fdir, fname, rnd)
if not bos.path.isdir(fdir): if not bos.path.isdir(fdir):
raise Pebkac(404, "that folder does not exist") raise Pebkac(404, "that folder does not exist")
@@ -1959,6 +2090,24 @@ class HttpCli(object):
tnam = fname = os.devnull tnam = fname = os.devnull
fdir = abspath = "" fdir = abspath = ""
if xbu:
at = time.time() - lifetime
if not runhook(
self.log,
xbu,
abspath,
self.vpath,
self.host,
self.uname,
self.ip,
at,
0,
"",
):
t = "upload denied by xbu"
self.log(t, 1)
raise Pebkac(403, t)
if lim: if lim:
lim.chk_bup(self.ip) lim.chk_bup(self.ip)
lim.chk_nup(self.ip) lim.chk_nup(self.ip)
@@ -2001,6 +2150,24 @@ class HttpCli(object):
files.append( files.append(
(sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath) (sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath)
) )
at = time.time() - lifetime
if xau and not runhook(
self.log,
xau,
abspath,
self.vpath,
self.host,
self.uname,
self.ip,
at,
sz,
"",
):
t = "upload denied by xau"
self.log(t, 1)
os.unlink(abspath)
raise Pebkac(403, t)
dbv, vrem = vfs.get_dbv(rem) dbv, vrem = vfs.get_dbv(rem)
self.conn.hsrv.broker.say( self.conn.hsrv.broker.say(
"up2k.hash_file", "up2k.hash_file",
@@ -2009,7 +2176,7 @@ class HttpCli(object):
vrem, vrem,
fname, fname,
self.ip, self.ip,
time.time() - lifetime, at,
) )
self.conn.nbyte += sz self.conn.nbyte += sz
@@ -2069,8 +2236,7 @@ class HttpCli(object):
jpart = { jpart = {
"url": "{}://{}/{}".format( "url": "{}://{}/{}".format(
"https" if self.is_https else "http", "https" if self.is_https else "http",
self.headers.get("host") self.host,
or "{}:{}".format(*list(self.s.getsockname()[:2])),
rel_url, rel_url,
), ),
"sha512": sha_hex[:56], "sha512": sha_hex[:56],
@@ -2129,7 +2295,7 @@ class HttpCli(object):
raise Pebkac(400, "could not read lastmod from request") raise Pebkac(400, "could not read lastmod from request")
nullwrite = self.args.nw nullwrite = self.args.nw
vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True) vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, True, True)
self._assert_safe_rem(rem) self._assert_safe_rem(rem)
clen = int(self.headers.get("content-length", -1)) clen = int(self.headers.get("content-length", -1))
@@ -2211,6 +2377,9 @@ class HttpCli(object):
if p_field != "body": if p_field != "body":
raise Pebkac(400, "expected body, got {}".format(p_field)) raise Pebkac(400, "expected body, got {}".format(p_field))
if bos.path.exists(fp):
bos.unlink(fp)
with open(fsenc(fp), "wb", 512 * 1024) as f: with open(fsenc(fp), "wb", 512 * 1024) as f:
sz, sha512, _ = hashcopy(p_data, f, self.args.s_wr_slp) sz, sha512, _ = hashcopy(p_data, f, self.args.s_wr_slp)
@@ -2273,8 +2442,17 @@ class HttpCli(object):
if stat.S_ISDIR(st.st_mode): if stat.S_ISDIR(st.st_mode):
continue continue
if stat.S_ISBLK(st.st_mode):
fd = bos.open(fs_path, os.O_RDONLY)
try:
sz = os.lseek(fd, 0, os.SEEK_END)
finally:
os.close(fd)
else:
sz = st.st_size
file_ts = max(file_ts, int(st.st_mtime)) file_ts = max(file_ts, int(st.st_mtime))
editions[ext or "plain"] = (fs_path, st.st_size) editions[ext or "plain"] = (fs_path, sz)
except: except:
pass pass
if not self.vpath.startswith(".cpr/"): if not self.vpath.startswith(".cpr/"):
@@ -2451,7 +2629,7 @@ class HttpCli(object):
if fn: if fn:
fn = fn.rstrip("/").split("/")[-1] fn = fn.rstrip("/").split("/")[-1]
else: else:
fn = self.headers.get("host", "hey") fn = self.host.split(":")[0]
safe = (string.ascii_letters + string.digits).replace("%", "") safe = (string.ascii_letters + string.digits).replace("%", "")
afn = "".join([x if x in safe.replace('"', "") else "_" for x in fn]) afn = "".join([x if x in safe.replace('"', "") else "_" for x in fn])
@@ -2606,7 +2784,7 @@ class HttpCli(object):
def tx_svcs(self) -> bool: def tx_svcs(self) -> bool:
aname = re.sub("[^0-9a-zA-Z]+", "", self.args.name) or "a" aname = re.sub("[^0-9a-zA-Z]+", "", self.args.name) or "a"
ep = self.headers["host"] ep = self.host
host = ep.split(":")[0] host = ep.split(":")[0]
hport = ep[ep.find(":") :] if ":" in ep else "" hport = ep[ep.find(":") :] if ":" in ep else ""
rip = ( rip = (
@@ -2614,6 +2792,7 @@ class HttpCli(object):
if self.args.rclone_mdns or not self.args.zm if self.args.rclone_mdns or not self.args.zm
else self.conn.hsrv.nm.map(self.ip) or host else self.conn.hsrv.nm.map(self.ip) or host
) )
vp = (self.uparam["hc"] or "").lstrip("/")
html = self.j2s( html = self.j2s(
"svcs", "svcs",
args=self.args, args=self.args,
@@ -2621,7 +2800,8 @@ class HttpCli(object):
s="s" if self.is_https else "", s="s" if self.is_https else "",
rip=rip, rip=rip,
ep=ep, ep=ep,
vp=(self.uparam["hc"] or "").lstrip("/"), vp=vp,
rvp=vjoin(self.args.R, vp),
host=host, host=host,
hport=hport, hport=hport,
aname=aname, aname=aname,
@@ -2652,7 +2832,11 @@ class HttpCli(object):
"dbwt": None, "dbwt": None,
} }
if self.uparam.get("ls") in ["v", "t", "txt"]: fmt = self.uparam.get("ls", "")
if not fmt and self.ua.startswith("curl/"):
fmt = "v"
if fmt in ["v", "t", "txt"]:
if self.uname == "*": if self.uname == "*":
txt = "howdy stranger (you're not logged in)" txt = "howdy stranger (you're not logged in)"
else: else:
@@ -2692,26 +2876,28 @@ class HttpCli(object):
dbwt=vs["dbwt"], dbwt=vs["dbwt"],
url_suf=suf, url_suf=suf,
k304=self.k304(), k304=self.k304(),
ver=S_VERSION if self.args.ver else "",
) )
self.reply(html.encode("utf-8")) self.reply(html.encode("utf-8"))
return True return True
def set_k304(self) -> bool: def set_k304(self) -> bool:
ck = gencookie("k304", self.uparam["k304"], 60 * 60 * 24 * 299) ck = gencookie("k304", self.uparam["k304"], self.args.R, False, 86400 * 299)
self.out_headerlist.append(("Set-Cookie", ck)) self.out_headerlist.append(("Set-Cookie", ck))
self.redirect("", "?h#cc") self.redirect("", "?h#cc")
return True return True
def set_am_js(self) -> bool: def set_am_js(self) -> bool:
v = "n" if self.uparam["am_js"] == "n" else "y" v = "n" if self.uparam["am_js"] == "n" else "y"
ck = gencookie("js", v, 60 * 60 * 24 * 299) ck = gencookie("js", v, self.args.R, False, 86400 * 299)
self.out_headerlist.append(("Set-Cookie", ck)) self.out_headerlist.append(("Set-Cookie", ck))
self.reply(b"promoted\n") self.reply(b"promoted\n")
return True return True
def set_cfg_reset(self) -> bool: def set_cfg_reset(self) -> bool:
for k in ("k304", "js", "cppwd"): for k in ("k304", "js", "cppwd", "cppws"):
self.out_headerlist.append(("Set-Cookie", gencookie(k, "x", None))) cookie = gencookie(k, "x", self.args.R, False, None)
self.out_headerlist.append(("Set-Cookie", cookie))
self.redirect("", "?h#cc") self.redirect("", "?h#cc")
return True return True
@@ -2849,6 +3035,7 @@ class HttpCli(object):
raise Pebkac(500, "sqlite3 is not available on the server; cannot unpost") raise Pebkac(500, "sqlite3 is not available on the server; cannot unpost")
filt = self.uparam.get("filter") filt = self.uparam.get("filter")
filt = unquotep(filt or "")
lm = "ups [{}]".format(filt) lm = "ups [{}]".format(filt)
self.log(lm) self.log(lm)
@@ -2971,7 +3158,7 @@ class HttpCli(object):
biggest = 0 biggest = 0
if arg == "v": if arg == "v":
fmt = "\033[0;7;36m{{}} {{:>{}}}\033[0m {{}}" fmt = "\033[0;7;36m{{}}{{:>{}}}\033[0m {{}}"
nfmt = "{}" nfmt = "{}"
biggest = 0 biggest = 0
f2 = "".join( f2 = "".join(
@@ -2991,7 +3178,7 @@ class HttpCli(object):
a = x["dt"].replace("-", " ").replace(":", " ").split(" ") a = x["dt"].replace("-", " ").replace(":", " ").split(" ")
x["dt"] = f2.format(*list(a)) x["dt"] = f2.format(*list(a))
sz = humansize(x["sz"], True) sz = humansize(x["sz"], True)
x["sz"] = "\033[0;3{}m{:>5}".format(ctab.get(sz[-1:], 0), sz) x["sz"] = "\033[0;3{}m {:>5}".format(ctab.get(sz[-1:], 0), sz)
else: else:
fmt = "{{}} {{:{},}} {{}}" fmt = "{{}} {{:{},}} {{}}"
nfmt = "{:,}" nfmt = "{:,}"
@@ -3142,6 +3329,10 @@ class HttpCli(object):
is_ls = "ls" in self.uparam is_ls = "ls" in self.uparam
is_js = self.args.force_js or self.cookies.get("js") == "y" is_js = self.args.force_js or self.cookies.get("js") == "y"
if not is_ls and self.ua.startswith("curl/"):
self.uparam["ls"] = "v"
is_ls = True
tpl = "browser" tpl = "browser"
if "b" in self.uparam: if "b" in self.uparam:
tpl = "browser2" tpl = "browser2"
@@ -3159,11 +3350,12 @@ class HttpCli(object):
if not self.args.no_readme and not logues[1]: if not self.args.no_readme and not logues[1]:
for fn in ["README.md", "readme.md"]: for fn in ["README.md", "readme.md"]:
fn = os.path.join(abspath, fn) fn = os.path.join(abspath, fn)
if bos.path.exists(fn): if bos.path.isfile(fn):
with open(fsenc(fn), "rb") as f: with open(fsenc(fn), "rb") as f:
readme = f.read().decode("utf-8") readme = f.read().decode("utf-8")
break break
vf = vn.flags
ls_ret = { ls_ret = {
"dirs": [], "dirs": [],
"files": [], "files": [],
@@ -3173,6 +3365,7 @@ class HttpCli(object):
"idx": ("e2d" in vn.flags), "idx": ("e2d" in vn.flags),
"itag": ("e2t" in vn.flags), "itag": ("e2t" in vn.flags),
"lifetime": vn.flags.get("lifetime") or 0, "lifetime": vn.flags.get("lifetime") or 0,
"frand": bool(vn.flags.get("rand")),
"perms": perms, "perms": perms,
"logues": logues, "logues": logues,
"readme": readme, "readme": readme,
@@ -3185,6 +3378,7 @@ class HttpCli(object):
"acct": self.uname, "acct": self.uname,
"perms": json.dumps(perms), "perms": json.dumps(perms),
"lifetime": ls_ret["lifetime"], "lifetime": ls_ret["lifetime"],
"frand": bool(vn.flags.get("rand")),
"taglist": [], "taglist": [],
"def_hcols": [], "def_hcols": [],
"have_emp": self.args.emp, "have_emp": self.args.emp,
@@ -3196,6 +3390,8 @@ class HttpCli(object):
"have_zip": (not self.args.no_zip), "have_zip": (not self.args.no_zip),
"have_unpost": int(self.args.unpost), "have_unpost": int(self.args.unpost),
"have_b_u": (self.can_write and self.uparam.get("b") == "u"), "have_b_u": (self.can_write and self.uparam.get("b") == "u"),
"sb_md": "" if "no_sb_md" in vf else (vf.get("md_sbf") or "y"),
"sb_lg": "" if "no_sb_lg" in vf else (vf.get("lg_sbf") or "y"),
"url_suf": url_suf, "url_suf": url_suf,
"logues": logues, "logues": logues,
"readme": readme, "readme": readme,
@@ -3296,7 +3492,9 @@ class HttpCli(object):
if self.args.no_zip: if self.args.no_zip:
margin = "DIR" margin = "DIR"
else: else:
margin = '<a href="{}?zip">zip</a>'.format(quotep(href)) margin = '<a href="{}?zip" rel="nofollow">zip</a>'.format(
quotep(href)
)
elif fn in hist: elif fn in hist:
margin = '<a href="{}.hist/{}">#{}</a>'.format( margin = '<a href="{}.hist/{}">#{}</a>'.format(
base, html_escape(hist[fn][2], quot=True, crlf=True), hist[fn][0] base, html_escape(hist[fn][2], quot=True, crlf=True), hist[fn][0]

View File

@@ -65,6 +65,7 @@ class HttpConn(object):
self.ico: Ico = Ico(self.args) # mypy404 self.ico: Ico = Ico(self.args) # mypy404
self.t0: float = time.time() # mypy404 self.t0: float = time.time() # mypy404
self.freshen_pwd: float = 0.0
self.stopping = False self.stopping = False
self.nreq: int = -1 # mypy404 self.nreq: int = -1 # mypy404
self.nbyte: int = 0 # mypy404 self.nbyte: int = 0 # mypy404

View File

@@ -81,8 +81,7 @@ class HttpSrv(object):
self.bans: dict[str, int] = {} self.bans: dict[str, int] = {}
self.aclose: dict[str, int] = {} self.aclose: dict[str, int] = {}
self.ip = "" self.bound: set[tuple[str, int]] = set()
self.port = 0
self.name = "hsrv" + nsuf self.name = "hsrv" + nsuf
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.stopping = False self.stopping = False
@@ -110,6 +109,11 @@ class HttpSrv(object):
zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz") zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
self.prism = os.path.exists(zs) self.prism = os.path.exists(zs)
self.mallow = "GET HEAD POST PUT DELETE OPTIONS".split()
if not self.args.no_dav:
zs = "PROPFIND PROPPATCH LOCK UNLOCK MKCOL COPY MOVE"
self.mallow += zs.split()
if self.args.zs: if self.args.zs:
from .ssdp import SSDPr from .ssdp import SSDPr
@@ -142,7 +146,11 @@ class HttpSrv(object):
pass pass
def set_netdevs(self, netdevs: dict[str, Netdev]) -> None: def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
self.nm = NetMap([self.ip], netdevs) ips = set()
for ip, _ in self.bound:
ips.add(ip)
self.nm = NetMap(list(ips), netdevs)
def start_threads(self, n: int) -> None: def start_threads(self, n: int) -> None:
self.tp_nthr += n self.tp_nthr += n
@@ -184,12 +192,13 @@ class HttpSrv(object):
sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sck.settimeout(None) # < does not inherit, ^ opts above do sck.settimeout(None) # < does not inherit, ^ opts above do
self.ip, self.port = sck.getsockname()[:2] ip, port = sck.getsockname()[:2]
self.srvs.append(sck) self.srvs.append(sck)
self.bound.add((ip, port))
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners) self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
Daemon( Daemon(
self.thr_listen, self.thr_listen,
"httpsrv-n{}-listen-{}-{}".format(self.nid or "0", self.ip, self.port), "httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
(sck,), (sck,),
) )

View File

@@ -11,6 +11,7 @@ from ipaddress import IPv4Network, IPv6Network
from .__init__ import TYPE_CHECKING from .__init__ import TYPE_CHECKING
from .__init__ import unicode as U from .__init__ import unicode as U
from .multicast import MC_Sck, MCast from .multicast import MC_Sck, MCast
from .stolen.dnslib import AAAA
from .stolen.dnslib import CLASS as DC from .stolen.dnslib import CLASS as DC
from .stolen.dnslib import ( from .stolen.dnslib import (
NSEC, NSEC,
@@ -20,12 +21,11 @@ from .stolen.dnslib import (
SRV, SRV,
TXT, TXT,
A, A,
AAAA,
DNSHeader, DNSHeader,
DNSQuestion, DNSQuestion,
DNSRecord, DNSRecord,
) )
from .util import CachedSet, Daemon, Netdev, min_ex from .util import CachedSet, Daemon, Netdev, list_ips, min_ex
if TYPE_CHECKING: if TYPE_CHECKING:
from .svchub import SvcHub from .svchub import SvcHub
@@ -55,10 +55,11 @@ class MDNS_Sck(MC_Sck):
self.bp_bye = b"" self.bp_bye = b""
self.last_tx = 0.0 self.last_tx = 0.0
self.tx_ex = False
class MDNS(MCast): class MDNS(MCast):
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub: "SvcHub", ngen: int) -> None:
al = hub.args al = hub.args
grp4 = "" if al.zm6 else MDNS4 grp4 = "" if al.zm6 else MDNS4
grp6 = "" if al.zm4 else MDNS6 grp6 = "" if al.zm4 else MDNS6
@@ -66,7 +67,8 @@ class MDNS(MCast):
hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv hub, MDNS_Sck, al.zm_on, al.zm_off, grp4, grp6, 5353, hub.args.zmv
) )
self.srv: dict[socket.socket, MDNS_Sck] = {} self.srv: dict[socket.socket, MDNS_Sck] = {}
self.logsrc = "mDNS-{}".format(ngen)
self.ngen = ngen
self.ttl = 300 self.ttl = 300
zs = self.args.name + ".local." zs = self.args.name + ".local."
@@ -89,7 +91,7 @@ class MDNS(MCast):
self.defend: dict[MDNS_Sck, float] = {} # server -> deadline self.defend: dict[MDNS_Sck, float] = {} # server -> deadline
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("mDNS", msg, c) self.log_func(self.logsrc, msg, c)
def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]: def build_svcs(self) -> tuple[dict[str, dict[str, Any]], set[str]]:
zms = self.args.zms zms = self.args.zms
@@ -287,12 +289,15 @@ class MDNS(MCast):
rx: list[socket.socket] = rdy[0] # type: ignore rx: list[socket.socket] = rdy[0] # type: ignore
self.rx4.cln() self.rx4.cln()
self.rx6.cln() self.rx6.cln()
buf = b""
addr = ("0", 0)
for sck in rx: for sck in rx:
buf, addr = sck.recvfrom(4096)
try: try:
buf, addr = sck.recvfrom(4096)
self.eat(buf, addr, sck) self.eat(buf, addr, sck)
except: except:
if not self.running: if not self.running:
self.log("stopped", 2)
return return
t = "{} {} \033[33m|{}| {}\n{}".format( t = "{} {} \033[33m|{}| {}\n{}".format(
@@ -309,14 +314,18 @@ class MDNS(MCast):
self.log(t.format(self.hn[:-1]), 2) self.log(t.format(self.hn[:-1]), 2)
self.probing = 0 self.probing = 0
self.log("stopped", 2)
def stop(self, panic=False) -> None: def stop(self, panic=False) -> None:
self.running = False self.running = False
if not panic: for srv in self.srv.values():
for srv in self.srv.values(): try:
try: if panic:
srv.sck.close()
else:
srv.sck.sendto(srv.bp_bye, (srv.grp, 5353)) srv.sck.sendto(srv.bp_bye, (srv.grp, 5353))
except: except:
pass pass
self.srv = {} self.srv = {}
@@ -374,6 +383,14 @@ class MDNS(MCast):
# avahi broadcasting 127.0.0.1-only packets # avahi broadcasting 127.0.0.1-only packets
return return
# check if we've been given additional IPs
for ip in list_ips():
if ip in cips:
self.sips.add(ip)
if not self.sips.isdisjoint(cips):
return
t = "mdns zeroconf: " t = "mdns zeroconf: "
if self.probing: if self.probing:
t += "Cannot start; hostname '{}' is occupied" t += "Cannot start; hostname '{}' is occupied"
@@ -507,6 +524,15 @@ class MDNS(MCast):
if now < srv.last_tx + cooldown: if now < srv.last_tx + cooldown:
return False return False
srv.sck.sendto(msg, (srv.grp, 5353)) try:
srv.last_tx = now srv.sck.sendto(msg, (srv.grp, 5353))
srv.last_tx = now
except Exception as ex:
if srv.tx_ex:
return True
srv.tx_ex = True
t = "tx({},|{}|,{}): {}"
self.log(t.format(srv.ip, len(msg), cooldown, ex), 3)
return True return True

View File

@@ -10,7 +10,18 @@ import sys
from .__init__ import PY2, WINDOWS, E, unicode from .__init__ import PY2, WINDOWS, E, unicode
from .bos import bos from .bos import bos
from .util import REKOBO_LKEY, fsenc, min_ex, retchk, runcmd, uncyg from .util import (
FFMPEG_URL,
REKOBO_LKEY,
fsenc,
is_exe,
min_ex,
pybin,
retchk,
runcmd,
sfsenc,
uncyg,
)
if True: # pylint: disable=using-constant-test if True: # pylint: disable=using-constant-test
from typing import Any, Union from typing import Any, Union
@@ -285,9 +296,14 @@ class MTag(object):
self.log(msg, c=3) self.log(msg, c=3)
if not self.usable: if not self.usable:
if is_exe:
t = "copyparty.exe cannot use mutagen; need ffprobe.exe to read media tags: "
self.log(t + FFMPEG_URL)
return
msg = "need Mutagen{} to read media tags so please run this:\n{}{} -m pip install --user mutagen\n" msg = "need Mutagen{} to read media tags so please run this:\n{}{} -m pip install --user mutagen\n"
pybin = os.path.basename(sys.executable) pyname = os.path.basename(pybin)
self.log(msg.format(or_ffprobe, " " * 37, pybin), c=1) self.log(msg.format(or_ffprobe, " " * 37, pyname), c=1)
return return
# https://picard-docs.musicbrainz.org/downloads/MusicBrainz_Picard_Tag_Map.html # https://picard-docs.musicbrainz.org/downloads/MusicBrainz_Picard_Tag_Map.html
@@ -519,12 +535,15 @@ class MTag(object):
env = os.environ.copy() env = os.environ.copy()
try: try:
if is_exe:
raise Exception()
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
zsl = [str(pypath)] + [str(x) for x in sys.path if x] zsl = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(zsl)) pypath = str(os.pathsep.join(zsl))
env["PYTHONPATH"] = pypath env["PYTHONPATH"] = pypath
except: except:
if not E.ox: if not E.ox and not is_exe:
raise raise
ret: dict[str, Any] = {} ret: dict[str, Any] = {}
@@ -532,7 +551,7 @@ class MTag(object):
try: try:
cmd = [parser.bin, abspath] cmd = [parser.bin, abspath]
if parser.bin.endswith(".py"): if parser.bin.endswith(".py"):
cmd = [sys.executable] + cmd cmd = [pybin] + cmd
args = { args = {
"env": env, "env": env,
@@ -551,7 +570,7 @@ class MTag(object):
else: else:
cmd = ["nice"] + cmd cmd = ["nice"] + cmd
bcmd = [fsenc(x) for x in cmd] bcmd = [sfsenc(x) for x in cmd[:-1]] + [fsenc(cmd[-1])]
rc, v, err = runcmd(bcmd, **args) # type: ignore rc, v, err = runcmd(bcmd, **args) # type: ignore
retchk(rc, bcmd, err, self.log, 5, self.args.mtag_v) retchk(rc, bcmd, err, self.log, 5, self.args.mtag_v)
v = v.strip() v = v.strip()

View File

@@ -14,8 +14,8 @@ from ipaddress import (
ip_network, ip_network,
) )
from .__init__ import TYPE_CHECKING from .__init__ import MACOS, TYPE_CHECKING
from .util import MACOS, Netdev, min_ex, spack from .util import Netdev, find_prefix, min_ex, spack
if TYPE_CHECKING: if TYPE_CHECKING:
from .svchub import SvcHub from .svchub import SvcHub
@@ -110,9 +110,7 @@ class MCast(object):
) )
ips = [x for x in ips if x not in ("::1", "127.0.0.1")] ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
ips = find_prefix(ips, netdevs)
# ip -> ip/prefix
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips]
on = self.on[:] on = self.on[:]
off = self.off[:] off = self.off[:]

View File

@@ -12,7 +12,7 @@ from types import SimpleNamespace
from .__init__ import ANYWIN, TYPE_CHECKING from .__init__ import ANYWIN, TYPE_CHECKING
from .authsrv import LEELOO_DALLAS, VFS from .authsrv import LEELOO_DALLAS, VFS
from .bos import bos from .bos import bos
from .util import Daemon, min_ex from .util import Daemon, is_exe, min_ex, pybin
if True: # pylint: disable=using-constant-test if True: # pylint: disable=using-constant-test
from typing import Any from typing import Any
@@ -42,8 +42,12 @@ class SMB(object):
from impacket import smbserver from impacket import smbserver
from impacket.ntlm import compute_lmhash, compute_nthash from impacket.ntlm import compute_lmhash, compute_nthash
except ImportError: except ImportError:
if is_exe:
print("copyparty.exe cannot do SMB")
sys.exit(1)
m = "\033[36m\n{}\033[31m\n\nERROR: need 'impacket'; please run this command:\033[33m\n {} -m pip install --user impacket\n\033[0m" m = "\033[36m\n{}\033[31m\n\nERROR: need 'impacket'; please run this command:\033[33m\n {} -m pip install --user impacket\n\033[0m"
print(m.format(min_ex(), sys.executable)) print(m.format(min_ex(), pybin))
sys.exit(1) sys.exit(1)
# patch vfs into smbserver.os # patch vfs into smbserver.os

View File

@@ -8,7 +8,7 @@ from email.utils import formatdate
from .__init__ import TYPE_CHECKING from .__init__ import TYPE_CHECKING
from .multicast import MC_Sck, MCast from .multicast import MC_Sck, MCast
from .util import CachedSet, min_ex, html_escape from .util import CachedSet, html_escape, min_ex
if TYPE_CHECKING: if TYPE_CHECKING:
from .broker_util import BrokerCli from .broker_util import BrokerCli
@@ -75,6 +75,7 @@ class SSDPr(object):
c = html_escape c = html_escape
sip, sport = hc.s.getsockname()[:2] sip, sport = hc.s.getsockname()[:2]
sip = sip.replace("::ffff:", "")
proto = "https" if self.args.https_only else "http" proto = "https" if self.args.https_only else "http"
ubase = "{}://{}:{}".format(proto, sip, sport) ubase = "{}://{}:{}".format(proto, sip, sport)
zsl = self.args.zsl zsl = self.args.zsl
@@ -88,19 +89,22 @@ class SSDPr(object):
class SSDPd(MCast): class SSDPd(MCast):
"""communicates with ssdp clients over multicast""" """communicates with ssdp clients over multicast"""
def __init__(self, hub: "SvcHub") -> None: def __init__(self, hub: "SvcHub", ngen: int) -> None:
al = hub.args al = hub.args
vinit = al.zsv and not al.zmv vinit = al.zsv and not al.zmv
super(SSDPd, self).__init__( super(SSDPd, self).__init__(
hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit hub, SSDP_Sck, al.zs_on, al.zs_off, GRP, "", 1900, vinit
) )
self.srv: dict[socket.socket, SSDP_Sck] = {} self.srv: dict[socket.socket, SSDP_Sck] = {}
self.logsrc = "SSDP-{}".format(ngen)
self.ngen = ngen
self.rxc = CachedSet(0.7) self.rxc = CachedSet(0.7)
self.txc = CachedSet(5) # win10: every 3 sec self.txc = CachedSet(5) # win10: every 3 sec
self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I) self.ptn_st = re.compile(b"\nst: *upnp:rootdevice", re.I)
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("SSDP", msg, c) self.log_func(self.logsrc, msg, c)
def run(self) -> None: def run(self) -> None:
try: try:
@@ -126,24 +130,34 @@ class SSDPd(MCast):
self.log("listening") self.log("listening")
while self.running: while self.running:
rdy = select.select(self.srv, [], [], 180) rdy = select.select(self.srv, [], [], self.args.z_chk or 180)
rx: list[socket.socket] = rdy[0] # type: ignore rx: list[socket.socket] = rdy[0] # type: ignore
self.rxc.cln() self.rxc.cln()
buf = b""
addr = ("0", 0)
for sck in rx: for sck in rx:
buf, addr = sck.recvfrom(4096)
try: try:
buf, addr = sck.recvfrom(4096)
self.eat(buf, addr) self.eat(buf, addr)
except: except:
if not self.running: if not self.running:
return break
t = "{} {} \033[33m|{}| {}\n{}".format( t = "{} {} \033[33m|{}| {}\n{}".format(
self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex() self.srv[sck].name, addr, len(buf), repr(buf)[2:-1], min_ex()
) )
self.log(t, 6) self.log(t, 6)
self.log("stopped", 2)
def stop(self) -> None: def stop(self) -> None:
self.running = False self.running = False
for srv in self.srv.values():
try:
srv.sck.close()
except:
pass
self.srv = {} self.srv = {}
def eat(self, buf: bytes, addr: tuple[str, int]) -> None: def eat(self, buf: bytes, addr: tuple[str, int]) -> None:
@@ -160,7 +174,7 @@ class SSDPd(MCast):
self.rxc.add(buf) self.rxc.add(buf)
if not buf.startswith(b"M-SEARCH * HTTP/1."): if not buf.startswith(b"M-SEARCH * HTTP/1."):
raise Exception("not an ssdp message") return
if not self.ptn_st.search(buf): if not self.ptn_st.search(buf):
return return
@@ -184,7 +198,8 @@ BOOTID.UPNP.ORG: 0
CONFIGID.UPNP.ORG: 1 CONFIGID.UPNP.ORG: 1
""" """
zs = zs.format(formatdate(usegmt=True), srv.ip, srv.hport, self.args.zsid) v4 = srv.ip.replace("::ffff:", "")
zs = zs.format(formatdate(usegmt=True), v4, srv.hport, self.args.zsid)
zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace") zb = zs[1:].replace("\n", "\r\n").encode("utf-8", "replace")
srv.sck.sendto(zb, addr[:2]) srv.sck.sendto(zb, addr[:2])

View File

@@ -4,6 +4,7 @@ from __future__ import print_function, unicode_literals
import argparse import argparse
import base64 import base64
import calendar import calendar
import errno
import gzip import gzip
import logging import logging
import os import os
@@ -34,6 +35,7 @@ from .tcpsrv import TcpSrv
from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv
from .up2k import Up2k from .up2k import Up2k
from .util import ( from .util import (
FFMPEG_URL,
VERSIONS, VERSIONS,
Daemon, Daemon,
Garda, Garda,
@@ -41,8 +43,10 @@ from .util import (
HMaccas, HMaccas,
alltrace, alltrace,
ansi_re, ansi_re,
is_exe,
min_ex, min_ex,
mp, mp,
pybin,
start_log_thrs, start_log_thrs,
start_stackmon, start_stackmon,
) )
@@ -66,8 +70,15 @@ class SvcHub(object):
put() can return a queue (if want_reply=True) which has a blocking get() with the response. put() can return a queue (if want_reply=True) which has a blocking get() with the response.
""" """
def __init__(self, args: argparse.Namespace, argv: list[str], printed: str) -> None: def __init__(
self,
args: argparse.Namespace,
dargs: argparse.Namespace,
argv: list[str],
printed: str,
) -> None:
self.args = args self.args = args
self.dargs = dargs
self.argv = argv self.argv = argv
self.E: EnvParams = args.E self.E: EnvParams = args.E
self.logf: Optional[typing.TextIO] = None self.logf: Optional[typing.TextIO] = None
@@ -96,13 +107,13 @@ class SvcHub(object):
if args.sss or args.s >= 3: if args.sss or args.s >= 3:
args.ss = True args.ss = True
args.no_dav = True args.no_dav = True
args.no_logues = True
args.no_readme = True
args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz" args.lo = args.lo or "cpp-%Y-%m%d-%H%M%S.txt.xz"
args.ls = args.ls or "**,*,ln,p,r" args.ls = args.ls or "**,*,ln,p,r"
if args.ss or args.s >= 2: if args.ss or args.s >= 2:
args.s = True args.s = True
args.no_logues = True
args.no_readme = True
args.unpost = 0 args.unpost = 0
args.no_del = True args.no_del = True
args.no_mv = True args.no_mv = True
@@ -150,14 +161,18 @@ class SvcHub(object):
ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)] ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)]
args.theme = "{0}{1} {0} {1}".format(ch, bri) args.theme = "{0}{1} {0} {1}".format(ch, bri)
if not args.hardlink and args.never_symlink:
args.no_dedup = True
if args.log_fk: if args.log_fk:
args.log_fk = re.compile(args.log_fk) args.log_fk = re.compile(args.log_fk)
# initiate all services to manage # initiate all services to manage
self.asrv = AuthSrv(self.args, self.log) self.asrv = AuthSrv(self.args, self.log, dargs=self.dargs)
if args.cgen:
self.asrv.cgen()
if args.exit == "cfg":
sys.exit(0)
if args.ls: if args.ls:
self.asrv.dbg_ls() self.asrv.dbg_ls()
@@ -182,6 +197,7 @@ class SvcHub(object):
self.args.th_dec = list(decs.keys()) self.args.th_dec = list(decs.keys())
self.thumbsrv = None self.thumbsrv = None
want_ff = False
if not args.no_thumb: if not args.no_thumb:
t = ", ".join(self.args.th_dec) or "(None available)" t = ", ".join(self.args.th_dec) or "(None available)"
self.log("thumb", "decoder preference: {}".format(t)) self.log("thumb", "decoder preference: {}".format(t))
@@ -193,8 +209,12 @@ class SvcHub(object):
if self.args.th_dec: if self.args.th_dec:
self.thumbsrv = ThumbSrv(self) self.thumbsrv = ThumbSrv(self)
else: else:
want_ff = True
msg = "need either Pillow, pyvips, or FFmpeg to create thumbnails; for example:\n{0}{1} -m pip install --user Pillow\n{0}{1} -m pip install --user pyvips\n{0}apt install ffmpeg" msg = "need either Pillow, pyvips, or FFmpeg to create thumbnails; for example:\n{0}{1} -m pip install --user Pillow\n{0}{1} -m pip install --user pyvips\n{0}apt install ffmpeg"
msg = msg.format(" " * 37, os.path.basename(sys.executable)) msg = msg.format(" " * 37, os.path.basename(pybin))
if is_exe:
msg = "copyparty.exe cannot use Pillow or pyvips; need ffprobe.exe and ffmpeg.exe to create thumbnails"
self.log("thumb", msg, c=3) self.log("thumb", msg, c=3)
if not args.no_acode and args.no_thumb: if not args.no_acode and args.no_thumb:
@@ -206,6 +226,10 @@ class SvcHub(object):
msg = "setting --no-acode because either FFmpeg or FFprobe is not available" msg = "setting --no-acode because either FFmpeg or FFprobe is not available"
self.log("thumb", msg, c=6) self.log("thumb", msg, c=6)
args.no_acode = True args.no_acode = True
want_ff = True
if want_ff and ANYWIN:
self.log("thumb", "download FFmpeg to fix it:\033[0m " + FFMPEG_URL, 3)
args.th_poke = min(args.th_poke, args.th_maxage, args.ac_maxage) args.th_poke = min(args.th_poke, args.th_maxage, args.ac_maxage)
@@ -236,6 +260,7 @@ class SvcHub(object):
if not args.zms: if not args.zms:
args.zms = zms args.zms = zms
self.zc_ngen = 0
self.mdns: Optional["MDNS"] = None self.mdns: Optional["MDNS"] = None
self.ssdp: Optional["SSDPd"] = None self.ssdp: Optional["SSDPd"] = None
@@ -295,12 +320,25 @@ class SvcHub(object):
al.zs_on = al.zs_on or al.z_on al.zs_on = al.zs_on or al.z_on
al.zm_off = al.zm_off or al.z_off al.zm_off = al.zm_off or al.z_off
al.zs_off = al.zs_off or al.z_off al.zs_off = al.zs_off or al.z_off
for n in ("zm_on", "zm_off", "zs_on", "zs_off"): ns = "zm_on zm_off zs_on zs_off acao acam"
for n in ns.split(" "):
vs = getattr(al, n).split(",") vs = getattr(al, n).split(",")
vs = [x.strip() for x in vs] vs = [x.strip() for x in vs]
vs = [x for x in vs if x] vs = [x for x in vs if x]
setattr(al, n, vs) setattr(al, n, vs)
ns = "acao acam"
for n in ns.split(" "):
vs = getattr(al, n)
vd = {zs: 1 for zs in vs}
setattr(al, n, vd)
ns = "acao"
for n in ns.split(" "):
vs = getattr(al, n)
vs = [x.lower() for x in vs]
setattr(al, n, vs)
R = al.rp_loc R = al.rp_loc
if "//" in R or ":" in R: if "//" in R or ":" in R:
t = "found URL in --rp-loc; it should be just the location, for example /foo/bar" t = "found URL in --rp-loc; it should be just the location, for example /foo/bar"
@@ -309,6 +347,7 @@ class SvcHub(object):
al.R = R = R.strip("/") al.R = R = R.strip("/")
al.SR = "/" + R if R else "" al.SR = "/" + R if R else ""
al.RS = R + "/" if R else "" al.RS = R + "/" if R else ""
al.SRS = "/" + R + "/" if R else "/"
return True return True
@@ -386,7 +425,7 @@ class SvcHub(object):
lh = codecs.open(fn, "w", encoding="utf-8", errors="replace") lh = codecs.open(fn, "w", encoding="utf-8", errors="replace")
argv = [sys.executable] + self.argv argv = [pybin] + self.argv
if hasattr(shlex, "quote"): if hasattr(shlex, "quote"):
argv = [shlex.quote(x) for x in argv] argv = [shlex.quote(x) for x in argv]
else: else:
@@ -402,24 +441,10 @@ class SvcHub(object):
def run(self) -> None: def run(self) -> None:
self.tcpsrv.run() self.tcpsrv.run()
if getattr(self.args, "z_chk", 0) and (
if getattr(self.args, "zm", False): getattr(self.args, "zm", False) or getattr(self.args, "zs", False)
try: ):
from .mdns import MDNS Daemon(self.tcpsrv.netmon, "netmon")
self.mdns = MDNS(self)
Daemon(self.mdns.run, "mdns")
except:
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
if getattr(self.args, "zs", False):
try:
from .ssdp import SSDPd
self.ssdp = SSDPd(self)
Daemon(self.ssdp.run, "ssdp")
except:
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
Daemon(self.thr_httpsrv_up, "sig-hsrv-up2") Daemon(self.thr_httpsrv_up, "sig-hsrv-up2")
@@ -451,6 +476,33 @@ class SvcHub(object):
else: else:
self.stop_thr() self.stop_thr()
def start_zeroconf(self) -> None:
self.zc_ngen += 1
if getattr(self.args, "zm", False):
try:
from .mdns import MDNS
if self.mdns:
self.mdns.stop(True)
self.mdns = MDNS(self, self.zc_ngen)
Daemon(self.mdns.run, "mdns")
except:
self.log("root", "mdns startup failed;\n" + min_ex(), 3)
if getattr(self.args, "zs", False):
try:
from .ssdp import SSDPd
if self.ssdp:
self.ssdp.stop()
self.ssdp = SSDPd(self, self.zc_ngen)
Daemon(self.ssdp.run, "ssdp")
except:
self.log("root", "ssdp startup failed;\n" + min_ex(), 3)
def reload(self) -> str: def reload(self) -> str:
if self.reloading: if self.reloading:
return "cannot reload; already in progress" return "cannot reload; already in progress"
@@ -635,13 +687,20 @@ class SvcHub(object):
print(msg.encode("utf-8", "replace").decode(), end="") print(msg.encode("utf-8", "replace").decode(), end="")
except: except:
print(msg.encode("ascii", "replace").decode(), end="") print(msg.encode("ascii", "replace").decode(), end="")
except OSError as ex:
if ex.errno != errno.EPIPE:
raise
if self.logf: if self.logf:
self.logf.write(msg) self.logf.write(msg)
def pr(self, *a: Any, **ka: Any) -> None: def pr(self, *a: Any, **ka: Any) -> None:
with self.log_mutex: try:
print(*a, **ka) with self.log_mutex:
print(*a, **ka)
except OSError as ex:
if ex.errno != errno.EPIPE:
raise
def check_mp_support(self) -> str: def check_mp_support(self) -> str:
if MACOS: if MACOS:

View File

@@ -2,8 +2,8 @@
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import calendar import calendar
import time
import stat import stat
import time
import zlib import zlib
from .bos import bos from .bos import bos

View File

@@ -5,6 +5,7 @@ import os
import re import re
import socket import socket
import sys import sys
import time
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, unicode from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, unicode
from .stolen.qrcodegen import QrCode from .stolen.qrcodegen import QrCode
@@ -28,6 +29,9 @@ if TYPE_CHECKING:
if not hasattr(socket, "IPPROTO_IPV6"): if not hasattr(socket, "IPPROTO_IPV6"):
setattr(socket, "IPPROTO_IPV6", 41) setattr(socket, "IPPROTO_IPV6", 41)
if not hasattr(socket, "IP_FREEBIND"):
setattr(socket, "IP_FREEBIND", 15)
class TcpSrv(object): class TcpSrv(object):
""" """
@@ -46,6 +50,8 @@ class TcpSrv(object):
self.stopping = False self.stopping = False
self.srv: list[socket.socket] = [] self.srv: list[socket.socket] = []
self.bound: list[tuple[str, int]] = [] self.bound: list[tuple[str, int]] = []
self.netdevs: dict[str, Netdev] = {}
self.netlist = ""
self.nsrv = 0 self.nsrv = 0
self.qr = "" self.qr = ""
pad = False pad = False
@@ -221,8 +227,16 @@ class TcpSrv(object):
except: except:
pass # will create another ipv4 socket instead pass # will create another ipv4 socket instead
if not ANYWIN and self.args.freebind:
srv.setsockopt(socket.SOL_IP, socket.IP_FREEBIND, 1)
try: try:
srv.bind((ip, port)) srv.bind((ip, port))
sport = srv.getsockname()[1]
if port != sport:
# linux 6.0.16 lets you bind a port which is in use
# except it just gives you a random port instead
raise OSError(E_ADDR_IN_USE[0], "")
self.srv.append(srv) self.srv.append(srv)
except (OSError, socket.error) as ex: except (OSError, socket.error) as ex:
if ex.errno in E_ADDR_IN_USE: if ex.errno in E_ADDR_IN_USE:
@@ -241,6 +255,14 @@ class TcpSrv(object):
ip, port = srv.getsockname()[:2] ip, port = srv.getsockname()[:2]
try: try:
srv.listen(self.args.nc) srv.listen(self.args.nc)
try:
ok = srv.getsockopt(socket.SOL_SOCKET, socket.SO_ACCEPTCONN)
except:
ok = 1 # macos
if not ok:
# some linux don't throw on listen(0.0.0.0) after listen(::)
raise Exception("failed to listen on {}".format(srv.getsockname()))
except: except:
if ip == "0.0.0.0" and ("::", port) in bound: if ip == "0.0.0.0" and ("::", port) in bound:
# dualstack # dualstack
@@ -268,7 +290,11 @@ class TcpSrv(object):
self.srv = srvs self.srv = srvs
self.bound = bound self.bound = bound
self.nsrv = len(srvs) self.nsrv = len(srvs)
self._distribute_netdevs()
def _distribute_netdevs(self):
self.hub.broker.say("set_netdevs", self.netdevs) self.hub.broker.say("set_netdevs", self.netdevs)
self.hub.start_zeroconf()
def shutdown(self) -> None: def shutdown(self) -> None:
self.stopping = True self.stopping = True
@@ -280,6 +306,27 @@ class TcpSrv(object):
self.log("tcpsrv", "ok bye") self.log("tcpsrv", "ok bye")
def netmon(self):
while not self.stopping:
time.sleep(self.args.z_chk)
netdevs = self.detect_interfaces(self.args.i)
if not netdevs:
continue
added = "nothing"
removed = "nothing"
for k, v in netdevs.items():
if k not in self.netdevs:
added = "{} = {}".format(k, v)
for k, v in self.netdevs.items():
if k not in netdevs:
removed = "{} = {}".format(k, v)
t = "network change detected:\n added {}\nremoved {}"
self.log("tcpsrv", t.format(added, removed), 3)
self.netdevs = netdevs
self._distribute_netdevs()
def detect_interfaces(self, listen_ips: list[str]) -> dict[str, Netdev]: def detect_interfaces(self, listen_ips: list[str]) -> dict[str, Netdev]:
from .stolen.ifaddr import get_adapters from .stolen.ifaddr import get_adapters
@@ -300,6 +347,12 @@ class TcpSrv(object):
except: except:
pass pass
netlist = str(sorted(eps.items()))
if netlist == self.netlist and self.netdevs:
return {}
self.netlist = netlist
if "0.0.0.0" not in listen_ips and "::" not in listen_ips: if "0.0.0.0" not in listen_ips and "::" not in listen_ips:
eps = {k: v for k, v in eps.items() if k.split("/")[0] in listen_ips} eps = {k: v for k, v in eps.items() if k.split("/")[0] in listen_ips}

View File

@@ -3,6 +3,7 @@ from __future__ import print_function, unicode_literals
import base64 import base64
import hashlib import hashlib
import logging
import os import os
import shutil import shutil
import subprocess as sp import subprocess as sp
@@ -11,14 +12,16 @@ import time
from queue import Queue from queue import Queue
from .__init__ import TYPE_CHECKING from .__init__ import ANYWIN, TYPE_CHECKING
from .bos import bos from .bos import bos
from .mtag import HAVE_FFMPEG, HAVE_FFPROBE, ffprobe from .mtag import HAVE_FFMPEG, HAVE_FFPROBE, ffprobe
from .util import ( from .util import (
BytesIO, BytesIO,
Cooldown, Cooldown,
Daemon, Daemon,
FFMPEG_URL,
Pebkac, Pebkac,
afsenc,
fsenc, fsenc,
min_ex, min_ex,
runcmd, runcmd,
@@ -61,12 +64,16 @@ try:
HAVE_AVIF = True HAVE_AVIF = True
except: except:
pass pass
logging.getLogger("PIL").setLevel(logging.WARNING)
except: except:
pass pass
try: try:
HAVE_VIPS = True HAVE_VIPS = True
import pyvips import pyvips
logging.getLogger("pyvips").setLevel(logging.WARNING)
except: except:
HAVE_VIPS = False HAVE_VIPS = False
@@ -77,14 +84,14 @@ def thumb_path(histpath: str, rem: str, mtime: float, fmt: str) -> str:
# base64 = 64 = 4096 # base64 = 64 = 4096
rd, fn = vsplit(rem) rd, fn = vsplit(rem)
if rd: if rd:
h = hashlib.sha512(fsenc(rd)).digest() h = hashlib.sha512(afsenc(rd)).digest()
b64 = base64.urlsafe_b64encode(h).decode("ascii")[:24] b64 = base64.urlsafe_b64encode(h).decode("ascii")[:24]
rd = "{}/{}/".format(b64[:2], b64[2:4]).lower() + b64 rd = "{}/{}/".format(b64[:2], b64[2:4]).lower() + b64
else: else:
rd = "top" rd = "top"
# could keep original filenames but this is safer re pathlen # could keep original filenames but this is safer re pathlen
h = hashlib.sha512(fsenc(fn)).digest() h = hashlib.sha512(afsenc(fn)).digest()
fn = base64.urlsafe_b64encode(h).decode("ascii")[:24] fn = base64.urlsafe_b64encode(h).decode("ascii")[:24]
if fmt in ("opus", "caf"): if fmt in ("opus", "caf"):
@@ -128,6 +135,8 @@ class ThumbSrv(object):
msg = "cannot create audio/video thumbnails because some of the required programs are not available: " msg = "cannot create audio/video thumbnails because some of the required programs are not available: "
msg += ", ".join(missing) msg += ", ".join(missing)
self.log(msg, c=3) self.log(msg, c=3)
if ANYWIN and not self.args.no_acode:
self.log("download FFmpeg to fix it:\033[0m " + FFMPEG_URL, 3)
if self.args.th_clean: if self.args.th_clean:
Daemon(self.cleaner, "thumb.cln") Daemon(self.cleaner, "thumb.cln")
@@ -191,12 +200,12 @@ class ThumbSrv(object):
self.log("wait {}".format(tpath)) self.log("wait {}".format(tpath))
except: except:
thdir = os.path.dirname(tpath) thdir = os.path.dirname(tpath)
bos.makedirs(thdir) bos.makedirs(os.path.join(thdir, "w"))
inf_path = os.path.join(thdir, "dir.txt") inf_path = os.path.join(thdir, "dir.txt")
if not bos.path.exists(inf_path): if not bos.path.exists(inf_path):
with open(inf_path, "wb") as f: with open(inf_path, "wb") as f:
f.write(fsenc(os.path.dirname(abspath))) f.write(afsenc(os.path.dirname(abspath)))
self.busy[tpath] = [cond] self.busy[tpath] = [cond]
do_conv = True do_conv = True
@@ -242,47 +251,55 @@ class ThumbSrv(object):
abspath, tpath = task abspath, tpath = task
ext = abspath.split(".")[-1].lower() ext = abspath.split(".")[-1].lower()
png_ok = False png_ok = False
fun = None funs = []
if not bos.path.exists(tpath): if not bos.path.exists(tpath):
for lib in self.args.th_dec: for lib in self.args.th_dec:
if fun: if lib == "pil" and ext in self.fmt_pil:
break funs.append(self.conv_pil)
elif lib == "pil" and ext in self.fmt_pil:
fun = self.conv_pil
elif lib == "vips" and ext in self.fmt_vips: elif lib == "vips" and ext in self.fmt_vips:
fun = self.conv_vips funs.append(self.conv_vips)
elif lib == "ff" and ext in self.fmt_ffi or ext in self.fmt_ffv: elif lib == "ff" and ext in self.fmt_ffi or ext in self.fmt_ffv:
fun = self.conv_ffmpeg funs.append(self.conv_ffmpeg)
elif lib == "ff" and ext in self.fmt_ffa: elif lib == "ff" and ext in self.fmt_ffa:
if tpath.endswith(".opus") or tpath.endswith(".caf"): if tpath.endswith(".opus") or tpath.endswith(".caf"):
fun = self.conv_opus funs.append(self.conv_opus)
elif tpath.endswith(".png"): elif tpath.endswith(".png"):
fun = self.conv_waves funs.append(self.conv_waves)
png_ok = True png_ok = True
else: else:
fun = self.conv_spec funs.append(self.conv_spec)
if not png_ok and tpath.endswith(".png"): if not png_ok and tpath.endswith(".png"):
raise Pebkac(400, "png only allowed for waveforms") raise Pebkac(400, "png only allowed for waveforms")
if fun: tdir, tfn = os.path.split(tpath)
ttpath = os.path.join(tdir, "w", tfn)
for fun in funs:
try: try:
fun(abspath, tpath) fun(abspath, ttpath)
break
except Exception as ex: except Exception as ex:
msg = "{} could not create thumbnail of {}\n{}" msg = "{} could not create thumbnail of {}\n{}"
msg = msg.format(fun.__name__, abspath, min_ex()) msg = msg.format(fun.__name__, abspath, min_ex())
c: Union[str, int] = 1 if "<Signals.SIG" in msg else "90" c: Union[str, int] = 1 if "<Signals.SIG" in msg else "90"
self.log(msg, c) self.log(msg, c)
if getattr(ex, "returncode", 0) != 321: if getattr(ex, "returncode", 0) != 321:
with open(tpath, "wb") as _: if fun == funs[-1]:
pass with open(ttpath, "wb") as _:
pass
else: else:
# ffmpeg may spawn empty files on windows # ffmpeg may spawn empty files on windows
try: try:
os.unlink(tpath) os.unlink(ttpath)
except: except:
pass pass
try:
bos.rename(ttpath, tpath)
except:
pass
with self.mutex: with self.mutex:
subs = self.busy[tpath] subs = self.busy[tpath]
del self.busy[tpath] del self.busy[tpath]
@@ -363,7 +380,8 @@ class ThumbSrv(object):
img = pyvips.Image.thumbnail(abspath, w, **kw) img = pyvips.Image.thumbnail(abspath, w, **kw)
break break
except: except:
pass if c == crops[-1]:
raise
img.write_to_file(tpath, Q=40) img.write_to_file(tpath, Q=40)

View File

@@ -311,6 +311,7 @@ class U2idx(object):
sret = [] sret = []
fk = flags.get("fk") fk = flags.get("fk")
dots = flags.get("dotsrch")
c = cur.execute(uq, tuple(vuv)) c = cur.execute(uq, tuple(vuv))
for hit in c: for hit in c:
w, ts, sz, rd, fn, ip, at = hit[:7] w, ts, sz, rd, fn, ip, at = hit[:7]
@@ -321,6 +322,10 @@ class U2idx(object):
if rd.startswith("//") or fn.startswith("//"): if rd.startswith("//") or fn.startswith("//"):
rd, fn = s3dec(rd, fn) rd, fn = s3dec(rd, fn)
rp = quotep("/".join([x for x in [vtop, rd, fn] if x]))
if not dots and "/." in ("/" + rp):
continue
if not fk: if not fk:
suf = "" suf = ""
else: else:
@@ -337,8 +342,7 @@ class U2idx(object):
)[:fk] )[:fk]
) )
rp = quotep("/".join([x for x in [vtop, rd, fn] if x])) + suf sret.append({"ts": int(ts), "sz": sz, "rp": rp + suf, "w": w[:16]})
sret.append({"ts": int(ts), "sz": sz, "rp": rp, "w": w[:16]})
for hit in sret: for hit in sret:
w = hit["w"] w = hit["w"]

View File

@@ -38,16 +38,21 @@ from .util import (
db_ex_chk, db_ex_chk,
djoin, djoin,
fsenc, fsenc,
gen_filekey,
gen_filekey_dbg,
hidedir, hidedir,
min_ex, min_ex,
quotep, quotep,
rand_name,
ren_open, ren_open,
rmdirs, rmdirs,
rmdirs_up, rmdirs_up,
runhook,
s2hms, s2hms,
s3dec, s3dec,
s3enc, s3enc,
sanitize_fn, sanitize_fn,
sfsenc,
spack, spack,
statdir, statdir,
vjoin, vjoin,
@@ -157,6 +162,7 @@ class Up2k(object):
Daemon(self._lastmodder, "up2k-lastmod") Daemon(self._lastmodder, "up2k-lastmod")
self.fstab = Fstab(self.log_func) self.fstab = Fstab(self.log_func)
self.gen_fk = self._gen_fk if self.args.log_fk else gen_filekey
if self.args.hash_mt < 2: if self.args.hash_mt < 2:
self.mth: Optional[MTHash] = None self.mth: Optional[MTHash] = None
@@ -212,6 +218,9 @@ class Up2k(object):
self.log_func("up2k", msg, c) self.log_func("up2k", msg, c)
def _gen_fk(self, salt: str, fspath: str, fsize: int, inode: int) -> str:
return gen_filekey_dbg(salt, fspath, fsize, inode, self.log, self.args.log_fk)
def _block(self, why: str) -> None: def _block(self, why: str) -> None:
self.blocked = why self.blocked = why
self.log("uploads temporarily blocked due to " + why, 3) self.log("uploads temporarily blocked due to " + why, 3)
@@ -387,7 +396,7 @@ class Up2k(object):
def _vis_job_progress(self, job: dict[str, Any]) -> str: def _vis_job_progress(self, job: dict[str, Any]) -> str:
perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"])) perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"]))
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = djoin(job["ptop"], job["prel"], job["name"])
return "{:5.1f}% {}".format(perc, path) return "{:5.1f}% {}".format(perc, path)
def _vis_reg_progress(self, reg: dict[str, dict[str, Any]]) -> list[str]: def _vis_reg_progress(self, reg: dict[str, dict[str, Any]]) -> list[str]:
@@ -441,6 +450,7 @@ class Up2k(object):
# only need to protect register_vpath but all in one go feels right # only need to protect register_vpath but all in one go feels right
for vol in vols: for vol in vols:
try: try:
bos.makedirs(vol.realpath) # gonna happen at snap anyways
bos.listdir(vol.realpath) bos.listdir(vol.realpath)
except: except:
self.volstate[vol.vpath] = "OFFLINE (cannot access folder)" self.volstate[vol.vpath] = "OFFLINE (cannot access folder)"
@@ -476,7 +486,7 @@ class Up2k(object):
if next((zv for zv in vols if "e2ds" in zv.flags), None): if next((zv for zv in vols if "e2ds" in zv.flags), None):
self._block("indexing") self._block("indexing")
if self.args.re_dhash: if self.args.re_dhash or [zv for zv in vols if "e2tsr" in zv.flags]:
self.args.re_dhash = False self.args.re_dhash = False
self._drop_caches() self._drop_caches()
@@ -642,9 +652,16 @@ class Up2k(object):
ff = "\033[0;35m{}{:.0}" ff = "\033[0;35m{}{:.0}"
fv = "\033[0;36m{}:\033[90m{}" fv = "\033[0;36m{}:\033[90m{}"
fx = set(("html_head",)) fx = set(("html_head",))
fdl = ("dbd", "lg_sbf", "md_sbf", "mte", "mth", "mtp", "nrand", "rand")
fd = {x: x for x in fdl}
fl = {
k: v
for k, v in flags.items()
if k not in fd or v != getattr(self.args, fd[k])
}
a = [ a = [
(ft if v is True else ff if v is False else fv).format(k, str(v)) (ft if v is True else ff if v is False else fv).format(k, str(v))
for k, v in flags.items() for k, v in fl.items()
if k not in fx if k not in fx
] ]
if a: if a:
@@ -675,7 +692,7 @@ class Up2k(object):
pass pass
for k, job in reg2.items(): for k, job in reg2.items():
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = djoin(job["ptop"], job["prel"], job["name"])
if bos.path.exists(path): if bos.path.exists(path):
reg[k] = job reg[k] = job
job["poke"] = time.time() job["poke"] = time.time()
@@ -842,6 +859,7 @@ class Up2k(object):
seen = seen + [rcdir] seen = seen + [rcdir]
unreg: list[str] = [] unreg: list[str] = []
files: list[tuple[int, int, str]] = [] files: list[tuple[int, int, str]] = []
fat32 = True
assert self.pp and self.mem_cur assert self.pp and self.mem_cur
self.pp.msg = "a{} {}".format(self.pp.n, cdir) self.pp.msg = "a{} {}".format(self.pp.n, cdir)
@@ -866,6 +884,9 @@ class Up2k(object):
lmod = int(inf.st_mtime) lmod = int(inf.st_mtime)
sz = inf.st_size sz = inf.st_size
if fat32 and inf.st_mtime % 2:
fat32 = False
if stat.S_ISDIR(inf.st_mode): if stat.S_ISDIR(inf.st_mode):
rap = absreal(abspath) rap = absreal(abspath)
if dev and inf.st_dev != dev: if dev and inf.st_dev != dev:
@@ -953,6 +974,9 @@ class Up2k(object):
self.log(t.format(top, rp, len(in_db), rep_db)) self.log(t.format(top, rp, len(in_db), rep_db))
dts = -1 dts = -1
if fat32 and abs(dts - lmod) == 1:
dts = lmod
if dts == lmod and dsz == sz and (nohash or dw[0] != "#" or not sz): if dts == lmod and dsz == sz and (nohash or dw[0] != "#" or not sz):
continue continue
@@ -1063,7 +1087,7 @@ class Up2k(object):
else: else:
rd = drd rd = drd
abspath = os.path.join(top, rd) abspath = djoin(top, rd)
self.pp.msg = "b{} {}".format(ndirs - nchecked, abspath) self.pp.msg = "b{} {}".format(ndirs - nchecked, abspath)
try: try:
if os.path.isdir(abspath): if os.path.isdir(abspath):
@@ -1104,7 +1128,7 @@ class Up2k(object):
if crd != rd: if crd != rd:
crd = rd crd = rd
try: try:
cdc = set(os.listdir(os.path.join(top, rd))) cdc = set(os.listdir(djoin(top, rd)))
except: except:
cdc.clear() cdc.clear()
@@ -1180,7 +1204,7 @@ class Up2k(object):
rd = drd rd = drd
fn = dfn fn = dfn
abspath = os.path.join(ptop, rd, fn) abspath = djoin(ptop, rd, fn)
if rei and rei.search(abspath): if rei and rei.search(abspath):
continue continue
@@ -1389,7 +1413,7 @@ class Up2k(object):
q = "insert into mt values (?,'t:mtp','a')" q = "insert into mt values (?,'t:mtp','a')"
cur.execute(q, (w[:16],)) cur.execute(q, (w[:16],))
abspath = os.path.join(ptop, rd, fn) abspath = djoin(ptop, rd, fn)
self.pp.msg = "c{} {}".format(nq, abspath) self.pp.msg = "c{} {}".format(nq, abspath)
if not mpool: if not mpool:
n_tags = self._tagscan_file(cur, entags, w, abspath, ip, at) n_tags = self._tagscan_file(cur, entags, w, abspath, ip, at)
@@ -1480,6 +1504,10 @@ class Up2k(object):
t0 = time.time() t0 = time.time()
for ptop, flags in self.flags.items(): for ptop, flags in self.flags.items():
if "mtp" in flags: if "mtp" in flags:
if ptop not in self.entags:
t = "skipping mtp for unavailable volume {}"
self.log(t.format(ptop), 1)
continue
self._run_one_mtp(ptop, gid) self._run_one_mtp(ptop, gid)
td = time.time() - t0 td = time.time() - t0
@@ -1549,7 +1577,7 @@ class Up2k(object):
q = "select rd, fn, ip, at from up where substr(w,1,16)=? limit 1" q = "select rd, fn, ip, at from up where substr(w,1,16)=? limit 1"
rd, fn, ip, at = cur.execute(q, (w,)).fetchone() rd, fn, ip, at = cur.execute(q, (w,)).fetchone()
rd, fn = s3dec(rd, fn) rd, fn = s3dec(rd, fn)
abspath = os.path.join(ptop, rd, fn) abspath = djoin(ptop, rd, fn)
q = "select k from mt where w = ?" q = "select k from mt where w = ?"
zq = cur.execute(q, (w,)).fetchall() zq = cur.execute(q, (w,)).fetchall()
@@ -2001,6 +2029,7 @@ class Up2k(object):
reg = self.registry[ptop] reg = self.registry[ptop]
vfs = self.asrv.vfs.all_vols[cj["vtop"]] vfs = self.asrv.vfs.all_vols[cj["vtop"]]
n4g = vfs.flags.get("noforget") n4g = vfs.flags.get("noforget")
rand = vfs.flags.get("rand") or cj.get("rand")
lost: list[tuple["sqlite3.Cursor", str, str]] = [] lost: list[tuple["sqlite3.Cursor", str, str]] = []
vols = [(ptop, jcur)] if jcur else [] vols = [(ptop, jcur)] if jcur else []
@@ -2025,7 +2054,7 @@ class Up2k(object):
if dp_dir.startswith("//") or dp_fn.startswith("//"): if dp_dir.startswith("//") or dp_fn.startswith("//"):
dp_dir, dp_fn = s3dec(dp_dir, dp_fn) dp_dir, dp_fn = s3dec(dp_dir, dp_fn)
dp_abs = "/".join([ptop, dp_dir, dp_fn]) dp_abs = djoin(ptop, dp_dir, dp_fn)
try: try:
st = bos.stat(dp_abs) st = bos.stat(dp_abs)
if stat.S_ISLNK(st.st_mode): if stat.S_ISLNK(st.st_mode):
@@ -2046,6 +2075,8 @@ class Up2k(object):
"sprs": sprs, # dontcare; finished anyways "sprs": sprs, # dontcare; finished anyways
"size": dsize, "size": dsize,
"lmod": dtime, "lmod": dtime,
"host": cj["host"],
"user": cj["user"],
"addr": ip, "addr": ip,
"at": at, "at": at,
"hash": [], "hash": [],
@@ -2063,7 +2094,12 @@ class Up2k(object):
) )
alts.append((score, -len(alts), j)) alts.append((score, -len(alts), j))
job = sorted(alts, reverse=True)[0][2] if alts else None if alts:
best = sorted(alts, reverse=True)[0]
job = best[2]
else:
job = None
if job and wark in reg: if job and wark in reg:
# self.log("pop " + wark + " " + job["name"] + " handle_json db", 4) # self.log("pop " + wark + " " + job["name"] + " handle_json db", 4)
del reg[wark] del reg[wark]
@@ -2093,7 +2129,7 @@ class Up2k(object):
# ensure the files haven't been deleted manually # ensure the files haven't been deleted manually
names = [job[x] for x in ["name", "tnam"] if x in job] names = [job[x] for x in ["name", "tnam"] if x in job]
for fn in names: for fn in names:
path = os.path.join(job["ptop"], job["prel"], fn) path = djoin(job["ptop"], job["prel"], fn)
try: try:
if bos.path.getsize(path) > 0: if bos.path.getsize(path) > 0:
# upload completed or both present # upload completed or both present
@@ -2105,9 +2141,9 @@ class Up2k(object):
break break
else: else:
# file contents match, but not the path # file contents match, but not the path
src = os.path.join(job["ptop"], job["prel"], job["name"]) src = djoin(job["ptop"], job["prel"], job["name"])
dst = os.path.join(cj["ptop"], cj["prel"], cj["name"]) dst = djoin(cj["ptop"], cj["prel"], cj["name"])
vsrc = os.path.join(job["vtop"], job["prel"], job["name"]) vsrc = djoin(job["vtop"], job["prel"], job["name"])
vsrc = vsrc.replace("\\", "/") # just for prints anyways vsrc = vsrc.replace("\\", "/") # just for prints anyways
if job["need"]: if job["need"]:
self.log("unfinished:\n {0}\n {1}".format(src, dst)) self.log("unfinished:\n {0}\n {1}".format(src, dst))
@@ -2134,29 +2170,40 @@ class Up2k(object):
# symlink to the client-provided name, # symlink to the client-provided name,
# returning the previous upload info # returning the previous upload info
job = deepcopy(job) job = deepcopy(job)
for k in ["ptop", "vtop", "prel"]: for k in "ptop vtop prel addr".split():
job[k] = cj[k] job[k] = cj[k]
pdir = djoin(cj["ptop"], cj["prel"]) pdir = djoin(cj["ptop"], cj["prel"])
job["name"] = self._untaken(pdir, cj, now) if rand:
dst = os.path.join(job["ptop"], job["prel"], job["name"]) job["name"] = rand_name(
pdir, cj["name"], vfs.flags["nrand"]
)
else:
job["name"] = self._untaken(pdir, cj, now)
dst = djoin(job["ptop"], job["prel"], job["name"])
if not self.args.nw: if not self.args.nw:
bos.unlink(dst) # TODO ed pls
try: try:
self._symlink(src, dst, lmod=cj["lmod"]) dvf = self.flags[job["ptop"]]
self._symlink(src, dst, dvf, lmod=cj["lmod"], rm=True)
except: except:
if bos.path.exists(dst):
bos.unlink(dst)
if not n4g: if not n4g:
raise raise
if cur: if cur:
a = [cj[x] for x in "prel name lmod size addr".split()] a = [job[x] for x in "prel name lmod size addr".split()]
a += [cj.get("at") or time.time()] a += [job.get("at") or time.time()]
self.db_add(cur, wark, *a) self.db_add(cur, wark, *a)
cur.connection.commit() cur.connection.commit()
if not job: if not job:
ap1 = djoin(cj["ptop"], cj["prel"])
if rand:
cj["name"] = rand_name(ap1, cj["name"], vfs.flags["nrand"])
if vfs.lim: if vfs.lim:
ap1 = djoin(cj["ptop"], cj["prel"])
ap2, cj["prel"] = vfs.lim.all( ap2, cj["prel"] = vfs.lim.all(
cj["addr"], cj["prel"], cj["size"], ap1, reg cj["addr"], cj["prel"], cj["size"], ap1, reg
) )
@@ -2174,6 +2221,8 @@ class Up2k(object):
} }
# client-provided, sanitized by _get_wark: name, size, lmod # client-provided, sanitized by _get_wark: name, size, lmod
for k in [ for k in [
"host",
"user",
"addr", "addr",
"vtop", "vtop",
"ptop", "ptop",
@@ -2207,7 +2256,7 @@ class Up2k(object):
purl = "{}/{}".format(job["vtop"], job["prel"]).strip("/") purl = "{}/{}".format(job["vtop"], job["prel"]).strip("/")
purl = "/{}/".format(purl) if purl else "/" purl = "/{}/".format(purl) if purl else "/"
return { ret = {
"name": job["name"], "name": job["name"],
"purl": purl, "purl": purl,
"size": job["size"], "size": job["size"],
@@ -2217,6 +2266,18 @@ class Up2k(object):
"wark": wark, "wark": wark,
} }
if (
not ret["hash"]
and "fk" in vfs.flags
and (cj["user"] in vfs.axs.uread or cj["user"] in vfs.axs.upget)
):
ap = absreal(djoin(job["ptop"], job["prel"], job["name"]))
ino = 0 if ANYWIN else bos.stat(ap).st_ino
fk = self.gen_fk(self.args.fk_salt, ap, job["size"], ino)
ret["fk"] = fk[: vfs.flags["fk"]]
return ret
def _untaken(self, fdir: str, job: dict[str, Any], ts: float) -> str: def _untaken(self, fdir: str, job: dict[str, Any], ts: float) -> str:
fname = job["name"] fname = job["name"]
ip = job["addr"] ip = job["addr"]
@@ -2224,7 +2285,7 @@ class Up2k(object):
if self.args.nw: if self.args.nw:
return fname return fname
fp = os.path.join(fdir, fname) fp = djoin(fdir, fname)
if job.get("replace") and bos.path.exists(fp): if job.get("replace") and bos.path.exists(fp):
self.log("replacing existing file at {}".format(fp)) self.log("replacing existing file at {}".format(fp))
bos.unlink(fp) bos.unlink(fp)
@@ -2239,7 +2300,13 @@ class Up2k(object):
return zfw["orz"][1] return zfw["orz"][1]
def _symlink( def _symlink(
self, src: str, dst: str, verbose: bool = True, lmod: float = 0 self,
src: str,
dst: str,
flags: dict[str, Any],
verbose: bool = True,
rm: bool = False,
lmod: float = 0,
) -> None: ) -> None:
if verbose: if verbose:
self.log("linking dupe:\n {0}\n {1}".format(src, dst)) self.log("linking dupe:\n {0}\n {1}".format(src, dst))
@@ -2249,7 +2316,7 @@ class Up2k(object):
linked = False linked = False
try: try:
if self.args.no_dedup: if "copydupes" in flags:
raise Exception("disabled in config") raise Exception("disabled in config")
lsrc = src lsrc = src
@@ -2278,13 +2345,16 @@ class Up2k(object):
lsrc = lsrc.replace("/", "\\") lsrc = lsrc.replace("/", "\\")
ldst = ldst.replace("/", "\\") ldst = ldst.replace("/", "\\")
if rm and bos.path.exists(dst):
bos.unlink(dst)
try: try:
if self.args.hardlink: if "hardlink" in flags:
os.link(fsenc(src), fsenc(dst)) os.link(fsenc(src), fsenc(dst))
linked = True linked = True
except Exception as ex: except Exception as ex:
self.log("cannot hardlink: " + repr(ex)) self.log("cannot hardlink: " + repr(ex))
if self.args.never_symlink: if "neversymlink" in flags:
raise Exception("symlink-fallback disabled in cfg") raise Exception("symlink-fallback disabled in cfg")
if not linked: if not linked:
@@ -2328,7 +2398,7 @@ class Up2k(object):
t = "that chunk is already being written to:\n {}\n {} {}/{}\n {}" t = "that chunk is already being written to:\n {}\n {} {}/{}\n {}"
raise Pebkac(400, t.format(wark, chash, idx, nh, job["name"])) raise Pebkac(400, t.format(wark, chash, idx, nh, job["name"]))
path = os.path.join(job["ptop"], job["prel"], job["tnam"]) path = djoin(job["ptop"], job["prel"], job["tnam"])
chunksize = up2k_chunksize(job["size"]) chunksize = up2k_chunksize(job["size"])
ofs = [chunksize * x for x in nchunk] ofs = [chunksize * x for x in nchunk]
@@ -2359,9 +2429,9 @@ class Up2k(object):
self.db_act = time.time() self.db_act = time.time()
try: try:
job = self.registry[ptop][wark] job = self.registry[ptop][wark]
pdir = os.path.join(job["ptop"], job["prel"]) pdir = djoin(job["ptop"], job["prel"])
src = os.path.join(pdir, job["tnam"]) src = djoin(pdir, job["tnam"])
dst = os.path.join(pdir, job["name"]) dst = djoin(pdir, job["name"])
except Exception as ex: except Exception as ex:
return "confirm_chunk, wark, " + repr(ex) # type: ignore return "confirm_chunk, wark, " + repr(ex) # type: ignore
@@ -2394,15 +2464,35 @@ class Up2k(object):
self.db_act = time.time() self.db_act = time.time()
try: try:
job = self.registry[ptop][wark] job = self.registry[ptop][wark]
pdir = os.path.join(job["ptop"], job["prel"]) pdir = djoin(job["ptop"], job["prel"])
src = os.path.join(pdir, job["tnam"]) src = djoin(pdir, job["tnam"])
dst = os.path.join(pdir, job["name"]) dst = djoin(pdir, job["name"])
except Exception as ex: except Exception as ex:
raise Pebkac(500, "finish_upload, wark, " + repr(ex)) raise Pebkac(500, "finish_upload, wark, " + repr(ex))
# self.log("--- " + wark + " " + dst + " finish_upload atomic " + dst, 4) # self.log("--- " + wark + " " + dst + " finish_upload atomic " + dst, 4)
atomic_move(src, dst) atomic_move(src, dst)
upt = job.get("at") or time.time()
xau = self.flags[ptop].get("xau")
if xau and not runhook(
self.log,
xau,
dst,
djoin(job["vtop"], job["prel"], job["name"]),
job["host"],
job["user"],
job["addr"],
upt,
job["size"],
"",
):
t = "upload blocked by xau"
self.log(t, 1)
bos.unlink(dst)
self.registry[ptop].pop(wark, None)
raise Pebkac(403, t)
times = (int(time.time()), int(job["lmod"])) times = (int(time.time()), int(job["lmod"]))
if ANYWIN: if ANYWIN:
z1 = (dst, job["size"], times, job["sprs"]) z1 = (dst, job["size"], times, job["sprs"])
@@ -2414,7 +2504,6 @@ class Up2k(object):
pass pass
z2 = [job[x] for x in "ptop wark prel name lmod size addr".split()] z2 = [job[x] for x in "ptop wark prel name lmod size addr".split()]
upt = job.get("at") or time.time()
wake_sr = False wake_sr = False
try: try:
flt = job["life"] flt = job["life"]
@@ -2444,11 +2533,11 @@ class Up2k(object):
cur = self.cur.get(ptop) cur = self.cur.get(ptop)
for rd, fn, lmod in dupes: for rd, fn, lmod in dupes:
d2 = os.path.join(ptop, rd, fn) d2 = djoin(ptop, rd, fn)
if os.path.exists(d2): if os.path.exists(d2):
continue continue
self._symlink(dst, d2, lmod=lmod) self._symlink(dst, d2, self.flags[ptop], lmod=lmod)
if cur: if cur:
self.db_rm(cur, rd, fn) self.db_rm(cur, rd, fn)
self.db_add(cur, wark, rd, fn, *z2[-4:]) self.db_add(cur, wark, rd, fn, *z2[-4:])
@@ -2610,6 +2699,8 @@ class Up2k(object):
self.log("rm: skip type-{:x} file [{}]".format(st.st_mode, atop)) self.log("rm: skip type-{:x} file [{}]".format(st.st_mode, atop))
return 0, [], [] return 0, [], []
xbd = vn.flags.get("xbd")
xad = vn.flags.get("xad")
n_files = 0 n_files = 0
for dbv, vrem, _, adir, files, rd, vd in g: for dbv, vrem, _, adir, files, rd, vd in g:
for fn in [x[0] for x in files]: for fn in [x[0] for x in files]:
@@ -2620,11 +2711,17 @@ class Up2k(object):
break break
n_files += 1 n_files += 1
abspath = os.path.join(adir, fn) abspath = djoin(adir, fn)
volpath = "{}/{}".format(vrem, fn).strip("/") volpath = "{}/{}".format(vrem, fn).strip("/")
vpath = "{}/{}".format(dbv.vpath, volpath).strip("/") vpath = "{}/{}".format(dbv.vpath, volpath).strip("/")
self.log("rm {}\n {}".format(vpath, abspath)) self.log("rm {}\n {}".format(vpath, abspath))
_ = dbv.get(volpath, uname, *permsets[0]) _ = dbv.get(volpath, uname, *permsets[0])
if xbd and not runhook(
self.log, xbd, abspath, vpath, "", uname, "", 0, 0, ""
):
self.log("delete blocked by xbd: {}".format(abspath), 1)
continue
with self.mutex: with self.mutex:
cur = None cur = None
try: try:
@@ -2636,6 +2733,8 @@ class Up2k(object):
cur.connection.commit() cur.connection.commit()
bos.unlink(abspath) bos.unlink(abspath)
if xad:
runhook(self.log, xad, abspath, vpath, "", uname, "", 0, 0, "")
ok: list[str] = [] ok: list[str] = []
ng: list[str] = [] ng: list[str] = []
@@ -2728,6 +2827,13 @@ class Up2k(object):
if bos.path.exists(dabs): if bos.path.exists(dabs):
raise Pebkac(400, "mv2: target file exists") raise Pebkac(400, "mv2: target file exists")
xbr = svn.flags.get("xbr")
xar = dvn.flags.get("xar")
if xbr and not runhook(self.log, xbr, sabs, svp, "", uname, "", 0, 0, ""):
t = "move blocked by xbr: {}".format(svp)
self.log(t, 1)
raise Pebkac(405, t)
bos.makedirs(os.path.dirname(dabs)) bos.makedirs(os.path.dirname(dabs))
if bos.path.islink(sabs): if bos.path.islink(sabs):
@@ -2736,7 +2842,7 @@ class Up2k(object):
self.log(t.format(sabs, dabs, dlabs)) self.log(t.format(sabs, dabs, dlabs))
mt = bos.path.getmtime(sabs, False) mt = bos.path.getmtime(sabs, False)
bos.unlink(sabs) bos.unlink(sabs)
self._symlink(dlabs, dabs, False, lmod=mt) self._symlink(dlabs, dabs, dvn.flags, False, lmod=mt)
# folders are too scary, schedule rescan of both vols # folders are too scary, schedule rescan of both vols
self.need_rescan.add(svn.vpath) self.need_rescan.add(svn.vpath)
@@ -2744,6 +2850,9 @@ class Up2k(object):
with self.rescan_cond: with self.rescan_cond:
self.rescan_cond.notify_all() self.rescan_cond.notify_all()
if xar:
runhook(self.log, xar, dabs, dvp, "", uname, "", 0, 0, "")
return "k" return "k"
c1, w, ftime_, fsize_, ip, at = self._find_from_vpath(svn.realpath, srem) c1, w, ftime_, fsize_, ip, at = self._find_from_vpath(svn.realpath, srem)
@@ -2757,21 +2866,6 @@ class Up2k(object):
ftime = ftime_ ftime = ftime_
fsize = fsize_ or 0 fsize = fsize_ or 0
if w:
assert c1
if c2 and c2 != c1:
self._copy_tags(c1, c2, w)
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
self._relink(w, svn.realpath, srem, dabs)
curs.add(c1)
if c2:
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
curs.add(c2)
else:
self.log("not found in src db: [{}]".format(svp))
try: try:
atomic_move(sabs, dabs) atomic_move(sabs, dabs)
except OSError as ex: except OSError as ex:
@@ -2788,6 +2882,24 @@ class Up2k(object):
os.unlink(b1) os.unlink(b1)
if w:
assert c1
if c2 and c2 != c1:
self._copy_tags(c1, c2, w)
self._forget_file(svn.realpath, srem, c1, w, c1 != c2)
self._relink(w, svn.realpath, srem, dabs)
curs.add(c1)
if c2:
self.db_add(c2, w, drd, dfn, ftime, fsize, ip or "", at or 0)
curs.add(c2)
else:
self.log("not found in src db: [{}]".format(svp))
if xar:
runhook(self.log, xar, dabs, dvp, "", uname, "", 0, 0, "")
return "k" return "k"
def _copy_tags( def _copy_tags(
@@ -2878,14 +2990,14 @@ class Up2k(object):
or to first remaining full if no dabs (delete) or to first remaining full if no dabs (delete)
""" """
dupes = [] dupes = []
sabs = os.path.join(sptop, srem) sabs = djoin(sptop, srem)
q = "select rd, fn from up where substr(w,1,16)=? and w=?" q = "select rd, fn from up where substr(w,1,16)=? and w=?"
for ptop, cur in self.cur.items(): for ptop, cur in self.cur.items():
for rd, fn in cur.execute(q, (wark[:16], wark)): for rd, fn in cur.execute(q, (wark[:16], wark)):
if rd.startswith("//") or fn.startswith("//"): if rd.startswith("//") or fn.startswith("//"):
rd, fn = s3dec(rd, fn) rd, fn = s3dec(rd, fn)
dvrem = "/".join([rd, fn]).strip("/") dvrem = vjoin(rd, fn).strip("/")
if ptop != sptop or srem != dvrem: if ptop != sptop or srem != dvrem:
dupes.append([ptop, dvrem]) dupes.append([ptop, dvrem])
self.log("found {} dupe: [{}] {}".format(wark, ptop, dvrem)) self.log("found {} dupe: [{}] {}".format(wark, ptop, dvrem))
@@ -2896,7 +3008,7 @@ class Up2k(object):
full: dict[str, tuple[str, str]] = {} full: dict[str, tuple[str, str]] = {}
links: dict[str, tuple[str, str]] = {} links: dict[str, tuple[str, str]] = {}
for ptop, vp in dupes: for ptop, vp in dupes:
ap = os.path.join(ptop, vp) ap = djoin(ptop, vp)
try: try:
d = links if bos.path.islink(ap) else full d = links if bos.path.islink(ap) else full
d[ap] = (ptop, vp) d[ap] = (ptop, vp)
@@ -2912,14 +3024,14 @@ class Up2k(object):
bos.unlink(slabs) bos.unlink(slabs)
bos.rename(sabs, slabs) bos.rename(sabs, slabs)
bos.utime(slabs, (int(time.time()), int(mt)), False) bos.utime(slabs, (int(time.time()), int(mt)), False)
self._symlink(slabs, sabs, False) self._symlink(slabs, sabs, self.flags.get(ptop) or {}, False)
full[slabs] = (ptop, rem) full[slabs] = (ptop, rem)
sabs = slabs sabs = slabs
if not dabs: if not dabs:
dabs = list(sorted(full.keys()))[0] dabs = list(sorted(full.keys()))[0]
for alink in links: for alink, parts in links.items():
lmod = None lmod = None
try: try:
if alink != sabs and absreal(alink) != sabs: if alink != sabs and absreal(alink) != sabs:
@@ -2931,7 +3043,8 @@ class Up2k(object):
except: except:
pass pass
self._symlink(dabs, alink, False, lmod=lmod or 0) flags = self.flags.get(parts[0]) or {}
self._symlink(dabs, alink, flags, False, lmod=lmod or 0)
return len(full) + len(links) return len(full) + len(links)
@@ -2999,7 +3112,7 @@ class Up2k(object):
def _new_upload(self, job: dict[str, Any]) -> None: def _new_upload(self, job: dict[str, Any]) -> None:
pdir = djoin(job["ptop"], job["prel"]) pdir = djoin(job["ptop"], job["prel"])
if not job["size"] and bos.path.isfile(os.path.join(pdir, job["name"])): if not job["size"] and bos.path.isfile(djoin(pdir, job["name"])):
return return
self.registry[job["ptop"]][job["wark"]] = job self.registry[job["ptop"]][job["wark"]] = job
@@ -3007,6 +3120,25 @@ class Up2k(object):
# if len(job["name"].split(".")) > 8: # if len(job["name"].split(".")) > 8:
# raise Exception("aaa") # raise Exception("aaa")
xbu = self.flags[job["ptop"]].get("xbu")
ap_chk = djoin(pdir, job["name"])
vp_chk = djoin(job["vtop"], job["prel"], job["name"])
if xbu and not runhook(
self.log,
xbu,
ap_chk,
vp_chk,
job["host"],
job["user"],
job["addr"],
job["t0"],
job["size"],
"",
):
t = "upload blocked by xbu: {}".format(vp_chk)
self.log(t, 1)
raise Pebkac(403, t)
tnam = job["name"] + ".PARTIAL" tnam = job["name"] + ".PARTIAL"
if self.args.dotpart: if self.args.dotpart:
tnam = "." + tnam tnam = "." + tnam
@@ -3025,7 +3157,7 @@ class Up2k(object):
suffix = "-{:.6f}-{}".format(job["t0"], dip) suffix = "-{:.6f}-{}".format(job["t0"], dip)
with ren_open(tnam, "wb", fdir=pdir, suffix=suffix) as zfw: with ren_open(tnam, "wb", fdir=pdir, suffix=suffix) as zfw:
f, job["tnam"] = zfw["orz"] f, job["tnam"] = zfw["orz"]
abspath = os.path.join(pdir, job["tnam"]) abspath = djoin(pdir, job["tnam"])
sprs = job["sprs"] sprs = job["sprs"]
sz = job["size"] sz = job["size"]
relabel = False relabel = False
@@ -3124,7 +3256,7 @@ class Up2k(object):
x x
for x in reg.values() for x in reg.values()
if x["need"] if x["need"]
and not bos.path.exists(os.path.join(x["ptop"], x["prel"], x["name"])) and not bos.path.exists(djoin(x["ptop"], x["prel"], x["name"]))
] ]
if rm or lost: if rm or lost:
@@ -3137,7 +3269,7 @@ class Up2k(object):
del reg[job["wark"]] del reg[job["wark"]]
try: try:
# remove the filename reservation # remove the filename reservation
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = djoin(job["ptop"], job["prel"], job["name"])
if bos.path.getsize(path) == 0: if bos.path.getsize(path) == 0:
bos.unlink(path) bos.unlink(path)
except: except:
@@ -3146,7 +3278,7 @@ class Up2k(object):
try: try:
if len(job["hash"]) == len(job["need"]): if len(job["hash"]) == len(job["need"]):
# PARTIAL is empty, delete that too # PARTIAL is empty, delete that too
path = os.path.join(job["ptop"], job["prel"], job["tnam"]) path = djoin(job["ptop"], job["prel"], job["tnam"])
bos.unlink(path) bos.unlink(path)
except: except:
pass pass
@@ -3195,7 +3327,7 @@ class Up2k(object):
continue continue
# self.log("\n " + repr([ptop, rd, fn])) # self.log("\n " + repr([ptop, rd, fn]))
abspath = os.path.join(ptop, rd, fn) abspath = djoin(ptop, rd, fn)
try: try:
tags = self.mtag.get(abspath) tags = self.mtag.get(abspath)
ntags1 = len(tags) ntags1 = len(tags)
@@ -3221,7 +3353,7 @@ class Up2k(object):
continue continue
# TODO is undef if vol 404 on startup # TODO is undef if vol 404 on startup
entags = self.entags[ptop] entags = self.entags.get(ptop)
if not entags: if not entags:
self.log("no entags okay.jpg", c=3) self.log("no entags okay.jpg", c=3)
continue continue
@@ -3245,7 +3377,7 @@ class Up2k(object):
if "e2d" not in self.flags[ptop]: if "e2d" not in self.flags[ptop]:
continue continue
abspath = os.path.join(ptop, rd, fn) abspath = djoin(ptop, rd, fn)
self.log("hashing " + abspath) self.log("hashing " + abspath)
inf = bos.stat(abspath) inf = bos.stat(abspath)
if not inf.st_size: if not inf.st_size:
@@ -3324,6 +3456,6 @@ def up2k_wark_from_hashlist(salt: str, filesize: int, hashes: list[str]) -> str:
def up2k_wark_from_metadata(salt: str, sz: int, lastmod: int, rd: str, fn: str) -> str: def up2k_wark_from_metadata(salt: str, sz: int, lastmod: int, rd: str, fn: str) -> str:
ret = fsenc("{}\n{}\n{}\n{}\n{}".format(salt, lastmod, sz, rd, fn)) ret = sfsenc("{}\n{}\n{}\n{}\n{}".format(salt, lastmod, sz, rd, fn))
ret = base64.urlsafe_b64encode(hashlib.sha512(ret).digest()) ret = base64.urlsafe_b64encode(hashlib.sha512(ret).digest())
return "#{}".format(ret.decode("ascii"))[:44] return "#{}".format(ret.decode("ascii"))[:44]

View File

@@ -6,6 +6,7 @@ import contextlib
import errno import errno
import hashlib import hashlib
import hmac import hmac
import json
import logging import logging
import math import math
import mimetypes import mimetypes
@@ -13,6 +14,7 @@ import os
import platform import platform
import re import re
import select import select
import shutil
import signal import signal
import socket import socket
import stat import stat
@@ -142,6 +144,8 @@ SYMTIME = sys.version_info > (3, 6) and os.utime in os.supports_follow_symlinks
META_NOBOTS = '<meta name="robots" content="noindex, nofollow">' META_NOBOTS = '<meta name="robots" content="noindex, nofollow">'
FFMPEG_URL = "https://www.gyan.dev/ffmpeg/builds/ffmpeg-git-full.7z"
HTTPCODE = { HTTPCODE = {
200: "OK", 200: "OK",
201: "Created", 201: "Created",
@@ -228,6 +232,7 @@ application msi=x-ms-installer cab=vnd.ms-cab-compressed rpm=x-rpm crx=x-chrome-
application epub=epub+zip mobi=x-mobipocket-ebook lit=x-ms-reader rss=rss+xml atom=atom+xml torrent=x-bittorrent application epub=epub+zip mobi=x-mobipocket-ebook lit=x-ms-reader rss=rss+xml atom=atom+xml torrent=x-bittorrent
application p7s=pkcs7-signature dcm=dicom shx=vnd.shx shp=vnd.shp dbf=x-dbf gml=gml+xml gpx=gpx+xml amf=x-amf application p7s=pkcs7-signature dcm=dicom shx=vnd.shx shp=vnd.shp dbf=x-dbf gml=gml+xml gpx=gpx+xml amf=x-amf
application swf=x-shockwave-flash m3u=vnd.apple.mpegurl db3=vnd.sqlite3 sqlite=vnd.sqlite3 application swf=x-shockwave-flash m3u=vnd.apple.mpegurl db3=vnd.sqlite3 sqlite=vnd.sqlite3
text ass=plain ssa=plain
image jpg=jpeg xpm=x-xpixmap psd=vnd.adobe.photoshop jpf=jpx tif=tiff ico=x-icon djvu=vnd.djvu image jpg=jpeg xpm=x-xpixmap psd=vnd.adobe.photoshop jpf=jpx tif=tiff ico=x-icon djvu=vnd.djvu
image heic=heic-sequence heif=heif-sequence hdr=vnd.radiance svg=svg+xml image heic=heic-sequence heif=heif-sequence hdr=vnd.radiance svg=svg+xml
audio caf=x-caf mp3=mpeg m4a=mp4 mid=midi mpc=musepack aif=aiff au=basic qcp=qcelp audio caf=x-caf mp3=mpeg m4a=mp4 mid=midi mpc=musepack aif=aiff au=basic qcp=qcelp
@@ -288,6 +293,20 @@ REKOBO_KEY = {
REKOBO_LKEY = {k.lower(): v for k, v in REKOBO_KEY.items()} REKOBO_LKEY = {k.lower(): v for k, v in REKOBO_KEY.items()}
pybin = sys.executable or ""
is_exe = bool(getattr(sys, "frozen", False))
if is_exe:
pybin = ""
for p in "python3 python".split():
try:
p = shutil.which(p)
if p:
pybin = p
break
except:
pass
def py_desc() -> str: def py_desc() -> str:
interp = platform.python_implementation() interp = platform.python_implementation()
py_ver = ".".join([str(x) for x in sys.version_info]) py_ver = ".".join([str(x) for x in sys.version_info])
@@ -361,8 +380,11 @@ class Daemon(threading.Thread):
name: Optional[str] = None, name: Optional[str] = None,
a: Optional[Iterable[Any]] = None, a: Optional[Iterable[Any]] = None,
r: bool = True, r: bool = True,
ka: Optional[dict[Any, Any]] = None,
) -> None: ) -> None:
threading.Thread.__init__(self, target=target, name=name, args=a or ()) threading.Thread.__init__(
self, target=target, name=name, args=a or (), kwargs=ka
)
self.daemon = True self.daemon = True
if r: if r:
self.start() self.start()
@@ -378,6 +400,9 @@ class Netdev(object):
def __str__(self): def __str__(self):
return "{}-{}{}".format(self.idx, self.name, self.desc) return "{}-{}{}".format(self.idx, self.name, self.desc)
def __repr__(self):
return "'{}-{}'".format(self.idx, self.name)
def __lt__(self, rhs): def __lt__(self, rhs):
return str(self) < str(rhs) return str(self) < str(rhs)
@@ -437,9 +462,7 @@ class HLog(logging.Handler):
else: else:
c = 1 c = 1
if record.name.startswith("PIL") and lv < logging.WARNING: if record.name == "pyftpdlib":
return
elif record.name == "pyftpdlib":
m = self.ptn_ftp.match(msg) m = self.ptn_ftp.match(msg)
if m: if m:
ip = m.group(1) ip = m.group(1)
@@ -469,7 +492,7 @@ class NetMap(object):
) )
ips = [x for x in ips if x not in ("::1", "127.0.0.1")] ips = [x for x in ips if x not in ("::1", "127.0.0.1")]
ips = [[x for x in netdevs if x.startswith(y + "/")][0] for y in ips] ips = find_prefix(ips, netdevs)
self.cache: dict[str, str] = {} self.cache: dict[str, str] = {}
self.b2sip: dict[bytes, str] = {} self.b2sip: dict[bytes, str] = {}
@@ -1147,20 +1170,12 @@ def ren_open(
fun = kwargs.pop("fun", open) fun = kwargs.pop("fun", open)
fdir = kwargs.pop("fdir", None) fdir = kwargs.pop("fdir", None)
suffix = kwargs.pop("suffix", None) suffix = kwargs.pop("suffix", None)
overwrite = kwargs.pop("overwrite", None)
if fname == os.devnull: if fname == os.devnull:
with fun(fname, *args, **kwargs) as f: with fun(fname, *args, **kwargs) as f:
yield {"orz": (f, fname)} yield {"orz": (f, fname)}
return return
if overwrite:
assert fdir
fpath = os.path.join(fdir, fname)
with fun(fsenc(fpath), *args, **kwargs) as f:
yield {"orz": (f, fname)}
return
if suffix: if suffix:
ext = fname.split(".")[-1] ext = fname.split(".")[-1]
if len(ext) < 7: if len(ext) < 7:
@@ -1187,7 +1202,7 @@ def ren_open(
else: else:
fpath = fname fpath = fname
if suffix and os.path.exists(fsenc(fpath)): if suffix and os.path.lexists(fsenc(fpath)):
fpath += suffix fpath += suffix
fname += suffix fname += suffix
ext += suffix ext += suffix
@@ -1507,6 +1522,28 @@ def read_header(sr: Unrecv) -> list[str]:
return ret[:ofs].decode("utf-8", "surrogateescape").lstrip("\r\n").split("\r\n") return ret[:ofs].decode("utf-8", "surrogateescape").lstrip("\r\n").split("\r\n")
def rand_name(fdir: str, fn: str, rnd: int) -> str:
ok = False
try:
ext = "." + fn.rsplit(".", 1)[1]
except:
ext = ""
for extra in range(16):
for _ in range(16):
if ok:
break
nc = rnd + extra
nb = int((6 + 6 * nc) / 8)
zb = os.urandom(nb)
zb = base64.urlsafe_b64encode(zb)
fn = zb[:nc].decode("utf-8") + ext
ok = not os.path.exists(fsenc(os.path.join(fdir, fn)))
return fn
def gen_filekey(salt: str, fspath: str, fsize: int, inode: int) -> str: def gen_filekey(salt: str, fspath: str, fsize: int, inode: int) -> str:
return base64.urlsafe_b64encode( return base64.urlsafe_b64encode(
hashlib.sha512( hashlib.sha512(
@@ -1549,14 +1586,16 @@ def gen_filekey_dbg(
return ret return ret
def gencookie(k: str, v: str, dur: Optional[int]) -> str: def gencookie(k: str, v: str, r: str, tls: bool, dur: Optional[int]) -> str:
v = v.replace(";", "") v = v.replace(";", "")
if dur: if dur:
exp = formatdate(time.time() + dur, usegmt=True) exp = formatdate(time.time() + dur, usegmt=True)
else: else:
exp = "Fri, 15 Aug 1997 01:00:00 GMT" exp = "Fri, 15 Aug 1997 01:00:00 GMT"
return "{}={}; Path=/; Expires={}; SameSite=Lax".format(k, v, exp) return "{}={}; Path=/{}; Expires={}{}; SameSite=Lax".format(
k, v, r, exp, "; Secure" if tls else ""
)
def humansize(sz: float, terse: bool = False) -> str: def humansize(sz: float, terse: bool = False) -> str:
@@ -1682,7 +1721,7 @@ def relchk(rp: str) -> str:
def absreal(fpath: str) -> str: def absreal(fpath: str) -> str:
try: try:
return fsdec(os.path.abspath(os.path.realpath(fsenc(fpath)))) return fsdec(os.path.abspath(os.path.realpath(afsenc(fpath))))
except: except:
if not WINDOWS: if not WINDOWS:
raise raise
@@ -1712,6 +1751,15 @@ def ipnorm(ip: str) -> str:
return ip return ip
def find_prefix(ips: list[str], netdevs: dict[str, Netdev]) -> list[str]:
ret = []
for ip in ips:
hit = next((x for x in netdevs if x.startswith(ip + "/")), None)
if hit:
ret.append(hit)
return ret
def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str: def html_escape(s: str, quot: bool = False, crlf: bool = False) -> str:
"""html.escape but also newlines""" """html.escape but also newlines"""
s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
@@ -1793,6 +1841,24 @@ def _w8enc3(txt: str) -> bytes:
return txt.encode(FS_ENCODING, "surrogateescape") return txt.encode(FS_ENCODING, "surrogateescape")
def _msdec(txt: bytes) -> str:
ret = txt.decode(FS_ENCODING, "surrogateescape")
return ret[4:] if ret.startswith("\\\\?\\") else ret
def _msaenc(txt: str) -> bytes:
return txt.replace("/", "\\").encode(FS_ENCODING, "surrogateescape")
def _msenc(txt: str) -> bytes:
txt = txt.replace("/", "\\")
if ":" not in txt and not txt.startswith("\\\\"):
txt = absreal(txt)
ret = txt.encode(FS_ENCODING, "surrogateescape")
return ret if ret.startswith(b"\\\\") else b"\\\\?\\" + ret
w8dec = _w8dec3 if not PY2 else _w8dec2 w8dec = _w8dec3 if not PY2 else _w8dec2
w8enc = _w8enc3 if not PY2 else _w8enc2 w8enc = _w8enc3 if not PY2 else _w8enc2
@@ -1807,8 +1873,13 @@ def w8b64enc(txt: str) -> str:
return base64.urlsafe_b64encode(w8enc(txt)).decode("ascii") return base64.urlsafe_b64encode(w8enc(txt)).decode("ascii")
if not PY2 or not WINDOWS: if not PY2 and WINDOWS:
fsenc = w8enc sfsenc = w8enc
afsenc = _msaenc
fsenc = _msenc
fsdec = _msdec
elif not PY2 or not WINDOWS:
fsenc = afsenc = sfsenc = w8enc
fsdec = w8dec fsdec = w8dec
else: else:
# moonrunes become \x3f with bytestrings, # moonrunes become \x3f with bytestrings,
@@ -1819,7 +1890,7 @@ else:
def _not_actually_mbcs_dec(txt: bytes) -> str: def _not_actually_mbcs_dec(txt: bytes) -> str:
return txt return txt
fsenc = _not_actually_mbcs_enc fsenc = afsenc = sfsenc = _not_actually_mbcs_enc
fsdec = _not_actually_mbcs_dec fsdec = _not_actually_mbcs_dec
@@ -2008,6 +2079,20 @@ def read_socket_chunked(
raise Pebkac(400, t.format(x)) raise Pebkac(400, t.format(x))
def list_ips() -> list[str]:
from .stolen.ifaddr import get_adapters
ret: set[str] = set()
for nic in get_adapters():
for ipo in nic.ips:
if len(ipo.ip) < 7:
ret.add(ipo.ip[0]) # ipv6 is (ip,0,0)
else:
ret.add(ipo.ip)
return list(ret)
def yieldfile(fn: str) -> Generator[bytes, None, None]: def yieldfile(fn: str) -> Generator[bytes, None, None]:
with open(fsenc(fn), "rb", 512 * 1024) as f: with open(fsenc(fn), "rb", 512 * 1024) as f:
while True: while True:
@@ -2428,6 +2513,129 @@ def retchk(
raise Exception(t) raise Exception(t)
def _runhook(
log: "NamedLogger",
cmd: str,
ap: str,
vp: str,
host: str,
uname: str,
ip: str,
at: float,
sz: int,
txt: str,
) -> bool:
chk = False
fork = False
jtxt = False
wait = 0
tout = 0
kill = "t"
cap = 0
ocmd = cmd
while "," in cmd[:6]:
arg, cmd = cmd.split(",", 1)
if arg == "c":
chk = True
elif arg == "f":
fork = True
elif arg == "j":
jtxt = True
elif arg.startswith("w"):
wait = float(arg[1:])
elif arg.startswith("t"):
tout = float(arg[1:])
elif arg.startswith("c"):
cap = int(arg[1:]) # 0=none 1=stdout 2=stderr 3=both
elif arg.startswith("k"):
kill = arg[1:] # [t]ree [m]ain [n]one
else:
t = "hook: invalid flag {} in {}"
log(t.format(arg, ocmd))
env = os.environ.copy()
try:
if is_exe:
raise Exception()
pypath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
zsl = [str(pypath)] + [str(x) for x in sys.path if x]
pypath = str(os.pathsep.join(zsl))
env["PYTHONPATH"] = pypath
except:
if not is_exe:
raise
ka = {
"env": env,
"timeout": tout,
"kill": kill,
"capture": cap,
}
if jtxt:
ja = {
"ap": ap,
"vp": vp,
"ip": ip,
"host": host,
"user": uname,
"at": at or time.time(),
"sz": sz,
"txt": txt,
}
arg = json.dumps(ja)
else:
arg = txt or ap
acmd = [cmd, arg]
if cmd.endswith(".py"):
acmd = [pybin] + acmd
bcmd = [fsenc(x) if x == ap else sfsenc(x) for x in acmd]
t0 = time.time()
if fork:
Daemon(runcmd, ocmd, [acmd], ka=ka)
else:
rc, v, err = runcmd(bcmd, **ka) # type: ignore
if chk and rc:
retchk(rc, bcmd, err, log, 5)
return False
wait -= time.time() - t0
if wait > 0:
time.sleep(wait)
return True
def runhook(
log: "NamedLogger",
cmds: list[str],
ap: str,
vp: str,
host: str,
uname: str,
ip: str,
at: float,
sz: int,
txt: str,
) -> bool:
vp = vp.replace("\\", "/")
for cmd in cmds:
try:
if not _runhook(log, cmd, ap, vp, host, uname, ip, at, sz, txt):
return False
except Exception as ex:
log("hook: {}".format(ex))
if ",c," in "," + cmd:
return False
break
return True
def gzip_orig_sz(fn: str) -> int: def gzip_orig_sz(fn: str) -> int:
with open(fsenc(fn), "rb") as f: with open(fsenc(fn), "rb") as f:
f.seek(-4, 2) f.seek(-4, 2)

View File

@@ -27,7 +27,7 @@ window.baguetteBox = (function () {
isOverlayVisible = false, isOverlayVisible = false,
touch = {}, // start-pos touch = {}, // start-pos
touchFlag = false, // busy touchFlag = false, // busy
re_i = /.+\.(gif|jpe?g|png|webp)(\?|$)/i, re_i = /.+\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp)(\?|$)/i,
re_v = /.+\.(webm|mkv|mp4)(\?|$)/i, re_v = /.+\.(webm|mkv|mp4)(\?|$)/i,
anims = ['slideIn', 'fadeIn', 'none'], anims = ['slideIn', 'fadeIn', 'none'],
data = {}, // all galleries data = {}, // all galleries
@@ -277,8 +277,8 @@ window.baguetteBox = (function () {
playpause(); playpause();
else if (k == "KeyU" || k == "KeyO") else if (k == "KeyU" || k == "KeyO")
relseek(k == "KeyU" ? -10 : 10); relseek(k == "KeyU" ? -10 : 10);
else if (k.indexOf('Digit') === 0) else if (k.indexOf('Digit') === 0 && v)
vid().currentTime = vid().duration * parseInt(k.slice(-1)) * 0.1; v.currentTime = v.duration * parseInt(k.slice(-1)) * 0.1;
else if (k == "KeyM" && v) { else if (k == "KeyM" && v) {
v.muted = vmute = !vmute; v.muted = vmute = !vmute;
mp_ctl(); mp_ctl();

View File

@@ -572,6 +572,11 @@ html.dy {
* { * {
line-height: 1.2em; line-height: 1.2em;
} }
::selection {
color: var(--bg-d1);
background: var(--fg);
text-shadow: none;
}
html,body,tr,th,td,#files,a { html,body,tr,th,td,#files,a {
color: inherit; color: inherit;
background: none; background: none;
@@ -754,8 +759,9 @@ html.y #files thead th {
display: inline; display: inline;
} }
#path a { #path a {
margin: 0 0 0 -.2em; padding: 0 .35em;
padding: 0 0 0 .4em; position: relative;
z-index: 1;
/* ie: */ /* ie: */
border-bottom: .1em solid #777\9; border-bottom: .1em solid #777\9;
margin-right: 1em\9; margin-right: 1em\9;
@@ -763,18 +769,17 @@ html.y #files thead th {
#path a:first-child { #path a:first-child {
padding-left: .8em; padding-left: .8em;
} }
#path a:not(:last-child):after { #path i {
content: '';
width: 1.05em; width: 1.05em;
height: 1.05em; height: 1.05em;
margin: -.2em .3em -.2em -.4em; margin: -.5em .15em -.15em -.7em;
display: inline-block; display: inline-block;
border: 1px solid rgba(255,224,192,0.3); border: 1px solid rgba(255,224,192,0.3);
border-width: .05em .05em 0 0; border-width: .05em .05em 0 0;
transform: rotate(45deg); transform: rotate(45deg);
background: linear-gradient(45deg, rgba(0,0,0,0) 40%, rgba(0,0,0,0.25) 75%, rgba(0,0,0,0.35)); background: linear-gradient(45deg, rgba(0,0,0,0) 40%, rgba(0,0,0,0.25) 75%, rgba(0,0,0,0.35));
} }
html.y #path a:not(:last-child)::after { html.y #path i {
background: none; background: none;
border-color: rgba(0,0,0,0.2); border-color: rgba(0,0,0,0.2);
border-width: .1em .1em 0 0; border-width: .1em .1em 0 0;
@@ -793,6 +798,24 @@ html.y #path a:hover {
.logue:empty { .logue:empty {
display: none; display: none;
} }
#doc>iframe,
.logue>iframe {
background: var(--bgg);
border: 1px solid var(--bgg);
border-width: 0 .3em 0 .3em;
border-radius: .5em;
visibility: hidden;
margin: 0 -.3em;
width: 100%;
height: 0;
}
#doc>iframe.focus,
.logue>iframe.focus {
box-shadow: 0 0 .1em .1em var(--a);
}
#pro.logue>iframe {
height: 100vh;
}
#pro.logue { #pro.logue {
margin-bottom: .8em; margin-bottom: .8em;
} }
@@ -817,6 +840,10 @@ html.y #path a:hover {
.mdo { .mdo {
max-width: 52em; max-width: 52em;
} }
.mdo.sb,
#epi.logue.mdo>iframe {
max-width: 54em;
}
.mdo, .mdo,
.mdo * { .mdo * {
line-height: 1.4em; line-height: 1.4em;
@@ -2557,7 +2584,6 @@ html.b #u2conf a.b:hover {
#u2conf input[type="checkbox"]:checked+label:hover { #u2conf input[type="checkbox"]:checked+label:hover {
background: var(--u2-o-1h-bg); background: var(--u2-o-1h-bg);
} }
#op_up2k.srch #u2conf td:nth-child(1)>*,
#op_up2k.srch #u2conf td:nth-child(2)>*, #op_up2k.srch #u2conf td:nth-child(2)>*,
#op_up2k.srch #u2conf td:nth-child(3)>* { #op_up2k.srch #u2conf td:nth-child(3)>* {
background: #777; background: #777;

View File

@@ -36,7 +36,7 @@
<input type="file" name="f" multiple /><br /> <input type="file" name="f" multiple /><br />
<input type="submit" value="start upload"> <input type="submit" value="start upload">
</form> </form>
<a id="bbsw" href="?b=u"><br />switch to basic browser</a> <a id="bbsw" href="?b=u" rel="nofollow"><br />switch to basic browser</a>
</div> </div>
<div id="op_mkdir" class="opview opbox act"> <div id="op_mkdir" class="opview opbox act">
@@ -85,7 +85,7 @@
<div id="bdoc"></div> <div id="bdoc"></div>
{%- endif %} {%- endif %}
<div id="pro" class="logue">{{ logues[0] }}</div> <div id="pro" class="logue">{{ "" if sb_lg else logues[0] }}</div>
<table id="files"> <table id="files">
<thead> <thead>
@@ -119,9 +119,9 @@
</tbody> </tbody>
</table> </table>
<div id="epi" class="logue">{{ logues[1] }}</div> <div id="epi" class="logue">{{ "" if sb_lg else logues[1] }}</div>
<h2><a href="{{ r }}/?h" id="goh">control-panel</a></h2> <h2 id="wfp"><a href="{{ r }}/?h" id="goh">control-panel</a></h2>
<a href="#" id="repl">π</a> <a href="#" id="repl">π</a>
@@ -135,6 +135,7 @@
<script> <script>
var SR = {{ r|tojson }}, var SR = {{ r|tojson }},
TS = "{{ ts }}",
acct = "{{ acct }}", acct = "{{ acct }}",
perms = {{ perms }}, perms = {{ perms }},
themes = {{ themes }}, themes = {{ themes }},
@@ -150,12 +151,15 @@
have_del = {{ have_del|tojson }}, have_del = {{ have_del|tojson }},
have_unpost = {{ have_unpost }}, have_unpost = {{ have_unpost }},
have_zip = {{ have_zip|tojson }}, have_zip = {{ have_zip|tojson }},
sb_md = "{{ sb_md }}",
sb_lg = "{{ sb_lg }}",
lifetime = {{ lifetime }}, lifetime = {{ lifetime }},
turbolvl = {{ turbolvl }}, turbolvl = {{ turbolvl }},
frand = {{ frand|tojson }},
u2sort = "{{ u2sort }}", u2sort = "{{ u2sort }}",
have_emp = {{ have_emp|tojson }}, have_emp = {{ have_emp|tojson }},
txt_ext = "{{ txt_ext }}", txt_ext = "{{ txt_ext }}",
{% if no_prism %}no_prism = 1,{% endif %} logues = {{ logues|tojson if sb_lg else "[]" }},
readme = {{ readme|tojson }}, readme = {{ readme|tojson }},
ls0 = {{ ls0|tojson }}; ls0 = {{ ls0|tojson }};

View File

@@ -108,8 +108,8 @@ var Ls = {
"ot_msg": "msg: send a message to the server log", "ot_msg": "msg: send a message to the server log",
"ot_mp": "media player options", "ot_mp": "media player options",
"ot_cfg": "configuration options", "ot_cfg": "configuration options",
"ot_u2i": 'up2k: upload files (if you have write-access) or toggle into the search-mode to see if they exist somewhere on the server$N$Nuploads are resumable, multithreaded, and file timestamps are preserved, but it uses more CPU than the basic uploader<br /><br />during uploads, this icon becomes a progress indicator!', "ot_u2i": 'up2k: upload files (if you have write-access) or toggle into the search-mode to see if they exist somewhere on the server$N$Nuploads are resumable, multithreaded, and file timestamps are preserved, but it uses more CPU than [🎈]&nbsp; (the basic uploader)<br /><br />during uploads, this icon becomes a progress indicator!',
"ot_u2w": 'up2k: upload files with resume support (close your browser and drop the same files in later)$N$Nmultithreaded, and file timestamps are preserved, but it uses more CPU than the basic uploader<br /><br />during uploads, this icon becomes a progress indicator!', "ot_u2w": 'up2k: upload files with resume support (close your browser and drop the same files in later)$N$Nmultithreaded, and file timestamps are preserved, but it uses more CPU than [🎈]&nbsp; (the basic uploader)<br /><br />during uploads, this icon becomes a progress indicator!',
"ab_mkdir": "make directory", "ab_mkdir": "make directory",
"ab_mkdoc": "new markdown doc", "ab_mkdoc": "new markdown doc",
@@ -134,6 +134,7 @@ var Ls = {
"wt_next": "next track$NHotkey: L", "wt_next": "next track$NHotkey: L",
"ul_par": "parallel uploads:", "ul_par": "parallel uploads:",
"ut_rand": "randomize filenames",
"ut_mt": "continue hashing other files while uploading$N$Nmaybe disable if your CPU or HDD is a bottleneck", "ut_mt": "continue hashing other files while uploading$N$Nmaybe disable if your CPU or HDD is a bottleneck",
"ut_ask": "ask for confirmation before upload starts", "ut_ask": "ask for confirmation before upload starts",
"ut_pot": "improve upload speed on slow devices$Nby making the UI less complex", "ut_pot": "improve upload speed on slow devices$Nby making the UI less complex",
@@ -158,6 +159,9 @@ var Ls = {
"uct_q": "idle, pending", "uct_q": "idle, pending",
"utl_name": "filename", "utl_name": "filename",
"utl_ulist": "list",
"utl_ucopy": "copy",
"utl_links": "links",
"utl_stat": "status", "utl_stat": "status",
"utl_prog": "progress", "utl_prog": "progress",
@@ -346,6 +350,7 @@ var Ls = {
"s_a1": "specific metadata properties", "s_a1": "specific metadata properties",
"md_eshow": "cannot show ", "md_eshow": "cannot show ",
"md_off": "[📜<em>readme</em>] disabled in [⚙️] -- document hidden",
"xhr403": "403: Access denied\n\ntry pressing F5, maybe you got logged out", "xhr403": "403: Access denied\n\ntry pressing F5, maybe you got logged out",
"cf_ok": "sorry about that -- DD" + wah + "oS protection kicked in\n\nthings should resume in about 30 sec\n\nif nothing happens, hit F5 to reload the page", "cf_ok": "sorry about that -- DD" + wah + "oS protection kicked in\n\nthings should resume in about 30 sec\n\nif nothing happens, hit F5 to reload the page",
@@ -366,7 +371,10 @@ var Ls = {
"fz_zipc": "cp437 with crc32 computed early,$Nfor MS-DOS PKZIP v2.04g (october 1993)$N(takes longer to process before download can start)", "fz_zipc": "cp437 with crc32 computed early,$Nfor MS-DOS PKZIP v2.04g (october 1993)$N(takes longer to process before download can start)",
"un_m1": "you can delete your recent uploads below", "un_m1": "you can delete your recent uploads below",
"un_upd": "refresh list", "un_upd": "refresh",
"un_m4": "or share the files visible below:",
"un_ulist": "show",
"un_ucopy": "copy",
"un_flt": "optional filter:&nbsp; URL must contain", "un_flt": "optional filter:&nbsp; URL must contain",
"un_fclr": "clear filter", "un_fclr": "clear filter",
"un_derr": 'unpost-delete failed:\n', "un_derr": 'unpost-delete failed:\n',
@@ -553,8 +561,8 @@ var Ls = {
"ot_msg": "msg: send en beskjed til serverloggen", "ot_msg": "msg: send en beskjed til serverloggen",
"ot_mp": "musikkspiller-instillinger", "ot_mp": "musikkspiller-instillinger",
"ot_cfg": "andre innstillinger", "ot_cfg": "andre innstillinger",
"ot_u2i": 'up2k: last opp filer (hvis du har skrivetilgang) eller bytt til søkemodus for å sjekke om filene finnes et-eller-annet sted på serveren$N$Nopplastninger kan gjenopptas etter avbrudd, skjer stykkevis for potensielt høyere ytelse, og ivaretar datostempling -- men bruker litt mer prosessorkraft enn den primitive opplasteren bup<br /><br />mens opplastninger foregår så vises fremdriften her oppe!', "ot_u2i": 'up2k: last opp filer (hvis du har skrivetilgang) eller bytt til søkemodus for å sjekke om filene finnes et-eller-annet sted på serveren$N$Nopplastninger kan gjenopptas etter avbrudd, skjer stykkevis for potensielt høyere ytelse, og ivaretar datostempling -- men bruker litt mer prosessorkraft enn [🎈]&nbsp; (den primitive opplasteren "bup")<br /><br />mens opplastninger foregår så vises fremdriften her oppe!',
"ot_u2w": 'up2k: filopplastning med støtte for å gjenoppta avbrutte opplastninger -- steng ned nettleseren og dra de samme filene inn i nettleseren igjen for å plukke opp igjen der du slapp$N$Nopplastninger skjer stykkevis for potensielt høyere ytelse, og ivaretar datostempling -- men bruker litt mer prosessorkraft enn den primitive opplasteren "bup"<br /><br />mens opplastninger foregår så vises fremdriften her oppe!', "ot_u2w": 'up2k: filopplastning med støtte for å gjenoppta avbrutte opplastninger -- steng ned nettleseren og dra de samme filene inn i nettleseren igjen for å plukke opp igjen der du slapp$N$Nopplastninger skjer stykkevis for potensielt høyere ytelse, og ivaretar datostempling -- men bruker litt mer prosessorkraft enn [🎈]&nbsp; (den primitive opplasteren "bup")<br /><br />mens opplastninger foregår så vises fremdriften her oppe!',
"ab_mkdir": "lag mappe", "ab_mkdir": "lag mappe",
"ab_mkdoc": "nytt dokument", "ab_mkdoc": "nytt dokument",
@@ -571,14 +579,15 @@ var Ls = {
"wt_selinv": "inverter utvalg", "wt_selinv": "inverter utvalg",
"wt_selzip": "last ned de valgte filene som et arkiv", "wt_selzip": "last ned de valgte filene som et arkiv",
"wt_seldl": "last ned de valgte filene$NSnarvei: Y", "wt_seldl": "last ned de valgte filene$NSnarvei: Y",
"wt_npirc": "kopier sang-info (irc-formattert)", "wt_npirc": "kopiér sang-info (irc-formattert)",
"wt_nptxt": "kopier sang-info", "wt_nptxt": "kopiér sang-info",
"wt_grid": "bytt mellom ikoner og listevisning$NSnarvei: G", "wt_grid": "bytt mellom ikoner og listevisning$NSnarvei: G",
"wt_prev": "forrige sang$NSnarvei: J", "wt_prev": "forrige sang$NSnarvei: J",
"wt_play": "play / pause$NSnarvei: P", "wt_play": "play / pause$NSnarvei: P",
"wt_next": "neste sang$NSnarvei: L", "wt_next": "neste sang$NSnarvei: L",
"ul_par": "samtidige handl.:", "ul_par": "samtidige handl.:",
"ut_rand": "finn opp nye tilfeldige filnavn",
"ut_mt": "fortsett å befare køen mens opplastning foregår$N$Nskru denne av dersom du har en$Ntreg prosessor eller harddisk", "ut_mt": "fortsett å befare køen mens opplastning foregår$N$Nskru denne av dersom du har en$Ntreg prosessor eller harddisk",
"ut_ask": "bekreft filutvalg før opplastning starter", "ut_ask": "bekreft filutvalg før opplastning starter",
"ut_pot": "forbedre ytelsen på trege enheter ved å$Nforenkle brukergrensesnittet", "ut_pot": "forbedre ytelsen på trege enheter ved å$Nforenkle brukergrensesnittet",
@@ -603,6 +612,9 @@ var Ls = {
"uct_q": "køen", "uct_q": "køen",
"utl_name": "filnavn", "utl_name": "filnavn",
"utl_ulist": "vis",
"utl_ucopy": "kopiér",
"utl_links": "lenker",
"utl_stat": "status", "utl_stat": "status",
"utl_prog": "fremdrift", "utl_prog": "fremdrift",
@@ -791,6 +803,7 @@ var Ls = {
"s_a1": "konkrete egenskaper", "s_a1": "konkrete egenskaper",
"md_eshow": "kan ikke vise ", "md_eshow": "kan ikke vise ",
"md_off": "[📜<em>readme</em>] er avskrudd i [⚙️] -- dokument skjult",
"xhr403": "403: Tilgang nektet\n\nkanskje du ble logget ut? prøv å trykk F5", "xhr403": "403: Tilgang nektet\n\nkanskje du ble logget ut? prøv å trykk F5",
"cf_ok": "beklager -- liten tilfeldig kontroll, alt OK\n\nting skal fortsette om ca. 30 sekunder\n\nhvis ikkeno skjer, trykk F5 for å laste siden på nytt", "cf_ok": "beklager -- liten tilfeldig kontroll, alt OK\n\nting skal fortsette om ca. 30 sekunder\n\nhvis ikkeno skjer, trykk F5 for å laste siden på nytt",
@@ -811,7 +824,10 @@ var Ls = {
"fz_zipc": "cp437 med tidlig crc32,$Nfor MS-DOS PKZIP v2.04g (oktober 1993)$N(øker behandlingstid på server)", "fz_zipc": "cp437 med tidlig crc32,$Nfor MS-DOS PKZIP v2.04g (oktober 1993)$N(øker behandlingstid på server)",
"un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp", "un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp",
"un_upd": "oppdater listen", "un_upd": "oppdater",
"un_m4": "eller hvis du vil dele nedlastnings-lenkene:",
"un_ulist": "vis",
"un_ucopy": "kopiér",
"un_flt": "valgfritt filter:&nbsp; filnavn / filsti må inneholde", "un_flt": "valgfritt filter:&nbsp; filnavn / filsti må inneholde",
"un_fclr": "nullstill filter", "un_fclr": "nullstill filter",
"un_derr": 'unpost-sletting feilet:\n', "un_derr": 'unpost-sletting feilet:\n',
@@ -851,7 +867,7 @@ var Ls = {
"u_hashdone": 'befaring ferdig', "u_hashdone": 'befaring ferdig',
"u_hashing": 'les', "u_hashing": 'les',
"u_fixed": "OK!&nbsp; Løste seg 👍", "u_fixed": "OK!&nbsp; Løste seg 👍",
"u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert harmløst, fortsetter\n\nfil: {2}", "u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert greit, fortsetter\n\nfil: {2}",
"u_cuerr2": "server nektet opplastningen (del {0} av {1});\nprøver igjen senere\n\nfil: {2}\n\nerror ", "u_cuerr2": "server nektet opplastningen (del {0} av {1});\nprøver igjen senere\n\nfil: {2}\n\nerror ",
"u_ehstmp": "prøver igjen; se mld nederst", "u_ehstmp": "prøver igjen; se mld nederst",
"u_ehsfin": "server nektet forespørselen om å ferdigstille filen; prøver igjen...", "u_ehsfin": "server nektet forespørselen om å ferdigstille filen; prøver igjen...",
@@ -953,7 +969,7 @@ ebi('op_up2k').innerHTML = (
'<table id="u2conf">\n' + '<table id="u2conf">\n' +
' <tr>\n' + ' <tr>\n' +
' <td class="c"><br />' + L.ul_par + '</td>\n' + ' <td class="c" data-perm="read"><br />' + L.ul_par + '</td>\n' +
' <td class="c" rowspan="2">\n' + ' <td class="c" rowspan="2">\n' +
' <input type="checkbox" id="multitask" />\n' + ' <input type="checkbox" id="multitask" />\n' +
' <label for="multitask" tt="' + L.ut_mt + '">🏃</label>\n' + ' <label for="multitask" tt="' + L.ut_mt + '">🏃</label>\n' +
@@ -963,8 +979,8 @@ ebi('op_up2k').innerHTML = (
' <label for="potato" tt="' + L.ut_pot + '">🥔</label>\n' + ' <label for="potato" tt="' + L.ut_pot + '">🥔</label>\n' +
' </td>\n' + ' </td>\n' +
' <td class="c" rowspan="2">\n' + ' <td class="c" rowspan="2">\n' +
' <input type="checkbox" id="ask_up" />\n' + ' <input type="checkbox" id="u2rand" />\n' +
' <label for="ask_up" tt="' + L.ut_ask + '">💭</label>\n' + ' <label for="u2rand" tt="' + L.ut_rand + '">🎲</label>\n' +
' </td>\n' + ' </td>\n' +
' <td class="c" data-perm="read" data-dep="idx" rowspan="2">\n' + ' <td class="c" data-perm="read" data-dep="idx" rowspan="2">\n' +
' <input type="checkbox" id="fsearch" />\n' + ' <input type="checkbox" id="fsearch" />\n' +
@@ -974,7 +990,7 @@ ebi('op_up2k').innerHTML = (
' <td data-perm="read" rowspan="2" id="u2c3w"></td>\n' + ' <td data-perm="read" rowspan="2" id="u2c3w"></td>\n' +
' </tr>\n' + ' </tr>\n' +
' <tr>\n' + ' <tr>\n' +
' <td class="c">\n' + ' <td class="c" data-perm="read">\n' +
' <a href="#" class="b" id="nthread_sub">&ndash;</a><input\n' + ' <a href="#" class="b" id="nthread_sub">&ndash;</a><input\n' +
' class="txtbox" id="nthread" value="2" tt="' + L.ut_par + '"/><a\n' + ' class="txtbox" id="nthread" value="2" tt="' + L.ut_par + '"/><a\n' +
' href="#" class="b" id="nthread_add">+</a><br />&nbsp;\n' + ' href="#" class="b" id="nthread_add">+</a><br />&nbsp;\n' +
@@ -1012,7 +1028,7 @@ ebi('op_up2k').innerHTML = (
'<div id="u2tabw" class="na"><table id="u2tab">\n' + '<div id="u2tabw" class="na"><table id="u2tab">\n' +
' <thead>\n' + ' <thead>\n' +
' <tr>\n' + ' <tr>\n' +
' <td>' + L.utl_name + '</td>\n' + ' <td>' + L.utl_name + ' &nbsp;(<a href="#" id="luplinks">' + L.utl_ulist + '</a>/<a href="#" id="cuplinks">' + L.utl_ucopy + '</a>' + L.utl_links + ')</td>\n' +
' <td>' + L.utl_stat + '</td>\n' + ' <td>' + L.utl_stat + '</td>\n' +
' <td>' + L.utl_prog + '</td>\n' + ' <td>' + L.utl_prog + '</td>\n' +
' </tr>\n' + ' </tr>\n' +
@@ -1073,6 +1089,7 @@ ebi('op_cfg').innerHTML = (
'<div>\n' + '<div>\n' +
' <h3>' + L.cl_uopts + '</h3>\n' + ' <h3>' + L.cl_uopts + '</h3>\n' +
' <div>\n' + ' <div>\n' +
' <a id="ask_up" class="tgl btn" href="#" tt="' + L.ut_ask + '">💭</a>\n' +
' <a id="hashw" class="tgl btn" href="#" tt="' + L.cut_mt + '">mt</a>\n' + ' <a id="hashw" class="tgl btn" href="#" tt="' + L.cut_mt + '">mt</a>\n' +
' <a id="u2turbo" class="tgl btn ttb" href="#" tt="' + L.cut_turbo + '">turbo</a>\n' + ' <a id="u2turbo" class="tgl btn ttb" href="#" tt="' + L.cut_turbo + '">turbo</a>\n' +
' <a id="u2tdate" class="tgl btn ttb" href="#" tt="' + L.cut_datechk + '">date-chk</a>\n' + ' <a id="u2tdate" class="tgl btn ttb" href="#" tt="' + L.cut_datechk + '">date-chk</a>\n' +
@@ -1200,6 +1217,17 @@ function goto(dest) {
} }
var SBW, SBH; // scrollbar size
(function () {
var el = mknod('div');
el.style.cssText = 'overflow:scroll;width:100px;height:100px';
document.body.appendChild(el);
SBW = el.offsetWidth - el.clientWidth;
SBH = el.offsetHeight - el.clientHeight;
document.body.removeChild(el);
})();
var have_webp = sread('have_webp'); var have_webp = sread('have_webp');
(function () { (function () {
if (have_webp !== null) if (have_webp !== null)
@@ -1451,9 +1479,9 @@ try {
catch (ex) { } catch (ex) { }
var re_au_native = can_ogg ? /\.(opus|ogg|m4a|aac|mp3|wav|flac)$/i : var re_au_native = can_ogg ? /\.(aac|flac|m4a|mp3|ogg|opus|wav)$/i :
have_acode ? /\.(opus|m4a|aac|mp3|wav|flac)$/i : /\.(m4a|aac|mp3|wav|flac)$/i, have_acode ? /\.(aac|flac|m4a|mp3|opus|wav)$/i : /\.(aac|flac|m4a|mp3|wav)$/i,
re_au_all = /\.(aac|m4a|ogg|opus|flac|alac|mp3|mp2|ac3|dts|wma|ra|wav|aif|aiff|au|alaw|ulaw|mulaw|amr|gsm|ape|tak|tta|wv|mpc)$/i; re_au_all = /\.(aac|ac3|aif|aiff|alac|alaw|amr|ape|au|dfpwm|dts|flac|gsm|it|m4a|mo3|mod|mp2|mp3|mpc|mptm|mt2|mulaw|ogg|okt|opus|ra|s3m|tak|tta|ulaw|wav|wma|wv|xm|xpk)$/i;
// extract songs + add play column // extract songs + add play column
@@ -1697,29 +1725,9 @@ var widget = (function () {
m += '[' + cv + s2ms(mp.au.currentTime) + ck + '/' + cv + s2ms(mp.au.duration) + ck + ']'; m += '[' + cv + s2ms(mp.au.currentTime) + ck + '/' + cv + s2ms(mp.au.duration) + ck + ']';
var o = mknod('input'); cliptxt(m, function () {
o.style.cssText = 'position:fixed;top:45%;left:48%;padding:1em;z-index:9'; toast.ok(1, 'copied to clipboard', null, 'top');
o.value = m; });
document.body.appendChild(o);
var cln = function () {
o.value = 'copied to clipboard ';
setTimeout(function () {
document.body.removeChild(o);
}, 500);
};
var fb = function () {
console.log('fb');
o.focus();
o.select();
document.execCommand("copy");
cln();
};
try {
// https only
navigator.clipboard.writeText(m).then(cln, fb);
}
catch (ex) { fb(); }
}; };
r.set(sread('au_open') == 1); r.set(sread('au_open') == 1);
setTimeout(function () { setTimeout(function () {
@@ -1785,6 +1793,9 @@ var pbar = (function () {
r.wurl = url; r.wurl = url;
var img = new Image(); var img = new Image();
img.onload = function () { img.onload = function () {
if (r.wurl != url)
return;
r.wimg = img; r.wimg = img;
r.onresize(); r.onresize();
}; };
@@ -3392,7 +3403,7 @@ var fileman = (function () {
} }
var xhr = new XHR(); var xhr = new XHR();
xhr.open('GET', f[0].src + '?move=' + dst, true); xhr.open('POST', f[0].src + '?move=' + dst, true);
xhr.onload = xhr.onerror = rename_cb; xhr.onload = xhr.onerror = rename_cb;
xhr.send(); xhr.send();
} }
@@ -3423,7 +3434,7 @@ var fileman = (function () {
} }
toast.show('inf r', 0, esc(L.fd_busy.format(vps.length + 1, vp)), 'r'); toast.show('inf r', 0, esc(L.fd_busy.format(vps.length + 1, vp)), 'r');
xhr.open('GET', vp + '?delete', true); xhr.open('POST', vp + '?delete', true);
xhr.onload = xhr.onerror = delete_cb; xhr.onload = xhr.onerror = delete_cb;
xhr.send(); xhr.send();
} }
@@ -3531,7 +3542,7 @@ var fileman = (function () {
var dst = get_evpath() + vp.split('/').pop(); var dst = get_evpath() + vp.split('/').pop();
xhr.open('GET', vp + '?move=' + dst, true); xhr.open('POST', vp + '?move=' + dst, true);
xhr.onload = xhr.onerror = paste_cb; xhr.onload = xhr.onerror = paste_cb;
xhr.send(); xhr.send();
} }
@@ -4068,7 +4079,7 @@ var thegrid = (function () {
var oth = ebi(this.getAttribute('ref')), var oth = ebi(this.getAttribute('ref')),
href = noq_href(this), href = noq_href(this),
aplay = ebi('a' + oth.getAttribute('id')), aplay = ebi('a' + oth.getAttribute('id')),
is_img = /\.(gif|jpe?g|png|webp|webm|mkv|mp4)(\?|$)/i.test(href), is_img = /\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp|webm|mkv|mp4)(\?|$)/i.test(href),
is_dir = href.endsWith('/'), is_dir = href.endsWith('/'),
in_tree = is_dir && treectl.find(oth.textContent.slice(0, -1)), in_tree = is_dir && treectl.find(oth.textContent.slice(0, -1)),
have_sel = QS('#files tr.sel'), have_sel = QS('#files tr.sel'),
@@ -4528,7 +4539,9 @@ document.onkeydown = function (e) {
return seek_au_rel(n) || true; return seek_au_rel(n) || true;
if (k == 'KeyY') if (k == 'KeyY')
return msel.getsel().length ? ebi('seldl').click() : dl_song(); return msel.getsel().length ? ebi('seldl').click() :
showfile.active() ? ebi('dldoc').click() :
dl_song();
n = k == 'KeyI' ? -1 : k == 'KeyK' ? 1 : 0; n = k == 'KeyI' ? -1 : k == 'KeyK' ? 1 : 0;
if (n !== 0) if (n !== 0)
@@ -5187,8 +5200,8 @@ var treectl = (function () {
function rendertree(res, ts, top0, dst, rst) { function rendertree(res, ts, top0, dst, rst) {
var cur = ebi('treeul').getAttribute('ts'); var cur = ebi('treeul').getAttribute('ts');
if (cur && parseInt(cur) > ts) { if (cur && parseInt(cur) > ts + 20 && QS('#treeul>li>a+a')) {
console.log("reject tree"); console.log("reject tree; " + cur + " / " + (ts - cur));
return; return;
} }
ebi('treeul').setAttribute('ts', ts); ebi('treeul').setAttribute('ts', ts);
@@ -5412,7 +5425,7 @@ var treectl = (function () {
for (var a = 0; a < res.dirs.length; a++) for (var a = 0; a < res.dirs.length; a++)
dirs.push(res.dirs[a].href.split('/')[0].split('?')[0]); dirs.push(res.dirs[a].href.split('/')[0].split('?')[0]);
rendertree({ "a": dirs }, Date.now(), ".", get_evpath()); rendertree({ "a": dirs }, this.ts, ".", get_evpath());
} }
r.gentab(this.top, res); r.gentab(this.top, res);
@@ -5420,8 +5433,8 @@ var treectl = (function () {
despin('#files'); despin('#files');
despin('#gfiles'); despin('#gfiles');
ebi('pro').innerHTML = res.logues ? res.logues[0] || "" : ""; sandbox(ebi('pro'), sb_lg, '', res.logues ? res.logues[0] || "" : "");
ebi('epi').innerHTML = res.logues ? res.logues[1] || "" : ""; sandbox(ebi('epi'), sb_lg, '', res.logues ? res.logues[1] || "" : "");
clmod(ebi('epi'), 'mdo'); clmod(ebi('epi'), 'mdo');
if (res.readme) if (res.readme)
@@ -5529,7 +5542,7 @@ var treectl = (function () {
have_up2k_idx = res.idx; have_up2k_idx = res.idx;
have_tags_idx = res.itag; have_tags_idx = res.itag;
lifetime = res.lifetime; lifetime = res.lifetime;
apply_perms(res.perms); apply_perms(res);
fileman.render(); fileman.render();
} }
if (sel.length) if (sel.length)
@@ -5726,8 +5739,40 @@ function despin(sel) {
} }
function apply_perms(newperms) { var wfp_debounce = (function () {
perms = newperms || []; var r = { 'n': 0, 't': 0 };
r.hide = function () {
if (!sb_lg && !sb_md)
return;
if (++r.n <= 1) {
r.n = 1;
clearTimeout(r.t);
r.t = setTimeout(r.reset, 300);
ebi('wfp').style.opacity = 0.1;
}
};
r.show = function () {
if (!sb_lg && !sb_md)
return;
if (--r.n <= 0) {
r.n = 0;
clearTimeout(r.t);
ebi('wfp').style.opacity = 'unset';
}
};
r.reset = function () {
r.n = 0;
r.show();
};
return r;
})();
function apply_perms(res) {
perms = res.perms || [];
var a = QS('#ops a[data-dest="up2k"]'); var a = QS('#ops a[data-dest="up2k"]');
if (have_up2k_idx) { if (have_up2k_idx) {
@@ -5801,6 +5846,8 @@ function apply_perms(newperms) {
(have_write || tds[a].getAttribute('data-perm') == 'read') ? (have_write || tds[a].getAttribute('data-perm') == 'read') ?
'table-cell' : 'none'; 'table-cell' : 'none';
} }
if (res.frand)
ebi('u2rand').parentNode.style.display = 'none';
if (up2k) if (up2k)
up2k.set_fsearch(); up2k.set_fsearch();
@@ -6555,6 +6602,59 @@ var msel = (function () {
})(); })();
var globalcss = (function () {
var ret = '';
return function () {
if (ret)
return ret;
var dcs = document.styleSheets;
for (var a = 0; a < dcs.length; a++) {
var base = dcs[a].href,
ds = dcs[a].cssRules;
if (!base)
continue;
base = base.replace(/[^/]+$/, '');
for (var b = 0; b < ds.length; b++) {
var css = ds[b].cssText.split(/\burl\(/g);
ret += css[0];
for (var c = 1; c < css.length; c++) {
var delim = (/^["']/.exec(css[c])) ? css[c].slice(0, 1) : '';
ret += 'url(' + delim + ((css[c].slice(0, 8).indexOf('://') + 1 || css[c].startsWith('/')) ? '' : base) +
css[c].slice(delim ? 1 : 0);
}
ret += '\n';
}
}
return ret;
};
})();
var sandboxjs = (function () {
var ret = '',
busy = false,
url = SR + '/.cpr/util.js?_=' + TS,
tag = '<script src="' + url + '"></script>';
return function () {
if (ret || busy)
return ret || tag;
var xhr = new XHR();
xhr.open('GET', url, true);
xhr.onload = function () {
if (this.status == 200)
ret = '<script>' + this.responseText + '</script>';
};
xhr.send();
busy = true;
return tag;
};
})();
function show_md(md, name, div, url, depth) { function show_md(md, name, div, url, depth) {
var errmsg = L.md_eshow + name + ':\n\n', var errmsg = L.md_eshow + name + ':\n\n',
now = get_evpath(); now = get_evpath();
@@ -6563,10 +6663,12 @@ function show_md(md, name, div, url, depth) {
if (url != now) if (url != now)
return; return;
wfp_debounce.hide();
if (!marked) { if (!marked) {
if (depth) if (depth)
return toast.warn(10, errmsg + 'failed to load marked.js') return toast.warn(10, errmsg + 'failed to load marked.js')
wfp_debounce.n--;
return import_js(SR + '/.cpr/deps/marked.js', function () { return import_js(SR + '/.cpr/deps/marked.js', function () {
show_md(md, name, div, url, 1); show_md(md, name, div, url, 1);
}); });
@@ -6574,7 +6676,7 @@ function show_md(md, name, div, url, depth) {
md_plug = {} md_plug = {}
md = load_md_plug(md, 'pre'); md = load_md_plug(md, 'pre');
md = load_md_plug(md, 'post'); md = load_md_plug(md, 'post', sb_md);
var marked_opts = { var marked_opts = {
headerPrefix: 'md-', headerPrefix: 'md-',
@@ -6587,7 +6689,8 @@ function show_md(md, name, div, url, depth) {
try { try {
clmod(div, 'mdo', 1); clmod(div, 'mdo', 1);
div.innerHTML = marked.parse(md, marked_opts); if (sandbox(div, sb_md, 'mdo', marked.parse(md, marked_opts)))
return;
ext = md_plug.post; ext = md_plug.post;
ext = ext ? [ext[0].render, ext[0].render2] : []; ext = ext ? [ext[0].render, ext[0].render2] : [];
@@ -6621,6 +6724,7 @@ function show_md(md, name, div, url, depth) {
catch (ex) { catch (ex) {
toast.warn(10, errmsg + ex); toast.warn(10, errmsg + ex);
} }
wfp_debounce.show();
} }
@@ -6633,7 +6737,7 @@ function set_tabindex() {
function show_readme(md) { function show_readme(md) {
if (!treectl.ireadme) if (!treectl.ireadme)
return; return sandbox(ebi('epi'), '', '', 'a');
show_md(md, 'README.md', ebi('epi')); show_md(md, 'README.md', ebi('epi'));
} }
@@ -6641,6 +6745,94 @@ if (readme)
show_readme(readme); show_readme(readme);
function sandbox(tgt, rules, cls, html) {
if (!treectl.ireadme) {
tgt.innerHTML = html ? L.md_off : '';
return;
}
if (!rules || (html || '').indexOf('<') == -1) {
tgt.innerHTML = html;
clmod(tgt, 'sb');
return false;
}
clmod(tgt, 'sb', 1);
var tid = tgt.getAttribute('id'),
hash = location.hash,
want = '';
if (!cls)
wfp_debounce.hide();
if (hash.startsWith('#md-'))
want = hash.slice(1);
var env = '', tags = QSA('script');
for (var a = 0; a < tags.length; a++) {
var js = tags[a].innerHTML;
if (js && js.indexOf('have_up2k_idx') + 1)
env = js.split(/\blogues *=/)[0] + 'a;';
}
html = '<html class="iframe ' + document.documentElement.className + '"><head><style>' + globalcss() +
'</style><base target="_parent"></head><body id="b" class="logue ' + cls + '">' + html +
'<script>' + env + '</script>' + sandboxjs() +
'<script>var d=document.documentElement,' +
'loc=new URL("' + location.href.split('?')[0] + '");' +
'function say(m){window.parent.postMessage(m,"*")};' +
'setTimeout(function(){var its=0,pih=-1,f=function(){' +
'var ih=2+Math.min(parseInt(getComputedStyle(d).height),d.scrollHeight);' +
'if(ih!=pih){pih=ih;say("iheight #' + tid + ' "+ih,"*")}' +
'if(++its<20)return setTimeout(f,20);if(its==20)setInterval(f,200)' +
'};f();' +
'window.onfocus=function(){say("igot #' + tid + '")};' +
'window.onblur=function(){say("ilost #' + tid + '")};' +
'var el="' + want + '"&&ebi("' + want + '");' +
'if(el)say("iscroll #' + tid + ' "+el.offsetTop);' +
(cls == 'mdo' && md_plug.post ?
'const x={' + md_plug.post + '};' +
'if(x.render)x.render(ebi("b"));' +
'if(x.render2)x.render2(ebi("b"));' : '') +
'},1)</script></body></html>';
var fr = mknod('iframe');
fr.setAttribute('sandbox', rules ? 'allow-' + rules.replace(/ /g, ' allow-') : '');
fr.setAttribute('srcdoc', html);
tgt.innerHTML = '';
tgt.appendChild(fr);
return true;
}
window.addEventListener("message", function (e) {
try {
console.log('msg:' + e.data);
var t = e.data.split(/ /g);
if (t[0] == 'iheight') {
var el = QS(t[1] + '>iframe');
el.style.height = (parseInt(t[2]) + SBH) + 'px';
el.style.visibility = 'unset';
wfp_debounce.show();
}
else if (t[0] == 'iscroll') {
var y1 = QS(t[1]).offsetTop,
y2 = parseInt(t[2]);
console.log(y1, y2);
document.documentElement.scrollTop = y1 + y2;
}
else if (t[0] == 'igot' || t[0] == 'ilost') {
clmod(QS(t[1] + '>iframe'), 'focus', t[0] == 'igot');
}
} catch (ex) {
console.log('msg-err: ' + ex);
}
}, false);
if (sb_lg && logues.length) {
sandbox(ebi('pro'), sb_lg, '', logues[0]);
sandbox(ebi('epi'), sb_lg, '', logues[1]);
}
(function () { (function () {
try { try {
var tr = ebi('files').tBodies[0].rows; var tr = ebi('files').tBodies[0].rows;
@@ -6667,6 +6859,7 @@ function ev_row_tgl(e) {
var unpost = (function () { var unpost = (function () {
ebi('op_unpost').innerHTML = ( ebi('op_unpost').innerHTML = (
L.un_m1 + ' &ndash; <a id="unpost_refresh" href="#">' + L.un_upd + '</a>' + L.un_m1 + ' &ndash; <a id="unpost_refresh" href="#">' + L.un_upd + '</a>' +
'<p>' + L.un_m4 + ' <a id="unpost_ulist" href="#">' + L.un_ulist + '</a> / <a id="unpost_ucopy" href="#">' + L.un_ucopy + '</a>' +
'<p>' + L.un_flt + ' <input type="text" id="unpost_filt" size="20" placeholder="documents/passwords" /><a id="unpost_nofilt" href="#">' + L.un_fclr + '</a></p>' + '<p>' + L.un_flt + ' <input type="text" id="unpost_filt" size="20" placeholder="documents/passwords" /><a id="unpost_nofilt" href="#">' + L.un_fclr + '</a></p>' +
'<div id="unpost"></div>' '<div id="unpost"></div>'
); );
@@ -6732,6 +6925,16 @@ var unpost = (function () {
ct.innerHTML = "<p><em>" + L.un_m3 + "</em></p>"; ct.innerHTML = "<p><em>" + L.un_m3 + "</em></p>";
}; };
function linklist() {
var ret = [],
base = document.location.origin.replace(/\/$/, '');
for (var a = 0; a < r.files.length; a++)
ret.push(base + r.files[a].vp);
return ret.join('\r\n');
}
function unpost_delete_cb() { function unpost_delete_cb() {
if (this.status !== 200) { if (this.status !== 200) {
var msg = this.responseText; var msg = this.responseText;
@@ -6808,6 +7011,19 @@ var unpost = (function () {
goto('unpost'); goto('unpost');
}; };
ebi('unpost_ulist').onclick = function (e) {
ev(e);
modal.alert(linklist());
};
ebi('unpost_ucopy').onclick = function (e) {
ev(e);
var txt = linklist();
cliptxt(txt + '\n', function () {
toast.inf(5, txt.split('\n').length + ' links copied to clipboard');
});
};
return r; return r;
})(); })();
@@ -6903,18 +7119,19 @@ function reload_browser() {
filecols.set_style(); filecols.set_style();
var parts = get_evpath().split('/'), var parts = get_evpath().split('/'),
rm = QSA('#path>a+a+a'), rm = ebi('entree'),
ftab = ebi('files'), ftab = ebi('files'),
link = '/', o; link = '', o;
for (a = rm.length - 1; a >= 0; a--) while (rm.nextSibling)
rm[a].parentNode.removeChild(rm[a]); rm.parentNode.removeChild(rm.nextSibling);
for (var a = 1; a < parts.length - 1; a++) { for (var a = 0; a < parts.length - 1; a++) {
link += parts[a] + '/'; link += parts[a] + '/';
o = mknod('a'); o = mknod('a');
o.setAttribute('href', link); o.setAttribute('href', link);
o.textContent = uricom_dec(parts[a]); o.textContent = uricom_dec(parts[a]) || '/';
ebi('path').appendChild(mknod('i'));
ebi('path').appendChild(o); ebi('path').appendChild(o);
} }

View File

@@ -930,7 +930,9 @@ var set_lno = (function () {
(function () { (function () {
function keydown(ev) { function keydown(ev) {
ev = ev || window.event; ev = ev || window.event;
var kc = ev.code || ev.keyCode || ev.which; var kc = ev.code || ev.keyCode || ev.which,
editing = document.activeElement == dom_src;
//console.log(ev.key, ev.code, ev.keyCode, ev.which); //console.log(ev.key, ev.code, ev.keyCode, ev.which);
if (ctrl(ev) && (ev.code == "KeyS" || kc == 83)) { if (ctrl(ev) && (ev.code == "KeyS" || kc == 83)) {
save(); save();
@@ -941,12 +943,17 @@ var set_lno = (function () {
if (d) if (d)
d.click(); d.click();
} }
if (document.activeElement != dom_src) if (editing)
return true; set_lno();
set_lno();
if (ctrl(ev)) { if (ctrl(ev)) {
if (ev.code == "KeyE") {
dom_nsbs.click();
return false;
}
if (!editing)
return true;
if (ev.code == "KeyH" || kc == 72) { if (ev.code == "KeyH" || kc == 72) {
md_header(ev.shiftKey); md_header(ev.shiftKey);
return false; return false;
@@ -971,10 +978,6 @@ var set_lno = (function () {
iter_uni(); iter_uni();
return false; return false;
} }
if (ev.code == "KeyE") {
dom_nsbs.click();
return false;
}
var up = ev.code == "ArrowUp" || kc == 38; var up = ev.code == "ArrowUp" || kc == 38;
var dn = ev.code == "ArrowDown" || kc == 40; var dn = ev.code == "ArrowDown" || kc == 40;
if (up || dn) { if (up || dn) {
@@ -987,6 +990,9 @@ var set_lno = (function () {
} }
} }
else { else {
if (!editing)
return true;
if (ev.code == "Tab" || kc == 9) { if (ev.code == "Tab" || kc == 9) {
md_indent(ev.shiftKey); md_indent(ev.shiftKey);
return false; return false;

View File

@@ -51,12 +51,30 @@ a.g {
border-color: #3a0; border-color: #3a0;
box-shadow: 0 .3em 1em #4c0; box-shadow: 0 .3em 1em #4c0;
} }
#repl { #repl,
#pb a {
border: none; border: none;
background: none; background: none;
color: inherit; color: inherit;
padding: 0; padding: 0;
} }
#repl {
position: fixed;
bottom: .25em;
left: .2em;
}
#pb {
opacity: .5;
position: fixed;
bottom: .25em;
right: .3em;
}
#pb span {
opacity: .6;
}
#pb a {
margin: 0;
}
table { table {
border-collapse: collapse; border-collapse: collapse;
} }

View File

@@ -46,7 +46,7 @@
<tbody> <tbody>
{% for mp in avol %} {% for mp in avol %}
{%- if mp in vstate and vstate[mp] %} {%- if mp in vstate and vstate[mp] %}
<tr><td><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></td><td><a class="s" href="{{ mp }}?scan">rescan</a></td><td>{{ vstate[mp] }}</td></tr> <tr><td><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></td><td><a class="s" href="{{ r }}{{ mp }}?scan">rescan</a></td><td>{{ vstate[mp] }}</td></tr>
{%- endif %} {%- endif %}
{% endfor %} {% endfor %}
</tbody> </tbody>
@@ -62,7 +62,7 @@
<h1 id="f">you can browse:</h1> <h1 id="f">you can browse:</h1>
<ul> <ul>
{% for mp in rvol %} {% for mp in rvol %}
<li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li> <li><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}
@@ -71,7 +71,7 @@
<h1 id="g">you can upload to:</h1> <h1 id="g">you can upload to:</h1>
<ul> <ul>
{% for mp in wvol %} {% for mp in wvol %}
<li><a href="{{ mp }}{{ url_suf }}">{{ mp }}</a></li> <li><a href="{{ r }}{{ mp }}{{ url_suf }}">{{ mp }}</a></li>
{% endfor %} {% endfor %}
</ul> </ul>
{%- endif %} {%- endif %}
@@ -98,6 +98,9 @@
</ul> </ul>
</div> </div>
<a href="#" id="repl">π</a> <a href="#" id="repl">π</a>
{%- if not this.args.nb %}
<span id="pb"><span>powered by</span> <a href="{{ this.args.pb_url }}">copyparty {{ver}}</a></span>
{%- endif %}
<script> <script>
var SR = {{ r|tojson }}, var SR = {{ r|tojson }},

View File

@@ -15,7 +15,7 @@
<body> <body>
<div id="wrap" class="w"> <div id="wrap" class="w">
<div class="cn"> <div class="cn">
<p class="btns"><a href="{{ r }}/{{ vp }}">browse files</a> // <a href="{{ r }}/?h">control panel</a></p> <p class="btns"><a href="/{{ rvp }}">browse files</a> // <a href="{{ r }}/?h">control panel</a></p>
<p>or choose your OS for cooler alternatives:</p> <p>or choose your OS for cooler alternatives:</p>
<div class="ossel"> <div class="ossel">
<a id="swin" href="#">Windows</a> <a id="swin" href="#">Windows</a>
@@ -47,7 +47,7 @@
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p> <p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
<pre> <pre>
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %} rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>W:</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ rvp }} <b>W:</b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
@@ -55,19 +55,19 @@
<p>if you want to use the native WebDAV client in windows instead (slow and buggy), first run <a href="{{ r }}/.cpr/a/webdav-cfg.bat">webdav-cfg.bat</a> to remove the 47 MiB filesize limit (also fixes latency and password login), then connect:</p> <p>if you want to use the native WebDAV client in windows instead (slow and buggy), first run <a href="{{ r }}/.cpr/a/webdav-cfg.bat">webdav-cfg.bat</a> to remove the 47 MiB filesize limit (also fixes latency and password login), then connect:</p>
<pre> <pre>
net use <b>w:</b> http{{ s }}://{{ ep }}/{{ vp }}{% if accs %} k /user:<b>{{ pw }}</b>{% endif %} net use <b>w:</b> http{{ s }}://{{ ep }}/{{ rvp }}{% if accs %} k /user:<b>{{ pw }}</b>{% endif %}
</pre> </pre>
</div> </div>
<div class="os lin"> <div class="os lin">
<pre> <pre>
yum install davfs2 yum install davfs2
{% if accs %}printf '%s\n' <b>{{ pw }}</b> k | {% endif %}mount -t davfs -ouid=1000 http{{ s }}://{{ ep }}/{{ vp }} <b>mp</b> {% if accs %}printf '%s\n' <b>{{ pw }}</b> k | {% endif %}mount -t davfs -ouid=1000 http{{ s }}://{{ ep }}/{{ rvp }} <b>mp</b>
</pre> </pre>
<p>or you can use rclone instead, which is much slower but doesn't require root:</p> <p>or you can use rclone instead, which is much slower but doesn't require root:</p>
<pre> <pre>
rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %} rclone config create {{ aname }}-dav webdav url=http{{ s }}://{{ rip }}{{ hport }} vendor=other{% if accs %} user=k pass=<b>{{ pw }}</b>{% endif %}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ vp }} <b>mp</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-dav:{{ rvp }} <b>mp</b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>--no-check-certificate</code> to the mount command</em><br />---</p>
@@ -77,20 +77,20 @@
<!-- gnome-bug: ignores vp --> <!-- gnome-bug: ignores vp -->
<pre> <pre>
{%- if accs %} {%- if accs %}
echo <b>{{ pw }}</b> | gio mount dav{{ s }}://k@{{ ep }}/{{ vp }} echo <b>{{ pw }}</b> | gio mount dav{{ s }}://k@{{ ep }}/{{ rvp }}
{%- else %} {%- else %}
gio mount -a dav{{ s }}://{{ ep }}/{{ vp }} gio mount -a dav{{ s }}://{{ ep }}/{{ rvp }}
{%- endif %} {%- endif %}
</pre> </pre>
</div> </div>
<div class="os mac"> <div class="os mac">
<pre> <pre>
osascript -e ' mount volume "http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }}" ' osascript -e ' mount volume "http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ rvp }}" '
</pre> </pre>
<p>or you can open up a Finder, press command-K and paste this instead:</p> <p>or you can open up a Finder, press command-K and paste this instead:</p>
<pre> <pre>
http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ vp }} http{{ s }}://k:<b>{{ pw }}</b>@{{ ep }}/{{ rvp }}
</pre> </pre>
{% if s %} {% if s %}
@@ -108,26 +108,26 @@
<p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p> <p>if you can, install <a href="https://winfsp.dev/rel/">winfsp</a>+<a href="https://downloads.rclone.org/rclone-current-windows-amd64.zip">rclone</a> and then paste this in cmd:</p>
<pre> <pre>
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }} rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>W:</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ rvp }} <b>W:</b>
</pre> </pre>
<p>if you want to use the native FTP client in windows instead (please dont), press <code>win+R</code> and run this command:</p> <p>if you want to use the native FTP client in windows instead (please dont), press <code>win+R</code> and run this command:</p>
<pre> <pre>
explorer {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}<b>{{ pw }}</b>:k@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} explorer {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}<b>{{ pw }}</b>:k@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
</pre> </pre>
</div> </div>
<div class="os lin"> <div class="os lin">
<pre> <pre>
rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }} rclone config create {{ aname }}-ftp ftp host={{ rip }} port={{ args.ftp or args.ftps }} pass=k user={% if accs %}<b>{{ pw }}</b>{% else %}anonymous{% endif %} tls={{ "false" if args.ftp else "true" }}
rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ vp }} <b>mp</b> rclone mount --vfs-cache-mode writes --dir-cache-time 5s {{ aname }}-ftp:{{ rvp }} <b>mp</b>
</pre> </pre>
<p>emergency alternative (gnome/gui-only):</p> <p>emergency alternative (gnome/gui-only):</p>
<!-- gnome-bug: ignores vp --> <!-- gnome-bug: ignores vp -->
<pre> <pre>
{%- if accs %} {%- if accs %}
echo <b>{{ pw }}</b> | gio mount ftp{{ "" if args.ftp else "s" }}://k@{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} echo <b>{{ pw }}</b> | gio mount ftp{{ "" if args.ftp else "s" }}://k@{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
{%- else %} {%- else %}
gio mount -a ftp{{ "" if args.ftp else "s" }}://{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} gio mount -a ftp{{ "" if args.ftp else "s" }}://{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
{%- endif %} {%- endif %}
</pre> </pre>
</div> </div>
@@ -135,7 +135,7 @@
<div class="os mac"> <div class="os mac">
<p>note: FTP is read-only on macos; please use WebDAV instead</p> <p>note: FTP is read-only on macos; please use WebDAV instead</p>
<pre> <pre>
open {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}k:<b>{{ pw }}</b>@{% else %}anonymous:@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ vp }} open {{ "ftp" if args.ftp else "ftps" }}://{% if accs %}k:<b>{{ pw }}</b>@{% else %}anonymous:@{% endif %}{{ host }}:{{ args.ftp or args.ftps }}/{{ rvp }}
</pre> </pre>
</div> </div>
{% endif %} {% endif %}
@@ -149,7 +149,7 @@
<span class="os lin">doesn't need root</span> <span class="os lin">doesn't need root</span>
</p> </p>
<pre> <pre>
partyfuse.py{% if accs %} -a <b>{{ pw }}</b>{% endif %} http{{ s }}://{{ ep }}/{{ vp }} <b><span class="os win">W:</span><span class="os lin mac">mp</span></b> partyfuse.py{% if accs %} -a <b>{{ pw }}</b>{% endif %} http{{ s }}://{{ ep }}/{{ rvp }} <b><span class="os win">W:</span><span class="os lin mac">mp</span></b>
</pre> </pre>
{% if s %} {% if s %}
<p><em>note: if you are on LAN (or just dont have valid certificates), add <code>-td</code></em></p> <p><em>note: if you are on LAN (or just dont have valid certificates), add <code>-td</code></em></p>

View File

@@ -42,6 +42,10 @@ html {
text-shadow: 1px 1px 0 #000; text-shadow: 1px 1px 0 #000;
color: #fff; color: #fff;
} }
#toast.top {
top: 2em;
bottom: unset;
}
#toast a { #toast a {
color: inherit; color: inherit;
text-shadow: inherit; text-shadow: inherit;
@@ -69,6 +73,7 @@ html {
#toastb { #toastb {
max-height: 70vh; max-height: 70vh;
overflow-y: auto; overflow-y: auto;
padding: 1px;
} }
#toast.scroll #toastb { #toast.scroll #toastb {
overflow-y: scroll; overflow-y: scroll;

View File

@@ -856,6 +856,7 @@ function up2k_init(subtle) {
fdom_ctr = 0, fdom_ctr = 0,
biggest_file = 0; biggest_file = 0;
bcfg_bind(uc, 'rand', 'u2rand', false, null, false);
bcfg_bind(uc, 'multitask', 'multitask', true, null, false); bcfg_bind(uc, 'multitask', 'multitask', true, null, false);
bcfg_bind(uc, 'potato', 'potato', false, set_potato, false); bcfg_bind(uc, 'potato', 'potato', false, set_potato, false);
bcfg_bind(uc, 'ask_up', 'ask_up', true, null, false); bcfg_bind(uc, 'ask_up', 'ask_up', true, null, false);
@@ -895,9 +896,9 @@ function up2k_init(subtle) {
"finished": 0 "finished": 0
}, },
"time": { "time": {
"hashing": 0, "hashing": 0.01,
"uploading": 0, "uploading": 0.01,
"busy": 0 "busy": 0.01
}, },
"eta": { "eta": {
"h": "", "h": "",
@@ -1363,6 +1364,10 @@ function up2k_init(subtle) {
if (uc.fsearch) if (uc.fsearch)
entry.srch = 1; entry.srch = 1;
else if (uc.rand) {
entry.rand = true;
entry.name = 'a\n' + entry.name;
}
if (biggest_file < entry.size) if (biggest_file < entry.size)
biggest_file = entry.size; biggest_file = entry.size;
@@ -1398,7 +1403,7 @@ function up2k_init(subtle) {
ebi('u2tabw').className = 'ye'; ebi('u2tabw').className = 'ye';
setTimeout(function () { setTimeout(function () {
if (!actx || actx.state != 'suspended' || toast.tag == L.u_unpt) if (!actx || actx.state != 'suspended' || toast.visible)
return; return;
toast.warn(30, "<div onclick=\"start_actx();toast.inf(3,'thanks!')\">please click this text to<br />unlock full upload speed</div>"); toast.warn(30, "<div onclick=\"start_actx();toast.inf(3,'thanks!')\">please click this text to<br />unlock full upload speed</div>");
@@ -1418,6 +1423,35 @@ function up2k_init(subtle) {
} }
more_one_file(); more_one_file();
function linklist() {
var ret = [],
base = document.location.origin.replace(/\/$/, '');
for (var a = 0; a < st.files.length; a++) {
var t = st.files[a],
url = t.purl + uricom_enc(t.name);
if (t.fk)
url += '?k=' + t.fk;
ret.push(base + url);
}
return ret.join('\r\n');
}
ebi('luplinks').onclick = function (e) {
ev(e);
modal.alert(linklist());
};
ebi('cuplinks').onclick = function (e) {
ev(e);
var txt = linklist();
cliptxt(txt + '\n', function () {
toast.inf(5, txt.split('\n').length + ' links copied to clipboard');
});
};
var etaref = 0, etaskip = 0, utw_minh = 0, utw_read = 0; var etaref = 0, etaskip = 0, utw_minh = 0, utw_read = 0;
function etafun() { function etafun() {
var nhash = st.busy.head.length + st.busy.hash.length + st.todo.head.length + st.todo.hash.length, var nhash = st.busy.head.length + st.busy.hash.length + st.todo.head.length + st.todo.hash.length,
@@ -1555,11 +1589,11 @@ function up2k_init(subtle) {
st.busy.handshake.length) st.busy.handshake.length)
return false; return false;
if (t.n - st.car > 8) if (t.n - st.car > Math.max(8, parallel_uploads))
// prevent runahead from a stuck upload (slow server hdd) // prevent runahead from a stuck upload (slow server hdd)
return false; return false;
if ((uc.multitask ? 1 : 0) < if ((uc.multitask ? parallel_uploads : 0) <
st.todo.upload.length + st.todo.upload.length +
st.busy.upload.length) st.busy.upload.length)
return false; return false;
@@ -1571,21 +1605,22 @@ function up2k_init(subtle) {
if (!parallel_uploads) if (!parallel_uploads)
return false; return false;
var nhs = st.todo.handshake.length + st.busy.handshake.length,
nup = st.todo.upload.length + st.busy.upload.length;
if (uc.multitask) { if (uc.multitask) {
if (nhs + nup < parallel_uploads)
return true;
if (!uc.az) if (!uc.az)
return st.todo.handshake.length + st.busy.handshake.length < 2; return nhs < 2;
var ahead = st.bytes.hashed - st.bytes.finished, var ahead = st.bytes.hashed - st.bytes.finished,
nmax = ahead < biggest_file / 8 ? 32 : 16; nmax = ahead < biggest_file / 8 ? 32 : 16;
return ahead < biggest_file && return ahead < biggest_file && nhs < nmax;
st.todo.handshake.length + st.busy.handshake.length < nmax;
} }
return handshakes_permitted() && 0 == return handshakes_permitted() && 0 == nhs + nup;
st.todo.handshake.length +
st.busy.handshake.length +
st.todo.upload.length +
st.busy.upload.length;
} }
var tasker = (function () { var tasker = (function () {
@@ -1750,20 +1785,22 @@ function up2k_init(subtle) {
var sr = uc.fsearch, var sr = uc.fsearch,
ok = pvis.ctr.ok, ok = pvis.ctr.ok,
ng = pvis.ctr.ng, ng = pvis.ctr.ng,
spd = Math.floor(st.bytes.finished / st.time.busy),
suf = '\n\n{0} @ {1}/s'.format(shumantime(st.time.busy), humansize(spd)),
t = uc.ask_up ? 0 : 10; t = uc.ask_up ? 0 : 10;
console.log('toast', ok, ng); console.log('toast', ok, ng);
if (ok && ng) if (ok && ng)
toast.warn(t, uc.nagtxt = (sr ? L.ur_sm : L.ur_um).format(ok, ng)); toast.warn(t, uc.nagtxt = (sr ? L.ur_sm : L.ur_um).format(ok, ng) + suf);
else if (ok > 1) else if (ok > 1)
toast.ok(t, uc.nagtxt = (sr ? L.ur_aso : L.ur_auo).format(ok)); toast.ok(t, uc.nagtxt = (sr ? L.ur_aso : L.ur_auo).format(ok) + suf);
else if (ok) else if (ok)
toast.ok(t, uc.nagtxt = sr ? L.ur_1so : L.ur_1uo); toast.ok(t, uc.nagtxt = (sr ? L.ur_1so : L.ur_1uo) + suf);
else if (ng > 1) else if (ng > 1)
toast.err(t, uc.nagtxt = (sr ? L.ur_asn : L.ur_aun).format(ng)); toast.err(t, uc.nagtxt = (sr ? L.ur_asn : L.ur_aun).format(ng) + suf);
else if (ng) else if (ng)
toast.err(t, uc.nagtxt = sr ? L.ur_1sn : L.ur_1un); toast.err(t, uc.nagtxt = (sr ? L.ur_1sn : L.ur_1un) + suf);
timer.rm(etafun); timer.rm(etafun);
timer.rm(donut.do); timer.rm(donut.do);
@@ -2210,13 +2247,24 @@ function up2k_init(subtle) {
t.sprs = response.sprs; t.sprs = response.sprs;
var rsp_purl = url_enc(response.purl); var fk = response.fk,
if (rsp_purl !== t.purl || response.name !== t.name) { rsp_purl = url_enc(response.purl),
// server renamed us (file exists / path restrictions) rename = rsp_purl !== t.purl || response.name !== t.name;
console.log("server-rename [" + t.purl + "] [" + t.name + "] to [" + rsp_purl + "] [" + response.name + "]");
if (rename || fk) {
if (rename)
console.log("server-rename [" + t.purl + "] [" + t.name + "] to [" + rsp_purl + "] [" + response.name + "]");
t.purl = rsp_purl; t.purl = rsp_purl;
t.name = response.name; t.name = response.name;
pvis.seth(t.n, 0, linksplit(t.purl + uricom_enc(t.name)).join(' '));
var url = t.purl + uricom_enc(t.name);
if (fk) {
t.fk = fk;
url += '?k=' + fk;
}
pvis.seth(t.n, 0, linksplit(url).join(' '));
} }
var chunksize = get_chunksize(t.size), var chunksize = get_chunksize(t.size),
@@ -2319,9 +2367,10 @@ function up2k_init(subtle) {
} }
var err_pend = rsp.indexOf('partial upload exists at a different') + 1, var err_pend = rsp.indexOf('partial upload exists at a different') + 1,
err_plug = rsp.indexOf('upload blocked by x') + 1,
err_dupe = rsp.indexOf('upload rejected, file already exists') + 1; err_dupe = rsp.indexOf('upload rejected, file already exists') + 1;
if (err_pend || err_dupe) { if (err_pend || err_plug || err_dupe) {
err = rsp; err = rsp;
ofs = err.indexOf('\n/'); ofs = err.indexOf('\n/');
if (ofs !== -1) { if (ofs !== -1) {
@@ -2364,6 +2413,8 @@ function up2k_init(subtle) {
}; };
if (t.srch) if (t.srch)
req.srch = 1; req.srch = 1;
else if (t.rand)
req.rand = true;
xhr.open('POST', t.purl, true); xhr.open('POST', t.purl, true);
xhr.responseType = 'text'; xhr.responseType = 'text';
@@ -2378,8 +2429,17 @@ function up2k_init(subtle) {
function can_upload_next() { function can_upload_next() {
var upt = st.todo.upload[0], var upt = st.todo.upload[0],
upf = st.files[upt.nfile], upf = st.files[upt.nfile],
nhs = st.busy.handshake.length,
hs = nhs && st.busy.handshake[0],
now = Date.now(); now = Date.now();
if (nhs >= 16)
return false;
if (hs && hs.t_uploaded && Date.now() - hs.t_busied > 10000)
// verification HS possibly held back by uploads
return false;
for (var a = 0, aa = st.busy.handshake.length; a < aa; a++) { for (var a = 0, aa = st.busy.handshake.length; a < aa; a++) {
var hs = st.busy.handshake[a]; var hs = st.busy.handshake[a];
if (hs.n < upt.nfile && hs.t_busied > now - 10 * 1000 && !st.files[hs.n].bytes_uploaded) if (hs.n < upt.nfile && hs.t_busied > now - 10 * 1000 && !st.files[hs.n].bytes_uploaded)
@@ -2419,6 +2479,14 @@ function up2k_init(subtle) {
function orz(xhr) { function orz(xhr) {
var txt = ((xhr.response && xhr.response.err) || xhr.responseText) + ''; var txt = ((xhr.response && xhr.response.err) || xhr.responseText) + '';
if (txt.indexOf('upload blocked by x') + 1) {
apop(st.busy.upload, upt);
apop(t.postlist, npart);
pvis.seth(t.n, 1, "ERROR");
pvis.seth(t.n, 2, txt.split(/\n/)[0]);
pvis.move(t.n, 'ng');
return;
}
if (xhr.status == 200) { if (xhr.status == 200) {
pvis.prog(t, npart, cdr - car); pvis.prog(t, npart, cdr - car);
st.bytes.finished += cdr - car; st.bytes.finished += cdr - car;
@@ -2553,9 +2621,15 @@ function up2k_init(subtle) {
if (dir.target) { if (dir.target) {
clmod(obj, 'err', 1); clmod(obj, 'err', 1);
var v = Math.floor(parseInt(obj.value)); var v = Math.floor(parseInt(obj.value));
if (v < 0 || v > 64 || v !== v) if (v < 0 || v !== v)
return; return;
if (v > 64) {
var p = obj.selectionStart;
v = obj.value = 64;
obj.selectionStart = obj.selectionEnd = p;
}
parallel_uploads = v; parallel_uploads = v;
swrite('nthread', v); swrite('nthread', v);
clmod(obj, 'err'); clmod(obj, 'err');
@@ -2844,7 +2918,7 @@ ebi('ico1').onclick = function () {
if (QS('#op_up2k.act')) if (QS('#op_up2k.act'))
goto_up2k(); goto_up2k();
apply_perms(perms); apply_perms({ "perms": perms, "frand": frand });
(function () { (function () {

View File

@@ -692,7 +692,9 @@ function noq_href(el) {
function get_pwd() { function get_pwd() {
var pwd = ('; ' + document.cookie).split('; cppwd='); var k = HTTPS ? 's=' : 'd=',
pwd = ('; ' + document.cookie).split('; cppw' + k);
if (pwd.length < 2) if (pwd.length < 2)
return null; return null;
@@ -976,6 +978,7 @@ function sethash(hv) {
} }
} }
function dl_file(url) { function dl_file(url) {
console.log('DL [%s]', url); console.log('DL [%s]', url);
var o = mknod('a'); var o = mknod('a');
@@ -985,6 +988,25 @@ function dl_file(url) {
} }
function cliptxt(txt, ok) {
var fb = function () {
console.log('fb');
var o = mknod('input');
o.value = txt;
document.body.appendChild(o);
o.focus();
o.select();
document.execCommand("copy");
document.body.removeChild(o);
ok();
};
try {
navigator.clipboard.writeText(txt).then(ok, fb);
}
catch (ex) { fb(); }
}
var timer = (function () { var timer = (function () {
var r = {}; var r = {};
r.q = []; r.q = [];
@@ -1258,17 +1280,17 @@ var toast = (function () {
r.tag = tag; r.tag = tag;
}; };
r.ok = function (sec, txt, tag) { r.ok = function (sec, txt, tag, cls) {
r.show('ok', sec, txt, tag); r.show('ok ' + (cls || ''), sec, txt, tag);
}; };
r.inf = function (sec, txt, tag) { r.inf = function (sec, txt, tag, cls) {
r.show('inf', sec, txt, tag); r.show('inf ' + (cls || ''), sec, txt, tag);
}; };
r.warn = function (sec, txt, tag) { r.warn = function (sec, txt, tag, cls) {
r.show('warn', sec, txt, tag); r.show('warn ' + (cls || ''), sec, txt, tag);
}; };
r.err = function (sec, txt, tag) { r.err = function (sec, txt, tag, cls) {
r.show('err', sec, txt, tag); r.show('err ' + (cls || ''), sec, txt, tag);
}; };
return r; return r;
@@ -1532,25 +1554,33 @@ var md_plug_err = function (ex, js) {
if (ex) if (ex)
console.log(ex, js); console.log(ex, js);
}; };
function load_md_plug(md_text, plug_type) { function load_md_plug(md_text, plug_type, defer) {
if (defer)
md_plug[plug_type] = null;
if (!have_emp) if (!have_emp)
return md_text; return md_text;
var find = '\n```copyparty_' + plug_type + '\n'; var find = '\n```copyparty_' + plug_type + '\n',
var ofs = md_text.indexOf(find); md = md_text.replace(/\r/g, ''),
if (ofs === -1) ofs = md.indexOf(find),
ofs2 = md.indexOf('\n```', ofs + 1);
if (ofs < 0 || ofs2 < 0)
return md_text; return md_text;
var ofs2 = md_text.indexOf('\n```', ofs + 1); var js = md.slice(ofs + find.length, ofs2 + 1);
if (ofs2 == -1) md = md.slice(0, ofs + 1) + md.slice(ofs2 + 4);
return md_text; md = md.replace(/$/g, '\r');
var js = md_text.slice(ofs + find.length, ofs2 + 1); if (defer) { // insert into sandbox
var md = md_text.slice(0, ofs + 1) + md_text.slice(ofs2 + 4); md_plug[plug_type] = js;
return md;
}
var old_plug = md_plug[plug_type]; var old_plug = md_plug[plug_type];
if (!old_plug || old_plug[1] != js) { if (!old_plug || old_plug[1] != js) {
js = 'const x = { ' + js + ' }; x;'; js = 'const loc = new URL("' + location.href + '"), x = { ' + js + ' }; x;';
try { try {
var x = eval(js); var x = eval(js);
if (x['ctor']) { if (x['ctor']) {

View File

@@ -13,15 +13,21 @@
# other stuff # other stuff
## [`example.conf`](example.conf)
* example config file for `-c`
## [`versus.md`](versus.md)
* similar software / alternatives (with pros/cons)
## [`changelog.md`](changelog.md) ## [`changelog.md`](changelog.md)
* occasionally grabbed from github release notes * occasionally grabbed from github release notes
## [`devnotes.md`](devnotes.md)
* technical stuff
## [`rclone.md`](rclone.md) ## [`rclone.md`](rclone.md)
* notes on using rclone as a fuse client/server * notes on using rclone as a fuse client/server
## [`example.conf`](example.conf)
* example config file for `-c`
# junk # junk

View File

@@ -1,3 +1,211 @@
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2023-0211-1802 `v1.6.4` 🔧🎲🔗🐳🇦🎶
* read-only demo server at https://a.ocv.me/pub/demo/
* [1.6 theme song](https://a.ocv.me/pub/demo/music/.bonus/#af-134e597c) // [similar software](https://github.com/9001/copyparty/blob/hovudstraum/docs/versus.md)
## new features
* 🔧 new [config syntax](https://github.com/9001/copyparty/blob/hovudstraum/docs/example.conf) (#20)
* the new syntax is still kinda esoteric and funky but it's an improvement
* old config files are still supported
* `--vc` prints the autoconverted config which you can copy back into the config file to upgrade
* `--vc` will also [annotate and explain](https://user-images.githubusercontent.com/241032/217356028-eb3e141f-80a6-4bc6-8d04-d8d1d874c3e9.png) the config files
* new argument `--cgen` to generate config from commandline arguments
* kinda buggy, especially the `[global]` section, so give it a lookover before saving it
* 🎲 randomize filenames on upload
* either optionally, using the 🎲 button in the up2k ui
* or force-enabled; globally with `--rand` or per-volume with volflag `rand`
* specify filename length with `nrand` (globally or volflag), default 9
* 🔗 export a list of links to your recent uploads
* `copy links` in the up2k tab (🚀) will copy links to all uploads since last page refresh,
* `copy` in the unpost tab (🧯) will copy links to all your recent uploads (max 2000 files / 12 hours by default)
* filekeys are included if that's enabled and you have access to view those (permissions `G` or `r`)
* 🇦 [arch package](https://github.com/9001/copyparty/tree/hovudstraum/contrib/package/arch) -- added in #18, thx @icxes
* maybe in aur soon!
* 🐳 [docker containers](https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker) -- 5 editions,
* [min](https://hub.docker.com/r/copyparty/min) (57 MiB), just copyparty without thumbnails or audio transcoding
* [im](https://hub.docker.com/r/copyparty/im) (70 MiB), thumbnails of popular image formats + media tags with mutagen
* [ac (163 MiB)](https://hub.docker.com/r/copyparty/ac) 🥇 adds audio/video thumbnails + audio transcoding + better tags
* [iv](https://hub.docker.com/r/copyparty/iv) (211 MiB), makes heif/avif/jxl faster to thumbnail
* [dj](https://hub.docker.com/r/copyparty/dj) (309 MiB), adds optional detection of musical key / bpm
* 🎶 [chiptune player](https://a.ocv.me/pub/demo/music/chiptunes/#af-f6fb2e5f)
* transcodes mod/xm/s3m/it/mo3/mptm/mt2/okt to opus
* uses FFmpeg (libopenmpt) so the accuracy is not perfect, but most files play OK enough
* not **yet** supported in the docker container since Alpine's FFmpeg was built without libopenmpt
* windows: support long filepaths (over 260 chars)
* uses the `//?/` winapi syntax to also support windows 7
* `--ver` shows the server version on the control panel
## bugfixes
* markdown files didn't scale properly in the document browser
* detect and refuse multiple volume definitions sharing the same filesystem path
* don't return incomplete transcodes if multiple clients try to play the same flac file
* [prisonparty](https://github.com/9001/copyparty/blob/hovudstraum/bin/prisonparty.sh): more reliable chroot cleanup, sigusr1 for config reload
* pypi packaging: compress web resources, include webdav.bat
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2023-0131-2103 `v1.6.3` sandbox k
* read-only demo server at https://a.ocv.me/pub/demo/
* and since [1.6.0](https://github.com/9001/copyparty/releases/tag/v1.6.2) only got 2 days of prime time,
* [1.6 theme song](https://a.ocv.me/pub/demo/music/.bonus/#af-134e597c) (hosted on the demo server)
* [similar software](https://github.com/9001/copyparty/blob/hovudstraum/docs/versus.md) / feature comparison
## new features
* dotfiles are hidden from search results by default
* use `--dotsrch` or volflags `dotsrch` / `nodotsrch` to specify otherwise
* they were already being excluded from tar/zip-files if `-ed` is not set, so this makes more sense -- dotfiles *should* now be undiscoverable unless `-ed` or `--smb` is set, but please use [volumes](https://github.com/9001/copyparty#accounts-and-volumes) for isolation / access-control instead, much safer
## bugfixes
* lots of cosmetic fixes for the new readme/prologue/epilogue sandbox
* rushed it into the previous release when someone suggested it, bad idea
* still flickers a bit (especially prologues), and hotkeys are blocked while the sandboxed document has focus
* can be disabled with `--no-sb-md --no-sb-lg` (not recommended)
* support webdav uploads from davfs2 (fix LOCK response)
* always unlink files before overwriting them, in case they are hardlinks
* was primarily an issue with `--daw` and webdav clients
* on windows, replace characters in PUT filenames as necessary
* [prisonparty](https://github.com/9001/copyparty/blob/hovudstraum/bin/prisonparty.sh): support opus transcoding on debian
* `rm -rf .hist/ac` to clear the transcode cache if the old version broke some songs
## other changes
* add `rel="nofollow"` to zip download links, basic-browser link
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2023-0129-1842 `v1.6.2` cors k
[Ellie Goulding - Stay Awake (kors k Hardcore Bootleg).mp3](https://a.ocv.me/pub/demo/music/.bonus/#af-134e597c)
* 👆 the read-only demo server at https://a.ocv.me/pub/demo/
## breaking changes
but nothing is affected (that i know of):
* all requests must pass [cors validation](https://github.com/9001/copyparty#cors)
* but they almost definitely did already
* sharex and others are OK since they don't supply an `Origin` header
* [API calls](https://github.com/9001/copyparty/blob/hovudstraum/docs/devnotes.md#http-api) `?delete` and `?move` are now POST instead of GET
* not aware of any clients using these
## known issues
* the document sandbox is a bit laggy and sometimes eats hotkeys
* disable it with `--no-sb-md --no-sb-lg` if you trust everyone who has write and/or move access
## new features
* [event hooks](https://github.com/9001/copyparty/tree/hovudstraum/bin/hooks) -- run programs on new [uploads](https://user-images.githubusercontent.com/241032/215304439-1c1cb3c8-ec6f-4c17-9f27-81f969b1811a.png), renames, deletes
* [configurable cors](https://github.com/9001/copyparty#cors) (cross-origin resource sharing) behavior; defaults are mostly same as before
* `--allow-csrf` disables all csrf protections and makes it intentionally trivial to send authenticated requests from other domains
* sandboxed readme.md / prologues / epilogues
* documents can still run scripts like before, but can no longer tamper with the web-ui / read the login session, so the old advice of `--no-readme` and `--no-logues` is mostly deprecated
* unfortunately disables hotkeys while the text has focus + blocks dragdropping files onto that area, oh well
* password can be provided through http header `PW:` (instead of cookie `cppwd` or url-param `?pw`)
* detect network changes (new NICs, IPs) and reconfigure / reannounce zeroconf
* fixes mdns when running as a systemd service and copyparty is started before networking is up
* add `--freebind` to start listening on IPs before the NIC is up yet (linux-only)
* per-volume deduplication-control with volflags `hardlink`, `neversymlink`, `copydupes`
* detect curl and return a [colorful, sortable plaintext](https://user-images.githubusercontent.com/241032/215322619-ea5fd606-3654-40ad-94ee-2bc058647bb2.png) directory listing instead
* add optional [powered-by-copyparty](https://user-images.githubusercontent.com/241032/215322626-11d1f02b-25f4-45df-a3d9-f8c51354a8eb.png) footnote on the control panel
* can be disabled with `-nb` or redirected with `--pb-url`
## bugfixes
* change some API calls (`?delete`, `?move`) from `GET` to `POST`
* don't panic! this was safe against authenticated csrf thanks to [SameSite=Lax](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite#lax)
* `--getmod` restores the GETs if you need the convenience and accept the risks
* [u2cli](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) (command-line uploader):
* recover from network hiccups
* add `-ns` for slow uefi TTYs
* separate login cookies for http / https
* avoids an https login from getting accidentally sent over plaintext
* sadly no longer possible to login with internet explorer 4.0 / windows 3.11
* tar/zip-download of hidden folders
* unpost filtering was buggy for non-ascii characters
* moving a deduplicated file on a volume where deduplication was since disabled
* improved the [linux 6.0.16](https://utcc.utoronto.ca/~cks/space/blog/linux/KernelBindBugIn6016) kernel bug [workaround](https://github.com/9001/copyparty/commit/9065226c3d634a9fc15b14a768116158bc1761ad) because there is similar funk in 5.x
* add custom text selection colors because chrome is currently broken on fedora
* blockdevs (`/dev/nvme0n1`) couldn't be downloaded as files
* misc fixes for location-based reverse-proxying
* macos dualstack thing
## other changes
* added a collection of [cursed usecases](https://github.com/9001/copyparty/tree/hovudstraum/docs/cursed-usecases)
* and [comparisons to similar software](https://github.com/9001/copyparty/blob/hovudstraum/docs/versus.md) in case you ever wanna jump ship
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2023-0112-0515 `v1.5.6` many hands
hello from warsaw airport (goodbye japan ;_;)
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* multiple upload handshakes in parallel
* around **5x faster** when uploading small files
* or **50x faster** if the server is on the other side of the planet
* just crank up the `parallel uploads` like crazy (max is 64)
* upload ui: total time and average speed is shown on completion
## bugfixes
* browser ui didn't allow specifying number of threads for file search
* dont panic if a digit key is pressed while viewing an image
* workaround [linux kernel bug](https://utcc.utoronto.ca/~cks/space/blog/linux/KernelBindBugIn6016) causing log spam on dualstack
* ~~related issue (also mostly harmless) will be fixed next release 010770684db95bece206943768621f2c7c27bace~~
* they fixed it in linux 6.1 so these workarounds will be gone too
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1230-0754 `v1.5.5` made in japan
hello from tokyo
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* image viewer now supports heif, avif, apng, svg
* [partyfuse and up2k.py](https://github.com/9001/copyparty/tree/hovudstraum/bin): option to read password from textfile
## bugfixes
* thumbnailing could fail if a primitive build of libvips is installed
* ssdp was wonky on dualstack ipv6
* mdns could crash on networks with invalid routes
* support fat32 timestamp precisions
* fixes spurious file reindexing in volumes located on SD cards on android tablets which lie about timestamps until the next device reboot or filesystem remount
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1213-1956 `v1.5.3` folder-sync + turbo-rust
* read-only demo server at https://a.ocv.me/pub/demo/
## new features
* one-way folder sync (client to server) using [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/README.md#up2kpy) `-z --dr`
* great rsync alternative when combined with `-e2ds --hardlink` deduplication on the server
* **50x faster** when uploading small files to HDD, especially SMR
* by switching sqlite to WAL which carries a small chance of temporarily forgetting the ~200 most recent uploads if you have a power outage or your OS crashes; see `--help-dbd` if you have `-mtp` plugins which produces metadata you can't afford to lose
* location-based [reverse-proxying](https://github.com/9001/copyparty/#reverse-proxy) (but it's still recommended to use a dedicated domain/subdomain instead)
* IPv6 link-local automatically enabled for TCP and zeroconf on NICs without a routable IPv6
* zeroconf network filters now accept subnets too, for example `--z-on 192.168.0.0/16`
* `.hist` folders are hidden on windows
* ux:
* more accurate total ETA on upload
* sorting of batch-unpost links was unintuitive / dangerous
* hotkey `Y` turns files into download links if nothing's selected
* option to replace or disable the mediaplayer-toggle mouse cursor with `--mpmc`
## bugfixes
* WAL probably/hopefully fixes #10 (we'll know in 6 months roughly)
* repair db inconsistencies (which can happen if terminated during startup)
* [davfs2](https://wiki.archlinux.org/title/Davfs2) did not approve of the authentication prompt
* the `connect` button on the control-panel didn't work on phones
* couldn't specify windows NICs in arguments `--z-on` / `--z-off` and friends
* ssdp xml escaping for `--zsl` URL
* no longer possible to accidentally launch multiple copyparty instances on the same port on windows
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-1203-2048 `v1.5.1` babel # 2022-1203-2048 `v1.5.1` babel

View File

@@ -1,5 +1,6 @@
# this file gets included twice from ../some.conf, # this file gets included twice from ../some.conf,
# setting user permissions for a volume # setting user permissions for a volume
rw usr1 accs:
r usr2 rw: usr1
% sibling.conf r: usr2
% sibling.conf

View File

@@ -1,3 +1,3 @@
# and this config file gets included from ./another.conf, # and this config file gets included from ./another.conf,
# adding a final permission for each of the two volumes in ../some.conf # adding a final permission for each of the two volumes in ../some.conf
m usr1 usr2 m: usr1, usr2

View File

@@ -1,22 +1,29 @@
# not actually YAML but lets pretend:
# -*- mode: yaml -*-
# vim: ft=yaml:
# lets make two volumes with the same accounts/permissions for both; # lets make two volumes with the same accounts/permissions for both;
# first declare the accounts just once: # first declare the accounts just once:
u usr1:passw0rd [accounts]
u usr2:letmein usr1: passw0rd
usr2: letmein
# and listen on 127.0.0.1 only, port 2434 [global]
-i 127.0.0.1 i: 127.0.0.1 # listen on 127.0.0.1 only,
-p 2434 p: 2434 # port 2434
e2ds # enable file indexing+scanning
e2ts # and multimedia indexing+scanning
# (inline comments are OK if there is 2 spaces before the #)
# share /usr/share/games from the server filesystem # share /usr/share/games from the server filesystem
/usr/share/games [/vidya]
/vidya /usr/share/games
# include config file with volume permissions % foo/another.conf # include config file with volume permissions
% foo/another.conf
# and share your ~/Music folder too # and share your ~/Music folder too
~/Music [/bangers]
/bangers ~/Music
% foo/another.conf % foo/another.conf
# which should result in each of the volumes getting the following permissions: # which should result in each of the volumes getting the following permissions:
# usr1 read/write/move # usr1 read/write/move

View File

@@ -0,0 +1,22 @@
insane ways to use copyparty
## wireless keyboard
problem: you wanna control mpv or whatever software from the couch but you don't have a wireless keyboard
"solution": load some custom javascript which renders a virtual keyboard on the upload UI and each keystroke is actually an upload which gets picked up by a dummy metadata parser which forwards the keystrokes into xdotool
[no joke, this actually exists and it wasn't even my idea or handiwork (thx steen)](https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/meadup.js)
## appxsvc tarpit
problem: `svchost.exe` is using 100% of a cpu core, and upon further inspection (`procmon`) it is `wsappx` desperately trying to install something, repeatedly reading a file named `AppxManifest.xml` and messing with an sqlite3 database
"solution": create a virtual filesystem which is intentionally slow and trick windows into reading it from there instead
* create a file called `AppxManifest.xml` and put something dumb in it
* serve the file from a copyparty instance with `--rsp-slp=9` so every request will hang for 9 sec
* `net use m: http://127.0.0.1:3993/` (mount copyparty using the windows-native webdav client)
* `mklink /d c:\windows\systemapps\microsoftwindows.client.cbs_cw5n1h2txyewy\AppxManifest.xml m:\AppxManifest.xml`

View File

@@ -127,7 +127,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
| method | params | result | | method | params | result |
|--|--|--| |--|--|--|
| GET | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar | | POST | `?move=/foo/bar` | move/rename the file/folder at URL to /foo/bar |
| method | params | body | result | | method | params | body | result |
|--|--|--|--| |--|--|--|--|
@@ -137,7 +137,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
| mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL | | mPOST | | `act=bput`, `f=FILE` | upload `FILE` into the folder at URL |
| mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json | | mPOST | `?j` | `act=bput`, `f=FILE` | ...and reply with json |
| mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL | | mPOST | | `act=mkdir`, `name=foo` | create directory `foo` at URL |
| GET | `?delete` | | delete URL recursively | | POST | `?delete` | | delete URL recursively |
| jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively | | jPOST | `?delete` | `["/foo","/bar"]` | delete `/foo` and `/bar` recursively |
| uPOST | | `msg=foo` | send message `foo` into server log | | uPOST | | `msg=foo` | send message `foo` into server log |
| mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL | | mPOST | | `act=tput`, `body=TEXT` | overwrite markdown document at URL |
@@ -229,7 +229,12 @@ rm -rf copyparty/web/deps
curl -L https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py >x.py curl -L https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py >x.py
python3 x.py --version python3 x.py --version
rm x.py rm x.py
mv /tmp/pe-copyparty/copyparty/web/deps/ copyparty/web/deps/ cp -R /tmp/pe-copyparty.$(id -u)/copyparty/web/deps copyparty/web/
```
or you could build the web-dependencies from source instead (NB: does not include prismjs, need to grab that manually):
```sh
make -C scripts/deps-docker
``` ```
then build the sfx using any of the following examples: then build the sfx using any of the following examples:

View File

@@ -1,59 +1,69 @@
# not actually YAML but lets pretend:
# -*- mode: yaml -*-
# vim: ft=yaml:
# append some arguments to the commandline; # append some arguments to the commandline;
# the first space in a line counts as a separator, # accepts anything listed in --help (leading dashes are optional)
# any additional spaces are part of the value # and inline comments are OK if there is 2 spaces before the '#'
-e2dsa [global]
-e2ts p: 8086, 3939 # listen on ports 8086 and 3939
-i 127.0.0.1 e2dsa # enable file indexing and filesystem scanning
e2ts # and enable multimedia indexing
z, qr # and zeroconf and qrcode (you can comma-separate arguments)
# create users: # create users:
# u username:password [accounts]
u ed:123 ed: 123 # username: password
u k:k k: k
# leave a blank line between volumes # create volumes:
# (and also between users and volumes) [/] # create a volume at "/" (the webroot), which will
. # share the contents of "." (the current directory)
accs:
r: * # everyone gets read-access, but
rw: ed # the user "ed" gets read-write
# create a volume: # let's specify different permissions for the "priv" subfolder
# share "." (the current directory) # by creating another volume at that location:
# as "/" (the webroot) for the following users: [/priv]
# "r" grants read-access for anyone ./priv
# "rw ed" grants read-write to ed accs:
. r: k # the user "k" can see the contents,
/ rw: ed # while "ed" gets read-write
r
rw ed
# custom permissions for the "priv" folder:
# user "k" can only see/read the contents
# user "ed" gets read-write access
./priv
/priv
r k
rw ed
# this does the same thing,
# and will cause an error on startup since /priv is already taken:
./priv
/priv
r ed k
w ed
# share /home/ed/Music/ as /music and let anyone read it # share /home/ed/Music/ as /music and let anyone read it
# (this will replace any folder called "music" in the webroot) # (this will replace any folder called "music" in the webroot)
/home/ed/Music [/music]
/music /home/ed/Music
r accs:
r: *
# and a folder where anyone can upload, but nobody can see the contents
[/dump]
/home/ed/inc
accs:
w: *
flags:
e2d # the e2d volflag enables the uploads database
nodupe # the nodupe volflag rejects duplicate uploads
# (see --help-flags for all available volflags to use)
# and a folder where anyone can upload # and a folder where anyone can upload
# but nobody can see the contents # and anyone can access their own uploads, but nothing else
# and set the e2d flag to enable the uploads database [/sharex]
# and set the nodupe flag to reject duplicate uploads /home/ed/inc/sharex
/home/ed/inc accs:
/dump wG: * # wG = write-upget = see your own uploads only
w rwmd: ed, k # read-write-modify-delete for users "ed" and "k"
c e2d flags:
c nodupe e2d, d2t, fk: 4
# volflag "e2d" enables the uploads database,
# "d2t" disables multimedia parsers (in case the uploads are malicious),
# "dthumb" disables thumbnails (same reason),
# "fk" enables filekeys (necessary for upget permission) (4 chars long)
# -- note that its fine to combine all the volflags on
# one line because only the last volflag has an argument
# this entire config file can be replaced with these arguments: # this entire config file can be replaced with these arguments:
# -u ed:123 -u k:k -v .::r:a,ed -v priv:priv:r,k:rw,ed -v /home/ed/Music:music:r -v /home/ed/inc:dump:w:c,e2d,nodupe # -u ed:123 -u k:k -v .::r:a,ed -v priv:priv:r,k:rw,ed -v /home/ed/Music:music:r -v /home/ed/inc:dump:w:c,e2d,nodupe -v /home/ed/inc/sharex:sharex:wG:c,e2d,d2t,fk=4
# but note that the config file always wins in case of conflicts # but note that the config file always wins in case of conflicts

561
docs/versus.md Normal file
View File

@@ -0,0 +1,561 @@
# alternatives to copyparty
copyparty compared against all similar software i've bumped into
there is probably some unintentional bias so please submit corrections
currently up to date with [awesome-selfhosted](https://github.com/awesome-selfhosted/awesome-selfhosted) but that probably won't last
## toc
* top
* [recommendations](#recommendations)
* [feature comparisons](#feature-comparisons)
* [general](#general)
* [file transfer](#file-transfer)
* [protocols and client support](#protocols-and-client-support)
* [server configuration](#server-configuration)
* [server capabilities](#server-capabilities)
* [client features](#client-features)
* [integration](#integration)
* [another matrix](#another-matrix)
* [reviews](#reviews)
* [copyparty](#copyparty)
* [hfs2](#hfs2)
* [hfs3](#hfs3)
* [nextcloud](#nextcloud)
* [seafile](#seafile)
* [rclone](#rclone)
* [dufs](#dufs)
* [chibisafe](#chibisafe)
* [kodbox](#kodbox)
* [filebrowser](#filebrowser)
* [filegator](#filegator)
* [updog](#updog)
* [goshs](#goshs)
* [gimme-that](#gimme-that)
* [ass](#ass)
* [linx](#linx)
* [briefly considered](#briefly-considered)
# recommendations
* [kodbox](https://github.com/kalcaddle/kodbox) ([review](#kodbox)) appears to be a fantastic alternative if you're not worried about running chinese software, with several advantages over copyparty
* but anything you want to share must be moved into the kodbox filesystem
* [seafile](https://github.com/haiwen/seafile) ([review](#seafile)) and [nextcloud](https://github.com/nextcloud/server) ([review](#nextcloud)) could be decent alternatives if you need something heavier than copyparty
* but their [license](https://snyk.io/learn/agpl-license/) is [problematic](https://opensource.google/documentation/reference/using/agpl-policy)
* and copyparty is way better at uploads in particular (resumable, accelerated)
* and anything you want to share must be moved into the respective filesystems
* [filebrowser](https://github.com/filebrowser/filebrowser) ([review](#filebrowser)) and [dufs](https://github.com/sigoden/dufs) ([review](#dufs)) are simpler copyparties but with a settings gui
* has some of the same strengths of copyparty, being portable and able to work with an existing folder structure
* ...but copyparty is better at uploads + some other things
# feature comparisons
```
<&Kethsar> copyparty is very much bloat ed, so yeah
```
the table headers in the matrixes below are the different softwares, with a quick review of each software in the next section
the softwares,
* `a` = [copyparty](https://github.com/9001/copyparty)
* `b` = [hfs2](https://github.com/rejetto/hfs2)
* `c` = [hfs3](https://www.rejetto.com/hfs/)
* `d` = [nextcloud](https://github.com/nextcloud/server)
* `e` = [seafile](https://github.com/haiwen/seafile)
* `f` = [rclone](https://github.com/rclone/rclone), specifically `rclone serve webdav .`
* `g` = [dufs](https://github.com/sigoden/dufs)
* `h` = [chibisafe](https://github.com/chibisafe/chibisafe)
* `i` = [kodbox](https://github.com/kalcaddle/kodbox)
* `j` = [filebrowser](https://github.com/filebrowser/filebrowser)
* `k` = [filegator](https://github.com/filegator/filegator)
some softwares not in the matrixes,
* [updog](#updog)
* [goshs](#goshs)
* [gimme-that](#gimme-that)
* [ass](#ass)
* [linx](#linx)
symbol legend,
* `█` = absolutely
* `╱` = partially
* `•` = maybe?
* ` ` = nope
## general
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| intuitive UX | | | █ | █ | █ | | █ | █ | █ | █ | █ |
| config GUI | | █ | █ | █ | █ | | | █ | █ | █ | |
| good documentation | | | | █ | █ | █ | █ | | | █ | █ |
| runs on iOS | | | | | | | | | | | |
| runs on Android | █ | | | | | █ | | | | | |
| runs on WinXP | █ | █ | | | | █ | | | | | |
| runs on Windows | █ | █ | █ | █ | █ | █ | █ | | █ | █ | █ |
| runs on Linux | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| runs on Macos | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| runs on FreeBSD | █ | | | • | █ | █ | █ | • | █ | █ | |
| portable binary | █ | █ | █ | | | █ | █ | | | █ | |
| zero setup, just go | █ | █ | █ | | | | █ | | | █ | |
| android app | | | | █ | █ | | | | | | |
| iOS app | | | | █ | █ | | | | | | |
* `zero setup` = you can get a mostly working setup by just launching the app, without having to install any software or configure whatever
* `a`/copyparty remarks:
* no gui for server settings; only for client-side stuff
* can theoretically run on iOS / iPads using [iSH](https://ish.app/), but only the iPad will offer sufficient multitasking i think
* [android app](https://f-droid.org/en/packages/me.ocv.partyup/) is for uploading only
* `b`/hfs2 runs on linux through wine
* `f`/rclone must be started with the command `rclone serve webdav .` or similar
* `h`/chibisafe has undocumented windows support
## file transfer
*the thing that copyparty is actually kinda good at*
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| download folder as zip | █ | █ | █ | █ | █ | | █ | | █ | █ | |
| download folder as tar | █ | | | | | | | | | █ | |
| upload | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| parallel uploads | █ | | | █ | █ | | • | | █ | | █ |
| resumable uploads | █ | | | | | | | | █ | | █ |
| upload segmenting | █ | | | | | | | █ | █ | | █ |
| upload acceleration | █ | | | | | | | | █ | | █ |
| upload verification | █ | | | █ | █ | | | | █ | | |
| upload deduplication | █ | | | | █ | | | | █ | | |
| upload a 999 TiB file | █ | | | | █ | █ | • | | █ | | █ |
| keep last-modified time | █ | | | █ | █ | █ | | | | | |
| upload rules | | | | | | | | | | | |
| ┗ max disk usage | █ | █ | | | █ | | | | █ | | |
| ┗ max filesize | █ | | | | | | | █ | | | █ |
| ┗ max items in folder | █ | | | | | | | | | | |
| ┗ max file age | █ | | | | | | | | █ | | |
| ┗ max uploads over time | █ | | | | | | | | | | |
| ┗ compress before write | █ | | | | | | | | | | |
| ┗ randomize filename | █ | | | | | | | █ | █ | | |
| ┗ mimetype reject-list | | | | | | | | | • | | |
| ┗ extension reject-list | | | | | | | | █ | • | | |
| checksums provided | | | | █ | █ | | | | █ | | |
| cloud storage backend | | | | █ | █ | █ | | | | | █ |
* `upload segmenting` = files are sliced into chunks, making it possible to upload files larger than 100 MiB on cloudflare for example
* `upload acceleration` = each file can be uploaded using several TCP connections, which can offer a huge speed boost over huge distances / on flaky connections -- like the good old [download accelerators](https://en.wikipedia.org/wiki/GetRight) except in reverse
* `upload verification` = uploads are checksummed or otherwise confirmed to have been transferred correctly
* `checksums provided` = when downloading a file from the server, the file's checksum is provided for verification client-side
* `cloud storage backend` = able to serve files from (and write to) s3 or similar cloud services; `` means the software can do this with some help from `rclone mount` as a bridge
* `a`/copyparty can reject uploaded files (based on complex conditions), for example [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py)
* `j`/filebrowser remarks:
* can provide checksums for single files on request
* can probably do extension/mimetype rejection similar to copyparty
* `k`/filegator download-as-zip is not streaming; it creates the full zipfile before download can start
## protocols and client support
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| serve https | █ | | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| serve webdav | █ | | | █ | █ | █ | █ | | █ | | |
| serve ftp | █ | | | | | █ | | | | | |
| serve ftps | █ | | | | | █ | | | | | |
| serve sftp | | | | | | █ | | | | | |
| serve smb/cifs | | | | | | █ | | | | | |
| serve dlna | | | | | | █ | | | | | |
| listen on unix-socket | | | | █ | █ | | █ | █ | █ | | █ |
| zeroconf | █ | | | | | | | | | | |
| supports netscape 4 | | | | | | █ | | | | | • |
| ...internet explorer 6 | | █ | | █ | | █ | | | | | • |
| mojibake filenames | █ | | | • | • | █ | █ | • | • | • | |
| undecodable filenames | █ | | | • | • | █ | | • | • | | |
* `webdav` = protocol convenient for mounting a remote server as a local filesystem; see zeroconf:
* `zeroconf` = the server announces itself on the LAN, [automatically appearing](https://user-images.githubusercontent.com/241032/215344737-0eae8d98-9496-4256-9aa8-cd2f6971810d.png) on other zeroconf-capable devices
* `mojibake filenames` = filenames decoded with the wrong codec and then reencoded (usually to utf-8), so `宇多田ヒカル` might look like `ëFæ╜ôcâqâJâï`
* `undecodable filenames` = pure binary garbage which cannot be parsed as utf-8
* you can successfully play `$'\355\221'` with mpv through mounting a remote copyparty server with rclone, pog
* `a`/copyparty remarks:
* extremely minimal samba/cifs server
* netscape 4 / ie6 support is mostly listed as a joke altho some people have actually found it useful ([ie4 tho](https://user-images.githubusercontent.com/241032/118192791-fb31fe00-b446-11eb-9647-898ea8efc1f7.png))
## server configuration
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| config from cmd args | █ | | | | | █ | █ | | | █ | |
| config files | █ | █ | █ | | | █ | | █ | | █ | • |
| runtime config reload | █ | █ | █ | | | | | █ | █ | █ | █ |
| same-port http / https | █ | | | | | | | | | | |
| listen multiple ports | █ | | | | | | | | | | |
| virtual file system | █ | █ | █ | | | | █ | | | | |
| reverse-proxy ok | █ | | █ | █ | █ | █ | █ | █ | • | • | • |
| folder-rproxy ok | █ | | | | █ | █ | | • | • | • | • |
* `folder-rproxy` = reverse-proxying without dedicating an entire (sub)domain, using a subfolder instead
## server capabilities
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| accounts | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ | █ |
| single-sign-on | | | | █ | █ | | | | • | | |
| token auth | | | | █ | █ | | | █ | | | |
| per-volume permissions | █ | █ | █ | █ | █ | █ | █ | | █ | █ | |
| per-folder permissions | | | | █ | █ | | █ | | █ | █ | |
| per-file permissions | | | | █ | █ | | █ | | █ | | |
| per-file passwords | █ | | | █ | █ | | █ | | █ | | |
| unmap subfolders | █ | | | | | | █ | | | █ | |
| index.html blocks list | | | | | | | █ | | | • | |
| write-only folders | █ | | | | | | | | | | █ |
| files stored as-is | █ | █ | █ | █ | | █ | █ | | | █ | █ |
| file versioning | | | | █ | █ | | | | | | |
| file encryption | | | | █ | █ | █ | | | | | |
| file indexing | █ | | █ | █ | █ | | | █ | █ | █ | |
| ┗ per-volume db | █ | | • | • | • | | | • | • | | |
| ┗ db stored in folder | █ | | | | | | | • | • | █ | |
| ┗ db stored out-of-tree | █ | | █ | █ | █ | | | • | • | █ | |
| ┗ existing file tree | █ | | █ | | | | | | | █ | |
| file action event hooks | █ | | | | | | | | | █ | |
| one-way folder sync | █ | | | █ | █ | █ | | | | | |
| full sync | | | | █ | █ | | | | | | |
| speed throttle | | █ | █ | | | █ | | | █ | | |
| anti-bruteforce | █ | █ | █ | █ | █ | | | | • | | |
| dyndns updater | | █ | | | | | | | | | |
| self-updater | | | █ | | | | | | | | |
| log rotation | █ | | █ | █ | █ | | | • | █ | | |
| upload tracking / log | █ | █ | • | █ | █ | | | █ | █ | | |
| curl-friendly ls | █ | | | | | | | | | | |
| curl-friendly upload | █ | | | | | █ | █ | • | | | |
* `unmap subfolders` = "shadowing"; mounting a local folder in the middle of an existing filesystem tree in order to disable access below that path
* `files stored as-is` = uploaded files are trivially readable from the server HDD, not sliced into chunks or in weird folder structures or anything like that
* `db stored in folder` = filesystem index can be written to a database file inside the folder itself
* `db stored out-of-tree` = filesystem index can be stored some place else, not necessarily inside the shared folders
* `existing file tree` = will index any existing files it finds
* `file action event hooks` = run script before/after upload, move, rename, ...
* `one-way folder sync` = like rsync, optionally deleting unexpected files at target
* `full sync` = stateful, dropbox-like sync
* `curl-friendly ls` = returns a [sortable plaintext folder listing](https://user-images.githubusercontent.com/241032/215322619-ea5fd606-3654-40ad-94ee-2bc058647bb2.png) when curled
* `curl-friendly upload` = uploading with curl is just `curl -T some.bin http://.../`
* `a`/copyparty remarks:
* one-way folder sync from local to server can be done efficiently with [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py), or with webdav and conventional rsync
* can hot-reload config files (with just a few exceptions)
* can set per-folder permissions if that folder is made into a separate volume, so there is configuration overhead
* [event hooks](https://github.com/9001/copyparty/tree/hovudstraum/bin/hooks) ([discord](https://user-images.githubusercontent.com/241032/215304439-1c1cb3c8-ec6f-4c17-9f27-81f969b1811a.png), [desktop](https://user-images.githubusercontent.com/241032/215335767-9c91ed24-d36e-4b6b-9766-fb95d12d163f.png)) inspired by filebrowser, as well as the more complex [media parser](https://github.com/9001/copyparty/tree/hovudstraum/bin/mtag) alternative
* upload history can be visualized using [partyjournal](https://github.com/9001/copyparty/blob/hovudstraum/bin/partyjournal.py)
* `k`/filegator remarks:
* `per-* permissions` -- can limit a user to one folder and its subfolders
* `unmap subfolders` -- can globally filter a list of paths
## client features
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ---------------------- | - | - | - | - | - | - | - | - | - | - | - |
| single-page app | █ | | █ | █ | █ | | | █ | █ | █ | █ |
| themes | █ | █ | | █ | | | | | █ | | |
| directory tree nav | █ | | | | █ | | | | █ | | |
| multi-column sorting | █ | | | | | | | | | | |
| thumbnails | █ | | | | | | | █ | █ | | |
| ┗ image thumbnails | █ | | | █ | █ | | | █ | █ | █ | |
| ┗ video thumbnails | █ | | | █ | █ | | | | █ | | |
| ┗ audio spectrograms | █ | | | | | | | | | | |
| audio player | █ | | | █ | █ | | | | █ | | |
| ┗ gapless playback | █ | | | | | | | | • | | |
| ┗ audio equalizer | █ | | | | | | | | | | |
| ┗ waveform seekbar | █ | | | | | | | | | | |
| ┗ OS integration | █ | | | | | | | | | | |
| ┗ transcode to lossy | █ | | | | | | | | | | |
| video player | █ | | | █ | █ | | | | █ | █ | |
| ┗ video transcoding | | | | | | | | | █ | | |
| audio BPM detector | █ | | | | | | | | | | |
| audio key detector | █ | | | | | | | | | | |
| search by path / name | █ | █ | █ | █ | █ | | █ | | █ | █ | |
| search by date / size | █ | | | | █ | | | █ | █ | | |
| search by bpm / key | █ | | | | | | | | | | |
| search by custom tags | | | | | | | | █ | █ | | |
| search in file contents | | | | █ | █ | | | | █ | | |
| search by custom parser | █ | | | | | | | | | | |
| find local file | █ | | | | | | | | | | |
| undo recent uploads | █ | | | | | | | | | | |
| create directories | █ | | | █ | █ | | █ | █ | █ | █ | █ |
| image viewer | █ | | | █ | █ | | | | █ | █ | █ |
| markdown viewer | █ | | | | █ | | | | █ | | |
| markdown editor | █ | | | | █ | | | | █ | | |
| readme.md in listing | █ | | | █ | | | | | | | |
| rename files | █ | █ | █ | █ | █ | | █ | | █ | █ | █ |
| batch rename | █ | | | | | | | | █ | | |
| cut / paste files | █ | █ | | █ | █ | | | | █ | | |
| move files | █ | █ | | █ | █ | | █ | | █ | █ | █ |
| delete files | █ | █ | | █ | █ | | █ | █ | █ | █ | █ |
| copy files | | | | | █ | | | | █ | █ | █ |
* `single-page app` = multitasking; possible to continue navigating while uploading
* `audio player » os-integration` = use the [lockscreen](https://user-images.githubusercontent.com/241032/142711926-0700be6c-3e31-47b3-9928-53722221f722.png) or [media hotkeys](https://user-images.githubusercontent.com/241032/215347492-b4250797-6c90-4e09-9a4c-721edf2fb15c.png) to play/pause, prev/next song
* `search by custom tags` = ability to tag files through the UI and search by those
* `find local file` = drop a file into the browser to see if it exists on the server
* `undo recent uploads` = accounts without delete permissions have a time window where they can undo their own uploads
* `a`/copyparty has teeny-tiny skips playing gapless albums depending on audio codec (opus best)
* `b`/hfs2 has a very basic directory tree view, not showing sibling folders
* `f`/rclone can do some file management (mkdir, rename, delete) when hosting through webdav
* `j`/filebrowser has a plaintext viewer/editor
* `k`/filegator directory tree is a modal window
## integration
| feature / software | a | b | c | d | e | f | g | h | i | j | k |
| ----------------------- | - | - | - | - | - | - | - | - | - | - | - |
| OS alert on upload | █ | | | | | | | | | | |
| discord | █ | | | | | | | | | | |
| ┗ announce uploads | █ | | | | | | | | | | |
| ┗ custom embeds | | | | | | | | | | | |
| sharex | █ | | | █ | | █ | | █ | | | |
| flameshot | | | | | | █ | | | | | |
* sharex `` = yes, but does not provide example sharex config
* `a`/copyparty remarks:
* `OS alert on upload` available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/notify.py)
* `discord » announce uploads` available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/discord-announce.py)
* `j`/filebrowser can probably pull those off with command runners similar to copyparty
## another matrix
| software / feature | lang | lic | size |
| ------------------ | ------ | ------ | ------ |
| copyparty | python | █ mit | 0.6 MB |
| hfs2 | delphi | ░ gpl3 | 2 MB |
| hfs3 | ts | ░ gpl3 | 36 MB |
| nextcloud | php | ‼ agpl | • |
| seafile | c | ‼ agpl | • |
| rclone | c | █ mit | 45 MB |
| dufs | rust | █ apl2 | 2.5 MB |
| chibisafe | ts | █ mit | • |
| kodbox | php | ░ gpl3 | 92 MB |
| filebrowser | go | █ apl2 | 20 MB |
| filegator | php | █ mit | • |
| updog | python | █ mit | 17 MB |
| goshs | go | █ mit | 11 MB |
| gimme-that | python | █ mit | 4.8 MB |
| ass | ts | █ isc | • |
| linx | go | ░ gpl3 | 20 MB |
* `size` = binary (if available) or installed size of program and its dependencies
* copyparty size is for the [standalone python](https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py) file; the [windows exe](https://github.com/9001/copyparty/releases/latest/download/copyparty.exe) is **6 MiB**
# reviews
* ✅ are advantages over copyparty
* ⚠️ are disadvantages
## [copyparty](https://github.com/9001/copyparty)
* resumable uploads which are verified server-side
* upload segmenting allows for potentially much faster uploads on some connections, and terabyte-sized files even on cloudflare
* both of the above are surprisingly uncommon features
* very cross-platform (python, no dependencies)
## [hfs2](https://github.com/rejetto/hfs2)
* the OG, the legend
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ windows-only
* ✅ config GUI
* vfs with gui config, per-volume permissions
* starting to show its age, hence the rewrite:
## [hfs3](https://www.rejetto.com/hfs/)
* nodejs; cross-platform
* vfs with gui config, per-volume permissions
* still early development, let's revisit later
## [nextcloud](https://github.com/nextcloud/server)
* php, mariadb
* ⚠️ isolated on-disk file hierarchy in per-user folders
* not that bad, can probably be remedied with bindmounts or maybe symlinks
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no write-only / upload-only folders
* ⚠️ http/webdav only; no ftp, zeroconf
* ⚠️ less awesome music player
* ⚠️ doesn't run on android or ipads
* ✅ great ui/ux
* ✅ config gui
* ✅ apps (android / iphone)
* copyparty: android upload-only app
* ✅ more granular permissions (per-file)
* ✅ search: fulltext indexing of file contents
* ✅ webauthn passwordless authentication
## [seafile](https://github.com/haiwen/seafile)
* c, mariadb
* ⚠️ [isolated on-disk file hierarchy](https://manual.seafile.com/maintain/seafile_fsck/), incompatible with other software
* *much worse than nextcloud* in that regard
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no write-only / upload-only folders
* ⚠️ http/webdav only; no ftp, zeroconf
* ⚠️ less awesome music player
* ⚠️ doesn't run on android or ipads
* ✅ great ui/ux
* ✅ config gui
* ✅ apps (android / iphone)
* copyparty: android upload-only app
* ✅ more granular permissions (per-file)
* ✅ search: fulltext indexing of file contents
## [rclone](https://github.com/rclone/rclone)
* nice standalone c program
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no web-ui, just a server / downloader / uploader utility
* ✅ works with almost any protocol, cloud provider
* ⚠️ copyparty's webdav server is slightly faster
## [dufs](https://github.com/sigoden/dufs)
* rust; cross-platform (windows, linux, macos)
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ doesn't support crazy filenames
* ✅ per-url access control (copyparty is per-volume)
* basic but really snappy ui
* upload, rename, delete, ... see feature matrix
## [chibisafe](https://github.com/chibisafe/chibisafe)
* nodejs; recommends docker
* *it has upload segmenting!*
* ⚠️ but uploads are still not resumable / accelerated / integrity-checked
* ⚠️ not portable
* ⚠️ isolated on-disk file hierarchy, incompatible with other software
* ⚠️ http/webdav only; no ftp or zeroconf
* ✅ pretty ui
* ✅ control panel for server settings and user management
* ✅ user registration
* ✅ searchable image tags; delete by tag
* ✅ browser extension to upload files to the server
* ✅ reject uploads by file extension
* copyparty: can reject uploads [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py) using plugins
* ✅ token auth (api keys)
## [kodbox](https://github.com/kalcaddle/kodbox)
* this thing is insane
* php; [docker](https://hub.docker.com/r/kodcloud/kodbox)
* *upload segmenting, acceleration, and integrity checking!*
* ⚠️ but uploads are not resumable(?)
* ⚠️ not portable
* ⚠️ isolated on-disk file hierarchy, incompatible with other software
* ⚠️ http/webdav only; no ftp or zeroconf
* ⚠️ some parts of the GUI are in chinese
* ✅ fantastic ui/ux
* ✅ control panel for server settings and user management
* ✅ file tags; file discussions!?
* ✅ video transcoding
* ✅ unzip uploaded archives
* ✅ IDE with syntax highlighting
* ✅ wysiwyg editor for openoffice files
## [filebrowser](https://github.com/filebrowser/filebrowser)
* go; cross-platform (windows, linux, mac)
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ http only; no webdav / ftp / zeroconf
* ⚠️ doesn't support crazy filenames
* ⚠️ no directory tree nav
* ⚠️ limited file search
* ✅ settings gui
* ✅ good ui/ux
* ⚠️ but no directory tree for navigation
* ✅ user signup
* ✅ command runner / remote shell
* supposed to have write-only folders but couldn't get it to work
## [filegator](https://github.com/filegator/filegator)
* go; cross-platform (windows, linux, mac)
* ⚠️ http only; no webdav / ftp / zeroconf
* ⚠️ does not support symlinks
* ⚠️ expensive download-as-zip feature
* ⚠️ doesn't support crazy filenames
* ⚠️ limited file search
* *it has upload segmenting and acceleration*
* ⚠️ but uploads are still not integrity-checked
## [updog](https://github.com/sc0tfree/updog)
* python; cross-platform
* basic directory listing with upload feature
* ⚠️ less portable
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ no vfs; single folder, single account
## [goshs](https://github.com/patrickhener/goshs)
* go; cross-platform (windows, linux, mac)
* ⚠️ no vfs; single folder, single account
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ✅ cool clipboard widget
* copyparty: the markdown editor is an ok substitute
* read-only and upload-only modes (same as copyparty's write-only)
* https, webdav
## [gimme-that](https://github.com/nejdetckenobi/gimme-that)
* python, but with c dependencies
* ⚠️ no vfs; single folder, multiple accounts
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ⚠️ weird folder structure for uploads
* ✅ clamav antivirus check on upload! neat
* optional max-filesize, os-notification on uploads
* copyparty: os-notification available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/notify.py)
## [ass](https://github.com/tycrek/ass)
* nodejs; recommends docker
* ⚠️ not portable
* ⚠️ upload only; no browser
* ⚠️ upload through sharex only; no web-ui
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* ✅ token auth
* ✅ gps metadata stripping
* copyparty: possible with [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/image-noexif.py)
* ✅ discord integration (custom embeds, upload webhook)
* copyparty: [upload webhook plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/discord-announce.py)
* ✅ reject uploads by mimetype
* copyparty: can reject uploads [by extension](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-extension.py) or [mimetype](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/reject-mimetype.py) using plugins
* ✅ can use S3 as storage backend; copyparty relies on rclone-mount for that
* ✅ custom 404 pages
## [linx](https://github.com/ZizzyDizzyMC/linx-server/)
* originally [andreimarcu/linx-server](https://github.com/andreimarcu/linx-server) but development has ended
* ⚠️ uploads not resumable / accelerated / integrity-checked
* ⚠️ on cloudflare: max upload size 100 MiB
* some of its unique features have been added to copyparty as former linx users have migrated
* file expiration timers, filename randomization
* ✅ password-protected files
* copyparty: password-protected folders + filekeys to skip the folder password seem to cover most usecases
* ✅ file deletion keys
* ✅ download files as torrents
* ✅ remote uploads (send a link to the server and it downloads it)
* copyparty: available as [a plugin](https://github.com/9001/copyparty/blob/hovudstraum/bin/hooks/wget.py)
* ✅ can use S3 as storage backend; copyparty relies on rclone-mount for that
# briefly considered
* [pydio](https://github.com/pydio/cells): python/agpl3, looks great, fantastic ux -- but needs mariadb, systemwide install
* [gossa](https://github.com/pldubouilh/gossa): go/mit, minimalistic, basic file upload, text editor, mkdir and rename (no delete/move)
* [h5ai](https://larsjung.de/h5ai/): php/mit, slick ui, image viewer, directory tree, no upload feature

View File

@@ -3,9 +3,9 @@ FROM alpine:3.16
WORKDIR /z WORKDIR /z
ENV ver_asmcrypto=c72492f4a66e17a0e5dd8ad7874de354f3ccdaa5 \ ENV ver_asmcrypto=c72492f4a66e17a0e5dd8ad7874de354f3ccdaa5 \
ver_hashwasm=4.9.0 \ ver_hashwasm=4.9.0 \
ver_marked=4.2.4 \ ver_marked=4.2.5 \
ver_mde=2.18.0 \ ver_mde=2.18.0 \
ver_codemirror=5.65.10 \ ver_codemirror=5.65.11 \
ver_fontawesome=5.13.0 \ ver_fontawesome=5.13.0 \
ver_zopfli=1.0.3 ver_zopfli=1.0.3

View File

@@ -0,0 +1,21 @@
# copyparty-ac edition: copyparty + Pillow + FFmpeg
# (image/audio/video thumbnails, audio transcoding, media tags)
FROM alpine:latest
WORKDIR /z
# runtime deps only; /cfg holds optional *.conf files, /w is the shared root
# (both world-writable so the container can run as any uid via `docker run -u`)
# `echo % /cfg` writes an include-directive so copyparty loads /cfg/*.conf
RUN apk --no-cache add \
wget \
py3-pillow \
ffmpeg \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
# the single-file copyparty build, staged into ./i by the Makefile
COPY i/dist/copyparty-sfx.py ./
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "/z/copyparty-sfx.py", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-ac" \
org.opencontainers.image.description="copyparty with Pillow and FFmpeg (image/audio/video thumbnails, audio transcoding, media tags)"

View File

@@ -0,0 +1,36 @@
# copyparty-dj edition: everything in iv plus musical key / bpm detection
# (vamp plugins built from source via install-deps.sh)
FROM alpine:latest
WORKDIR /z
# build script + the two mtag plugins that use the vamp analyzers
COPY i/bin/mtag/install-deps.sh ./
COPY i/bin/mtag/audio-bpm.py /mtag/
COPY i/bin/mtag/audio-key.py /mtag/
# install runtime deps, then a temporary `.bd` build-dep group for compiling
# the vamp plugins; the build deps are removed again in the same layer
RUN apk add -U \
wget \
py3-pillow py3-pip \
ffmpeg \
vips-jxl vips-heif vips-poppler vips-magick \
py3-numpy fftw libsndfile \
&& python3 -m pip install pyvips \
&& apk --no-cache add -t .bd \
bash wget gcc g++ make cmake patchelf \
python3-dev ffmpeg-dev fftw-dev libsndfile-dev \
py3-wheel py3-numpy-dev \
&& bash install-deps.sh \
&& apk del py3-pip .bd \
&& rm -rf /var/cache/apk/* \
&& chmod 777 /root \
&& ln -s /root/vamp /root/.local / \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
COPY i/dist/copyparty-sfx.py ./
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "/z/copyparty-sfx.py", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-dj" \
org.opencontainers.image.description="copyparty with all optional dependencies, including musical key / bpm detection"

View File

@@ -0,0 +1,20 @@
# copyparty-im edition: copyparty + Pillow + Mutagen
# (image thumbnails and media tags, but no FFmpeg)
FROM alpine:latest
WORKDIR /z
# /cfg holds optional *.conf files, /w is the shared root;
# `echo % /cfg` writes an include-directive so copyparty loads /cfg/*.conf
RUN apk --no-cache add \
wget \
py3-pillow py3-mutagen \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
COPY i/dist/copyparty-sfx.py ./
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "/z/copyparty-sfx.py", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-im" \
org.opencontainers.image.description="copyparty with Pillow and Mutagen (image thumbnails, media tags)"

View File

@@ -0,0 +1,24 @@
# copyparty-iv edition: ac plus libvips for faster thumbnails
FROM alpine:latest
WORKDIR /z
# pip is only needed to install pyvips and is removed in the same layer
RUN apk --no-cache add \
wget \
py3-pillow py3-pip \
ffmpeg \
vips-jxl vips-heif vips-poppler vips-magick \
&& python3 -m pip install pyvips \
&& apk del py3-pip \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
COPY i/dist/copyparty-sfx.py ./
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "/z/copyparty-sfx.py", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-iv" \
org.opencontainers.image.description="copyparty with Pillow, FFmpeg, libvips (image/audio/video thumbnails, audio transcoding, media tags)"

View File

@@ -0,0 +1,19 @@
# copyparty-min edition: just copyparty, no optional dependencies
FROM alpine:latest
WORKDIR /z
# /cfg holds optional *.conf files, /w is the shared root;
# `echo % /cfg` writes an include-directive so copyparty loads /cfg/*.conf
RUN apk --no-cache add \
python3 \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
COPY i/dist/copyparty-sfx.py ./
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "/z/copyparty-sfx.py", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-min" \
org.opencontainers.image.description="just copyparty, no thumbnails / media tags / audio transcoding"

View File

@@ -0,0 +1,19 @@
# copyparty-min-pip edition: like min, but installed from pypi instead of
# copying the sfx into the image (so no COPY step, and `python3 -m copyparty`)
FROM alpine:latest
WORKDIR /z
# pip is only needed for the install and is removed in the same layer
RUN apk --no-cache add python3 py3-pip \
&& python3 -m pip install copyparty \
&& apk del py3-pip \
&& mkdir /cfg /w \
&& chmod 777 /cfg /w \
&& echo % /cfg > initcfg
WORKDIR /w
EXPOSE 3923
ENTRYPOINT ["python3", "-m", "copyparty", "-c", "/z/initcfg"]
# OCI annotations shown on docker hub / ghcr
LABEL org.opencontainers.image.url="https://github.com/9001/copyparty" \
org.opencontainers.image.source="https://github.com/9001/copyparty/tree/hovudstraum/scripts/docker" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="copyparty-min-pip" \
org.opencontainers.image.description="just copyparty, no thumbnails, no media tags, no audio transcoding"

65
scripts/docker/Makefile Normal file
View File

@@ -0,0 +1,65 @@
# build / publish / clean up the copyparty docker images
# (min, im, iv, ac, dj editions -- see README.md in this folder)
self := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# build all editions and print each compressed image size in MiB
all:
# start docker via whichever service manager exists; `-` ignores failures
-service docker start
-systemctl start docker
rm -rf i
mkdir i
# stage the sfx and the mtag plugins into ./i as the docker build context
tar -cC../.. dist/copyparty-sfx.py bin/mtag | tar -xvCi
docker build -t copyparty/min:latest -f Dockerfile.min .
echo 'scale=1;'`docker save copyparty/min:latest | pigz -c | wc -c`/1024/1024 | bc
# docker build -t copyparty/min-pip:latest -f Dockerfile.min.pip .
# echo 'scale=1;'`docker save copyparty/min-pip:latest | pigz -c | wc -c`/1024/1024 | bc
docker build -t copyparty/im:latest -f Dockerfile.im .
echo 'scale=1;'`docker save copyparty/im:latest | pigz -c | wc -c`/1024/1024 | bc
docker build -t copyparty/iv:latest -f Dockerfile.iv .
echo 'scale=1;'`docker save copyparty/iv:latest | pigz -c | wc -c`/1024/1024 | bc
docker build -t copyparty/ac:latest -f Dockerfile.ac .
echo 'scale=1;'`docker save copyparty/ac:latest | pigz -c | wc -c`/1024/1024 | bc
docker build -t copyparty/dj:latest -f Dockerfile.dj .
echo 'scale=1;'`docker save copyparty/dj:latest | pigz -c | wc -c`/1024/1024 | bc
docker image ls
# push to docker hub, then mirror the same images to ghcr
push:
docker push copyparty/min
docker push copyparty/im
docker push copyparty/iv
docker push copyparty/ac
docker push copyparty/dj
docker image tag copyparty/min:latest ghcr.io/9001/copyparty-min:latest
docker image tag copyparty/im:latest ghcr.io/9001/copyparty-im:latest
docker image tag copyparty/iv:latest ghcr.io/9001/copyparty-iv:latest
docker image tag copyparty/ac:latest ghcr.io/9001/copyparty-ac:latest
docker image tag copyparty/dj:latest ghcr.io/9001/copyparty-dj:latest
docker push ghcr.io/9001/copyparty-min:latest
docker push ghcr.io/9001/copyparty-im:latest
docker push ghcr.io/9001/copyparty-iv:latest
docker push ghcr.io/9001/copyparty-ac:latest
docker push ghcr.io/9001/copyparty-dj:latest
# remove stopped containers and untagged (<none>) images
clean:
-docker kill `docker ps -q`
-docker rm `docker ps -qa`
-docker rmi -f `docker images -a | awk '/<none>/{print$$3}'`
# harder clean: also remove every image except the alpine base
hclean:
-docker kill `docker ps -q`
-docker rm `docker ps -qa`
-docker rmi `docker images -a | awk '!/^alpine/&&NR>1{print$$3}'`
# remove everything, base images included
purge:
-docker kill `docker ps -q`
-docker rm `docker ps -qa`
-docker rmi `docker images -qa`
# open an interactive shell in the most recently built image
sh:
@printf "\n\033[1;31mopening a shell in the most recently created docker image\033[0m\n"
docker run --rm -it --entrypoint /bin/ash `docker images -aq | head -n 1`

74
scripts/docker/README.md Normal file
View File

@@ -0,0 +1,74 @@
copyparty is available in these repos:
* https://hub.docker.com/r/copyparty
* https://github.com/9001?tab=packages&repo_name=copyparty
# getting started
run this command to grab the latest copyparty image and start it:
```bash
docker run --rm -it -u 1000 -p 3923:3923 -v /mnt/nas:/w -v $PWD/cfgdir:/cfg copyparty/ac
```
* `/w` is the path inside the container that gets shared by default, so mount one or more folders to share below there
* `/cfg` is an optional folder with zero or more config files (*.conf) to load
* `copyparty/ac` is the recommended [image edition](#editions)
* you can download the image from github instead by replacing `copyparty/ac` with `ghcr.io/9001/copyparty-ac`
i'm unfamiliar with docker-compose and alternatives so let me know if this section could be better 🙏
## configuration
the container has the same default config as the sfx and the pypi module, meaning it will listen on port 3923 and share the "current folder" (`/w` inside the container) as read-write for anyone
the recommended way to configure copyparty inside a container is to mount a folder which has one or more [config files](https://github.com/9001/copyparty/blob/hovudstraum/docs/example.conf) inside; `-v /your/config/folder:/cfg`
* but you can also provide arguments to the docker command if you prefer that
* config files must be named `something.conf` to get picked up
## editions
with image size after installation and when gzipped
* `min` (57 MiB, 20 gz) is just copyparty itself
* `im` (70 MiB, 25 gz) can thumbnail images with pillow, parse media files with mutagen
* `ac` (163 MiB, 56 gz) is `im` plus ffmpeg for video/audio thumbs + audio transcoding + better tags
* `iv` (211 MiB, 73 gz) is `ac` plus vips for faster heif / avif / jxl thumbnails
* `dj` (309 MiB, 104 gz) is `iv` plus beatroot/keyfinder to detect musical keys and bpm
`ac` is recommended since the additional features available in `iv` and `dj` are rarely useful
## detecting bpm and musical key
the `dj` edition comes with `keyfinder` and `beatroot` which can be used to detect music bpm and musical keys
enable them globally in a config file:
```yaml
[global]
e2dsa, e2ts # enable filesystem indexing and multimedia indexing
mtp: .bpm=f,t30,/mtag/audio-bpm.py # should take ~10sec
mtp: key=f,t190,/mtag/audio-key.py # should take ~50sec
```
or enable them for just one volume,
```yaml
[/music] # share name / URL
music # filesystem path inside the docker volume `/w`
flags:
e2dsa, e2ts
mtp: .bpm=f,t30,/mtag/audio-bpm.py
mtp: key=f,t190,/mtag/audio-key.py
```
or using commandline arguments,
```
-e2dsa -e2ts -mtp .bpm=f,t30,/mtag/audio-bpm.py -mtp key=f,t190,/mtag/audio-key.py
```
# build the images yourself
put `copyparty-sfx.py` into `../dist/` (or [build that from scratch](../../docs/devnotes.md#just-the-sfx) too) then run `make`

View File

@@ -15,10 +15,12 @@ gtar=$(command -v gtar || command -v gnutar) || true
} }
mode="$1" mode="$1"
fast="$2"
[ -z "$mode" ] && [ -z "$mode" ] &&
{ {
echo "need argument 1: (D)ry, (T)est, (U)pload" echo "need argument 1: (D)ry, (T)est, (U)pload"
echo " optional arg 2: fast"
echo echo
exit 1 exit 1
} }
@@ -90,10 +92,13 @@ load_env || {
load_env load_env
} }
# grab licenses
scripts/genlic.sh copyparty/res/COPYING.txt
# remove type hints to support python < 3.9 # remove type hints to support python < 3.9
rm -rf build/pypi rm -rf build/pypi
mkdir -p build/pypi mkdir -p build/pypi
cp -pR setup.py README.md LICENSE copyparty tests bin scripts/strip_hints build/pypi/ cp -pR setup.py README.md LICENSE copyparty contrib bin scripts/strip_hints build/pypi/
tar -c docs/lics.txt scripts/genlic.sh build/*.txt | tar -xC build/pypi/ tar -c docs/lics.txt scripts/genlic.sh build/*.txt | tar -xC build/pypi/
cd build/pypi cd build/pypi
f=../strip-hints-0.1.10.tar.gz f=../strip-hints-0.1.10.tar.gz
@@ -103,6 +108,34 @@ f=../strip-hints-0.1.10.tar.gz
tar --strip-components=2 -xf $f strip-hints-0.1.10/src/strip_hints tar --strip-components=2 -xf $f strip-hints-0.1.10/src/strip_hints
python3 -c 'from strip_hints.a import uh; uh("copyparty")' python3 -c 'from strip_hints.a import uh; uh("copyparty")'
# resolve symlinks
find -type l |
while IFS= read -r f1; do (
cd "${f1%/*}"
f1="./${f1##*/}"
f2="$(readlink "$f1")"
[ -e "$f2" ] || f2="../$f2"
[ -e "$f2" ] || {
echo could not resolve "$f1"
exit 1
}
rm "$f1"
cp -p "$f2" "$f1"
); done
# resolve symlinks on windows
[ "$OSTYPE" = msys ] &&
(cd ../..; git ls-files -s | awk '/^120000/{print$4}') |
while IFS= read -r x; do
[ $(wc -l <"$x") -gt 1 ] && continue
(cd "${x%/*}"; cp -p "../$(cat "${x##*/}")" ${x##*/})
done
rm -rf contrib
[ $fast ] && sed -ri s/5730/10/ copyparty/web/Makefile
(cd copyparty/web && make -j$(nproc) && rm Makefile)
# build
./setup.py clean2 ./setup.py clean2
./setup.py sdist bdist_wheel --universal ./setup.py sdist bdist_wheel --universal

View File

@@ -266,6 +266,14 @@ necho() {
cp -p "$f2" "$f1" cp -p "$f2" "$f1"
); done ); done
# resolve symlinks on windows
[ "$OSTYPE" = msys ] &&
(cd ..; git ls-files -s | awk '/^120000/{print$4}') |
while IFS= read -r x; do
[ $(wc -l <"$x") -gt 1 ] && continue
(cd "${x%/*}"; cp -p "../$(cat "${x##*/}")" ${x##*/})
done
# insert asynchat # insert asynchat
mkdir copyparty/vend mkdir copyparty/vend
for n in asyncore.py asynchat.py; do for n in asyncore.py asynchat.py; do

43
scripts/prep.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
set -e
# general housekeeping before a release
# self = absolute path of the directory containing this script
self=$(cd -- "$(dirname "$BASH_SOURCE")"; pwd -P)
# extract "major.minor.patch" from the VERSION tuple in __version__.py
ver=$(awk '/^VERSION/{gsub(/[^0-9]/," ");printf "%d.%d.%d\n",$1,$2,$3}' copyparty/__version__.py)
# rewrite contrib/package/arch/PKGBUILD in-place:
# bump pkgver to $ver and regenerate the sha256sums array
update_arch_pkgbuild() {
cd "$self/../contrib/package/arch"
rm -rf x
mkdir x
# list local paths of the non-https source= entries (plus the sfx itself),
# mapping $pkgname / $pkgver placeholders to real paths, then checksum them
(echo "$self/../dist/copyparty-sfx.py"
awk -v self="$self" '
/^\)/{o=0}
/^source=/{o=1;next}
{
sub(/..pkgname./,"copyparty");
sub(/.*pkgver./,self "/..");
sub(/^ +"/,"");sub(/"/,"")
}
o&&!/https/' PKGBUILD
) |
xargs sha256sum > x/sums
# emit: PKGBUILD up to sha256sums= (with pkgver bumped), then the fresh
# checksum array, then everything after the old array's closing paren
(awk -v ver=$ver '
/^pkgver=/{sub(/[0-9\.]+/,ver)};
/^sha256sums=/{exit};
1' PKGBUILD
echo -n 'sha256sums=('
p=; cat x/sums | while read s _; do
echo "$p\"$s\""
p=' '
done
awk '/^sha256sums=/{o=1} o&&/^\)/{o=2} o==2' PKGBUILD
) >a
mv a PKGBUILD
rm -rf x
}
update_arch_pkgbuild

View File

@@ -44,21 +44,33 @@ read a b c d _ < <(
) )
sed -r 's/1,2,3,0/'$a,$b,$c,$d'/;s/1\.2\.3/'$a.$b.$c/ <loader.rc >loader.rc2 sed -r 's/1,2,3,0/'$a,$b,$c,$d'/;s/1\.2\.3/'$a.$b.$c/ <loader.rc >loader.rc2
excl=(
copyparty.broker_mp
copyparty.broker_mpw
ctypes.macholib
curses
inspect
multiprocessing
pdb
pickle
pyftpdlib.prefork
urllib.request
urllib.response
urllib.robotparser
zipfile
)
false || excl+=(
PIL
PIL.ExifTags
PIL.Image
PIL.ImageDraw
PIL.ImageOps
)
excl=( "${excl[@]/#/--exclude-module }" )
$APPDATA/python/python37/scripts/pyinstaller \ $APPDATA/python/python37/scripts/pyinstaller \
-y --clean -p mods --upx-dir=. \ -y --clean -p mods --upx-dir=. \
--exclude-module copyparty.broker_mp \ ${excl[*]} \
--exclude-module copyparty.broker_mpw \
--exclude-module curses \
--exclude-module ctypes.macholib \
--exclude-module inspect \
--exclude-module multiprocessing \
--exclude-module pdb \
--exclude-module pickle \
--exclude-module pyftpdlib.prefork \
--exclude-module urllib.request \
--exclude-module urllib.response \
--exclude-module urllib.robotparser \
--exclude-module zipfile \
--version-file loader.rc2 -i loader.ico -n copyparty -c -F loader.py \ --version-file loader.rc2 -i loader.ico -n copyparty -c -F loader.py \
--add-data 'mods/copyparty/res;copyparty/res' \ --add-data 'mods/copyparty/res;copyparty/res' \
--add-data 'mods/copyparty/web;copyparty/web' --add-data 'mods/copyparty/web;copyparty/web'

View File

@@ -11,6 +11,7 @@ copyparty/broker_mp.py,
copyparty/broker_mpw.py, copyparty/broker_mpw.py,
copyparty/broker_thr.py, copyparty/broker_thr.py,
copyparty/broker_util.py, copyparty/broker_util.py,
copyparty/cfg.py,
copyparty/dxml.py, copyparty/dxml.py,
copyparty/fsutil.py, copyparty/fsutil.py,
copyparty/ftpd.py, copyparty/ftpd.py,

View File

@@ -1,7 +1,7 @@
#!/bin/bash #!/bin/bash
set -e set -e
for f in README.md docs/devnotes.md; do for f in README.md docs/devnotes.md docs/versus.md; do
cat $f | awk ' cat $f | awk '
function pr() { function pr() {
@@ -20,6 +20,8 @@ cat $f | awk '
/^#/{ /^#/{
lv=length($1); lv=length($1);
sub(/[^ ]+ /,""); sub(/[^ ]+ /,"");
sub(/\[/,"");
sub(/\]\([^)]+\)/,"");
bab=$0; bab=$0;
gsub(/ /,"-",bab); gsub(/ /,"-",bab);
gsub(/\./,"",bab); gsub(/\./,"",bab);
@@ -31,9 +33,9 @@ cat $f | awk '
{pr()} {pr()}
' > toc ' > toc
grep -E '^#+ [^ ]+ toc$' -B1000 -A2 <$f >p1 grep -E '^#+ *[^ ]+ toc$' -B1000 -A2 <$f >p1
h2="$(awk '/^#+ [^ ]+ toc$/{o=1;next} o&&/^#/{print;exit}' <$f)" h2="$(awk '/^#+ *[^ ]+ toc$/{o=1;next} o&&/^#/{print;exit}' <$f)"
grep -F "$h2" -B2 -A999999 <$f >p2 grep -F "$h2" -B2 -A999999 <$f >p2

View File

@@ -29,12 +29,6 @@ with open(here + "/README.md", "rb") as f:
txt = f.read().decode("utf-8") txt = f.read().decode("utf-8")
long_description = txt long_description = txt
try:
cmd = "bash scripts/genlic.sh copyparty/res/COPYING.txt"
sp.Popen(cmd.split()).wait()
except:
pass
about = {} about = {}
if not VERSION: if not VERSION:
with open(os.path.join(here, NAME, "__version__.py"), "rb") as f: with open(os.path.join(here, NAME, "__version__.py"), "rb") as f:
@@ -95,8 +89,6 @@ args = {
"Development Status :: 5 - Production/Stable", "Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License", "License :: OSI Approved :: MIT License",
"Programming Language :: Python", "Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3", "Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.4",
@@ -146,4 +138,7 @@ args = {
"cmdclass": {"clean2": clean2}, "cmdclass": {"clean2": clean2},
} }
if sys.version_info < (3, 8):
args["install_requires"].append("ipaddress")
setup(**args) setup(**args)

View File

@@ -54,6 +54,8 @@ the values in the `ex:` columns are linkified to `example.com/$value`
and the table can be sorted by clicking the headers and the table can be sorted by clicking the headers
the sandbox also makes `location` unavailable but there is `loc` instead; this website's url is <big><big><b id="whereami">foo</b></big></big>
the difference is that with `copyparty_pre` you'll probably break various copyparty features but if you use `copyparty_post` then future copyparty versions will probably break you the difference is that with `copyparty_pre` you'll probably break various copyparty features but if you use `copyparty_post` then future copyparty versions will probably break you
@@ -136,6 +138,10 @@ render(dom) {
} }
}, },
render2(dom) { render2(dom) {
// loc == window.location except available inside sandbox
ebi('whereami').innerHTML = loc.href;
// this one also works because util.js gets pulled into the sandbox
window.makeSortable(dom.getElementsByTagName('table')[0]); window.makeSortable(dom.getElementsByTagName('table')[0]);
} }
``` ```

View File

@@ -98,7 +98,7 @@ class Cfg(Namespace):
def __init__(self, a=None, v=None, c=None): def __init__(self, a=None, v=None, c=None):
ka = {} ka = {}
ex = "daw dav_inf dav_mac e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vu e2vp ed emp force_js ihead magic nid nih no_acode no_athumb no_dav no_del no_dupe no_logues no_mv no_readme no_robots no_scandir no_thumb no_vthumb no_zip nw xdev xlink xvol" ex = "daw dav_inf dav_mac dotsrch e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vu e2vp ed emp force_js getmod hardlink ihead magic never_symlink nid nih no_acode no_athumb no_dav no_dedup no_del no_dupe no_logues no_mv no_readme no_robots no_sb_md no_sb_lg no_scandir no_thumb no_vthumb no_zip nrand nw rand vc xdev xlink xvol"
ka.update(**{k: False for k in ex.split()}) ka.update(**{k: False for k in ex.split()})
ex = "dotpart no_rescan no_sendfile no_voldump plain_ip" ex = "dotpart no_rescan no_sendfile no_voldump plain_ip"
@@ -110,9 +110,12 @@ class Cfg(Namespace):
ex = "df loris re_maxage rproxy rsp_slp s_wr_slp theme themes turbo" ex = "df loris re_maxage rproxy rsp_slp s_wr_slp theme themes turbo"
ka.update(**{k: 0 for k in ex.split()}) ka.update(**{k: 0 for k in ex.split()})
ex = "doctitle favico html_head log_fk mth textfiles R RS SR" ex = "doctitle favico html_head lg_sbf log_fk md_sbf mth textfiles R RS SR"
ka.update(**{k: "" for k in ex.split()}) ka.update(**{k: "" for k in ex.split()})
ex = "xad xar xau xbd xbr xbu xm"
ka.update(**{k: [] for k in ex.split()})
super(Cfg, self).__init__( super(Cfg, self).__init__(
a=a or [], a=a or [],
v=v or [], v=v or [],
@@ -193,4 +196,5 @@ class VHttpConn(object):
self.nbyte = 0 self.nbyte = 0
self.ico = None self.ico = None
self.thumbcli = None self.thumbcli = None
self.freshen_pwd = 0.0
self.t0 = time.time() self.t0 = time.time()