Compare commits

...

41 Commits

Author  SHA1        Message                                              Date
ed      eeed2a840c  v1.3.10                                              2022-08-04 01:40:14 +02:00
ed      4aaa111925  v1.3.9                                               2022-08-04 00:39:37 +02:00
ed      e31248f018  include version info on startup and in crash dumps   2022-08-04 00:11:52 +02:00
ed      8b4cf022f2  bbox: tweak end-of-gallery animation                 2022-08-03 22:56:51 +02:00
ed      4e7455268a  tag-scanner perf                                     2022-08-03 22:33:20 +02:00
ed      680f8ae814  add xdev/xvol indexing guards                        2022-08-03 22:20:28 +02:00
ed      90555a4cea  clean-shutdown while hashing huge files              2022-08-03 21:06:10 +02:00
ed      56a62db591  force-exit by hammering ctrl-c                       2022-08-03 20:58:23 +02:00
ed      cf51997680  fix make-sfx.sh on windows/msys2                     2022-08-03 20:01:54 +02:00
ed      f05cc18d61  add missing polyfill                                 2022-08-03 19:42:42 +02:00
ed      5384c2e0f5  reentrant cleanup                                    2022-08-02 20:56:05 +02:00
ed      9bfbf80a0e  ui: fix navpane covering files on horizontal scroll  2022-08-02 20:48:26 +02:00
ed      f874d7754f  ui: toggle sorting folders before files (default-on) 2022-08-02 20:47:17 +02:00
ed      a669f79480  windows upload perf (fat32, smb)                     2022-08-02 20:39:51 +02:00
ed      1c3894743a  fix filekeys inside symlinked volumes                2022-08-02 20:26:51 +02:00
ed      75cdf17df4  cache sparsefile-support on windows too              2022-08-02 06:58:25 +02:00
ed      de7dd1e60a  more visible upload errors on mobile                 2022-08-02 06:17:13 +02:00
ed      0ee574a718  forget uploads that failed to initialize             2022-08-02 06:15:18 +02:00
ed      faac894706  oh                                                   2022-07-29 00:13:18 +02:00
ed      dac2fad48e  v1.3.8                                               2022-07-27 16:07:26 +02:00
ed      77f624b01e  improve shumantime + use it everywhere               2022-07-27 15:07:04 +02:00
ed      e24ffebfc8  indicate write-activity on splashpage                2022-07-27 14:53:15 +02:00
ed      70d07d1609  perf                                                 2022-07-27 14:01:30 +02:00
ed      bfb3303d87  include client total ETA in upload logs              2022-07-27 12:07:51 +02:00
ed      660705a436  defer volume reindexing on db activity               2022-07-27 11:48:47 +02:00
ed      74a3f97671  cleanup + bump deps                                  2022-07-27 00:15:49 +02:00
ed      b3e35bb494  async lsof w/ timeout                                2022-07-26 22:38:13 +02:00
ed      76adac7c72  up2k-hook-ytid: add mp4/webm/mkv metadata scanner    2022-07-26 22:09:18 +02:00
ed      5dc75ebb67  async e2ts / e2v + forget deleted shadowed           2022-07-26 12:47:40 +02:00
ed      d686ce12b6  lsof db on stuck transaction                         2022-07-25 02:07:59 +02:00
ed      d3c40a423e  mutagen: support nullduration tags                   2022-07-25 01:21:34 +02:00
ed      2fb1e6dab8  mute exception on zip abort                          2022-07-25 01:20:38 +02:00
ed      10430b347f  fix dumb prisonparty bug                             2022-07-22 20:49:35 +02:00
ed      e0e3f6ac3e  up2k-hook-ytid: add override                         2022-07-22 10:47:10 +02:00
ed      c694cbffdc  a11y: improve skip-to-files                          2022-07-20 23:44:57 +02:00
ed      bdd0e5d771  a11y: enter = onclick                                2022-07-20 23:32:02 +02:00
ed      aa98e427f0  audio-eq: add crossfeed                              2022-07-20 01:54:59 +02:00
ed      daa6f4c94c  add video hotkeys for digit-seeking                  2022-07-17 23:45:02 +02:00
ed      4a76663fb2  ensure free disk space                               2022-07-17 22:33:08 +02:00
ed      cebda5028a  v1.3.7                                               2022-07-16 20:48:23 +02:00
ed      3fa377a580  sqlite diag                                          2022-07-16 20:43:26 +02:00
38 changed files with 1530 additions and 575 deletions

.gitignore

@@ -5,13 +5,16 @@ __pycache__/
 MANIFEST.in
 MANIFEST
 copyparty.egg-info/
-buildenv/
-build/
-dist/
-sfx/
-py2/
 .venv/
+/buildenv/
+/build/
+/dist/
+/py2/
+/sfx/
+/unt/
+/log/
 
 # ide
 *.sublime-workspace
README.md

@@ -56,8 +56,11 @@ try the **[read-only demo server](https://a.ocv.me/pub/demo/)** 👀 running fro
 * [searching](#searching) - search by size, date, path/name, mp3-tags, ...
 * [server config](#server-config) - using arguments or config files, or a mix of both
 * [ftp-server](#ftp-server) - an FTP server can be started using `--ftp 3921`
-* [file indexing](#file-indexing)
-* [upload rules](#upload-rules) - set upload rules using volume flags
+* [file indexing](#file-indexing) - enables dedup and music search ++
+* [exclude-patterns](#exclude-patterns) - to save some time
+* [filesystem guards](#filesystem-guards) - avoid traversing into other filesystems
+* [periodic rescan](#periodic-rescan) - filesystem monitoring
+* [upload rules](#upload-rules) - set upload rules using volflags
 * [compress uploads](#compress-uploads) - files can be autocompressed on upload
 * [database location](#database-location) - in-volume (`.hist/up2k.db`, default) or somewhere else
 * [metadata from audio files](#metadata-from-audio-files) - set `-e2t` to index tags on upload
@@ -309,7 +312,7 @@ examples:
 * `u1` can open the `inc` folder, but cannot see the contents, only upload new files to it
 * `u2` can browse it and move files *from* `/inc` into any folder where `u2` has write-access
 * make folder `/mnt/ss` available at `/i`, read-write for u1, get-only for everyone else, and enable accesskeys: `-v /mnt/ss:i:rw,u1:g:c,fk=4`
-* `c,fk=4` sets the `fk` volume-flag to 4, meaning each file gets a 4-character accesskey
+* `c,fk=4` sets the `fk` volflag to 4, meaning each file gets a 4-character accesskey
 * `u1` can upload files, browse the folder, and see the generated accesskeys
 * other users cannot browse the folder, but can access the files if they have the full file URL with the accesskey
@@ -373,6 +376,7 @@ the browser has the following hotkeys (always qwerty)
 * `Esc` close viewer
 * videos:
 * `U/O` skip 10sec back/forward
+* `0..9` jump to 0%..90%
 * `P/K/Space` play/pause
 * `M` mute
 * `C` continue playing next video
@@ -655,7 +659,9 @@ an FTP server can be started using `--ftp 3921`, and/or `--ftps` for explicit T
 ## file indexing
 
-file indexing relies on two database tables, the up2k filetree (`-e2d`) and the metadata tags (`-e2t`), stored in `.hist/up2k.db`. Configuration can be done through arguments, volume flags, or a mix of both.
+enables dedup and music search ++
+
+file indexing relies on two database tables, the up2k filetree (`-e2d`) and the metadata tags (`-e2t`), stored in `.hist/up2k.db`. Configuration can be done through arguments, volflags, or a mix of both.
 
 through arguments:
 * `-e2d` enables file indexing on upload
@@ -668,7 +674,7 @@ through arguments:
 * `-e2vu` patches the database with the new hashes from the filesystem
 * `-e2vp` panics and kills copyparty instead
 
-the same arguments can be set as volume flags, in addition to `d2d`, `d2ds`, `d2t`, `d2ts`, `d2v` for disabling:
+the same arguments can be set as volflags, in addition to `d2d`, `d2ds`, `d2t`, `d2ts`, `d2v` for disabling:
 * `-v ~/music::r:c,e2dsa,e2tsr` does a full reindex of everything on startup
 * `-v ~/music::r:c,d2d` disables **all** indexing, even if any `-e2*` are on
 * `-v ~/music::r:c,d2t` disables all `-e2t*` (tags), does not affect `-e2d*`
@@ -680,7 +686,9 @@ note:
 * `e2tsr` is probably always overkill, since `e2ds`/`e2dsa` would pick up any file modifications and `e2ts` would then reindex those, unless there is a new copyparty version with new parsers and the release note says otherwise
 * the rescan button in the admin panel has no effect unless the volume has `-e2ds` or higher
 
-to save some time, you can provide a regex pattern for filepaths to only index by filename/path/size/last-modified (and not the hash of the file contents) by setting `--no-hash \.iso$` or the volume-flag `:c,nohash=\.iso$`, this has the following consequences:
+### exclude-patterns
+
+to save some time, you can provide a regex pattern for filepaths to only index by filename/path/size/last-modified (and not the hash of the file contents) by setting `--no-hash \.iso$` or the volflag `:c,nohash=\.iso$`, this has the following consequences:
 * initial indexing is way faster, especially when the volume is on a network disk
 * makes it impossible to [file-search](#file-search)
 * if someone uploads the same file contents, the upload will not be detected as a dupe, so it will not get symlinked or rejected
@@ -689,12 +697,29 @@ similarly, you can fully ignore files/folders using `--no-idx [...]` and `:c,noi
 
 if you set `--no-hash [...]` globally, you can enable hashing for specific volumes using flag `:c,nohash=`
 
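for example, a start-command combining the global and per-volume forms (the folder paths below are just placeholders):

```
# skip hashing isos everywhere, but turn hashing back on for one volume
python3 copyparty-sfx.py --no-hash '\.iso$' \
  -v /mnt/nas/pub:pub:r \
  -v /mnt/nas/music:music:r:c,nohash=
```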
+### filesystem guards
+
+avoid traversing into other filesystems using `--xdev` / volflag `:c,xdev`, skipping any symlinks or bind-mounts to another HDD for example
+
+and/or you can `--xvol` / `:c,xvol` to ignore all symlinks leaving the volume's top directory, but still allow bind-mounts pointing elsewhere
+
+**NB: only affects the indexer** -- users can still access anything inside a volume, unless shadowed by another volume
+
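for example (placeholder paths again), guarding the whole server with `--xdev` and one volume with `xvol`:

```
# the indexer will not cross filesystem boundaries,
# and ignores symlinks leaving /mnt/nas/pub
python3 copyparty-sfx.py --xdev -v /mnt/nas/pub:pub:r:c,xvol
```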
+### periodic rescan
+
+filesystem monitoring; if copyparty is not the only software doing stuff on your filesystem, you may want to enable periodic rescans to keep the index up to date
+
+argument `--re-maxage 60` will rescan all volumes every 60 sec, same as volflag `:c,scan=60` to specify it per-volume
+
+uploads are disabled while a rescan is happening, so rescans will be delayed by `--db-act` (default 10 sec) when there is write-activity going on (uploads, renames, ...)
+
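for example, enabling rescans either globally or for a single volume (paths are placeholders):

```
# rescan all volumes every 60 sec, deferred until 30 sec after the last db write
python3 copyparty-sfx.py --re-maxage 60 --db-act 30

# or just one volume, using the volflag instead
python3 copyparty-sfx.py -v /mnt/nas/pub:pub:rw:c,scan=60
```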
 ## upload rules
 
-set upload rules using volume flags, some examples:
+set upload rules using volflags, some examples:
 
 * `:c,sz=1k-3m` sets allowed filesize between 1 KiB and 3 MiB inclusive (suffixes: `b`, `k`, `m`, `g`)
+* `:c,df=4g` block uploads if there would be less than 4 GiB free disk space afterwards
 * `:c,nosub` disallow uploading into subdirectories; goes well with `rotn` and `rotf`:
 * `:c,rotn=1000,2` moves uploads into subfolders, up to 1000 files in each folder before making a new one, two levels deep (must be at least 1)
 * `:c,rotf=%Y/%m/%d/%H` enforces files to be uploaded into a structure of subfolders according to that date format
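as a sketch, several of these combined on a hypothetical upload-only volume (each value-taking volflag written in its own `:c,` group):

```
# 1 KiB..3 MiB files, keep 4 GiB free, no subfolders, one folder per day
python3 copyparty-sfx.py \
  -v /mnt/nas/inc:inc:w:c,sz=1k-3m:c,df=4g:c,nosub:c,rotf=%Y-%m-%d
```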
@@ -713,16 +738,16 @@ you can also set transaction limits which apply per-IP and per-volume, but these
 
 files can be autocompressed on upload, either on user-request (if config allows) or forced by server-config
 
-* volume flag `gz` allows gz compression
-* volume flag `xz` allows lzma compression
-* volume flag `pk` **forces** compression on all files
+* volflag `gz` allows gz compression
+* volflag `xz` allows lzma compression
+* volflag `pk` **forces** compression on all files
 * url parameter `pk` requests compression with server-default algorithm
 * url parameter `gz` or `xz` requests compression with a specific algorithm
 * url parameter `xz` requests xz compression
 
 things to note,
 * the `gz` and `xz` arguments take a single optional argument, the compression level (range 0 to 9)
-* the `pk` volume flag takes the optional argument `ALGORITHM,LEVEL` which will then be forced for all uploads, for example `gz,9` or `xz,0`
+* the `pk` volflag takes the optional argument `ALGORITHM,LEVEL` which will then be forced for all uploads, for example `gz,9` or `xz,0`
 * default compression is gzip level 9
 * all upload methods except up2k are supported
 * the files will be indexed after compression, so dupe-detection and file-search will not work as expected
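for example (placeholder volumes), allowing optional gz on one volume and forcing lzma on another:

```
# clients can request compression on /inc with the gz url parameter;
# everything uploaded to /logs is forced to xz level 0
python3 copyparty-sfx.py \
  -v /mnt/nas/inc:inc:w:c,gz \
  -v /mnt/nas/logs:logs:w:c,pk=xz,0
```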
@@ -742,7 +767,7 @@ in-volume (`.hist/up2k.db`, default) or somewhere else
 
 copyparty creates a subfolder named `.hist` inside each volume where it stores the database, thumbnails, and some other stuff
 
-this can instead be kept in a single place using the `--hist` argument, or the `hist=` volume flag, or a mix of both:
+this can instead be kept in a single place using the `--hist` argument, or the `hist=` volflag, or a mix of both:
 * `--hist ~/.cache/copyparty -v ~/music::r:c,hist=-` sets `~/.cache/copyparty` as the default place to put volume info, but `~/music` gets the regular `.hist` subfolder (`-` restores default behavior)
 
 note:
@@ -780,7 +805,7 @@ see the beautiful mess of a dictionary in [mtag.py](https://github.com/9001/copy
 
 provide custom parsers to index additional tags, also see [./bin/mtag/README.md](./bin/mtag/README.md)
 
-copyparty can invoke external programs to collect additional metadata for files using `mtp` (either as argument or volume flag), there is a default timeout of 30sec, and only files which contain audio get analyzed by default (see ay/an/ad below)
+copyparty can invoke external programs to collect additional metadata for files using `mtp` (either as argument or volflag), there is a default timeout of 30sec, and only files which contain audio get analyzed by default (see ay/an/ad below)
 
 * `-mtp .bpm=~/bin/audio-bpm.py` will execute `~/bin/audio-bpm.py` with the audio file as argument 1 to provide the `.bpm` tag, if that does not exist in the audio metadata
 * `-mtp key=f,t5,~/bin/audio-key.py` uses `~/bin/audio-key.py` to get the `key` tag, replacing any existing metadata tag (`f,`), aborting if it takes longer than 5sec (`t5,`)
@@ -821,8 +846,8 @@ if this becomes popular maybe there should be a less janky way to do it actually
 
 tell search engines you dont wanna be indexed, either using the good old [robots.txt](https://www.robotstxt.org/robotstxt.html) or through copyparty settings:
 
 * `--no-robots` adds HTTP (`X-Robots-Tag`) and HTML (`<meta>`) headers with `noindex, nofollow` globally
-* volume-flag `[...]:c,norobots` does the same thing for that single volume
-* volume-flag `[...]:c,robots` ALLOWS search-engine crawling for that volume, even if `--no-robots` is set globally
+* volflag `[...]:c,norobots` does the same thing for that single volume
+* volflag `[...]:c,robots` ALLOWS search-engine crawling for that volume, even if `--no-robots` is set globally
 
 also, `--force-js` disables the plain HTML folder listing, making things harder to parse for search engines
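for example, blocking crawlers everywhere except a single public volume (paths are placeholders):

```
python3 copyparty-sfx.py --no-robots \
  -v /mnt/nas/pub:pub:r:c,robots \
  -v /mnt/nas/priv:priv:r
```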
@@ -969,10 +994,10 @@ quick outline of the up2k protocol, see [uploading](#uploading) for the web-clie
 
 up2k has saved a few uploads from becoming corrupted in-transfer already; caught an android phone on wifi redhanded in wireshark with a bitflip, however bup with https would *probably* have noticed as well (thanks to tls also functioning as an integrity check)
 
 regarding the frequent server log message during uploads;
-`6.0M 106M/s 2.77G 102.9M/s n948 thank 4/0/3/1 10042/7198`
+`6.0M 106M/s 2.77G 102.9M/s n948 thank 4/0/3/1 10042/7198 00:01:09`
 * this chunk was `6 MiB`, uploaded at `106 MiB/s`
 * on this http connection, `2.77 GiB` transferred, `102.9 MiB/s` average, `948` chunks handled
-* client says `4` uploads OK, `0` failed, `3` busy, `1` queued, `10042 MiB` total size, `7198 MiB` left
+* client says `4` uploads OK, `0` failed, `3` busy, `1` queued, `10042 MiB` total size, `7198 MiB` and `00:01:09` left
 
 ## why chunk-hashes
 
@@ -1045,7 +1070,7 @@ some notes on hardening
 
 other misc notes:
 
 * you can disable directory listings by giving permission `g` instead of `r`, only accepting direct URLs to files
-* combine this with volume-flag `c,fk` to generate per-file accesskeys; users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404
+* combine this with volflag `c,fk` to generate per-file accesskeys; users which have full read-access will then see URLs with `?k=...` appended to the end, and `g` users must provide that URL including the correct key to avoid a 404
 
 ## gotchas
 
@@ -1238,11 +1263,15 @@ if you want thumbnails, `apt -y install ffmpeg`
 
 ideas for context to include in bug reports
 
+in general, commandline arguments (and config file if any)
+
 if something broke during an upload (replacing FILENAME with a part of the filename that broke):
 ```
 journalctl -aS '48 hour ago' -u copyparty | grep -C10 FILENAME | tee bug.log
 ```
+
+if there's a wall of base64 in the log (thread stacks) then please include that, especially if you run into something freezing up or getting stuck, for example `OperationalError('database is locked')` -- alternatively you can visit `/?stack` to see the stacks live, so http://127.0.0.1:3923/?stack for example
 
 # building

bin/mtag/README.md

@@ -42,7 +42,7 @@ run [`install-deps.sh`](install-deps.sh) to build/install most dependencies requ
 
 * `mtp` modules will not run if a file has existing tags in the db, so clear out the tags with `-e2tsr` the first time you launch with new `mtp` options
 
-## usage with volume-flags
+## usage with volflags
 
 instead of affecting all volumes, you can set the options for just one volume like so:

bin/prisonparty.sh

@@ -11,13 +11,13 @@ sysdirs=( /bin /lib /lib32 /lib64 /sbin /usr )
 
 help() { cat <<'EOF'
 usage:
-  ./prisonparty.sh <ROOTDIR> <UID> <GID> [VOLDIR [VOLDIR...]] -- python3 copyparty-sfx.py [...]"
+  ./prisonparty.sh <ROOTDIR> <UID> <GID> [VOLDIR [VOLDIR...]] -- python3 copyparty-sfx.py [...]
 
 example:
-  ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 copyparty-sfx.py -v /mnt/nas/music::rwmd"
+  ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 copyparty-sfx.py -v /mnt/nas/music::rwmd
 
 example for running straight from source (instead of using an sfx):
-  PYTHONPATH=$PWD ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 -um copyparty -v /mnt/nas/music::rwmd"
+  PYTHONPATH=$PWD ./prisonparty.sh /var/lib/copyparty-jail 1000 1000 /mnt/nas/music -- python3 -um copyparty -v /mnt/nas/music::rwmd
 
 note that if you have python modules installed as --user (such as bpm/key detectors),
 you should add /home/foo/.local as a VOLDIR

contrib/plugins/up2k-hook-ytid.js

@@ -2,29 +2,140 @@
 // assumes all files dropped into the uploader have a youtube-id somewhere in the filename,
 // locates the youtube-ids and passes them to an API which returns a list of IDs which should be uploaded
 //
+// also tries to find the youtube-id in the embedded metadata
+//
 // assumes copyparty is behind nginx as /ytq is a standalone service which must be rproxied in place
 
 function up2k_namefilter(good_files, nil_files, bad_files, hooks) {
-    var filenames = [],
-        file_lists = [good_files, nil_files, bad_files];
-
-    for (var lst of file_lists)
-        for (var ent of lst)
-            filenames.push(ent[1]);
-
-    var yt_ids = new Set();
-    for (var lst of file_lists)
-        for (var ent of lst) {
-            var m, name = ent[1];
-            while (true) {
-                // some ytdl fork did %(title)-%(id).%(ext) ...
-                m = /(?:^|[^\w])([\w-]{11})(?:$|[^\w-])/.exec(name);
-                if (!m)
-                    break;
-
-                yt_ids.add(m[1]);
-                name = name.replace(m[1], '');
-            }
-        }
+    var passthru = up2k.uc.fsearch;
+    if (passthru)
+        return hooks[0](good_files, nil_files, bad_files, hooks.slice(1));
+
+    a_up2k_namefilter(good_files, nil_files, bad_files, hooks).then(() => { });
+}
+
+function bstrpos(buf, ptn) {
+    var ofs = 0,
+        ch0 = ptn[0],
+        sz = buf.byteLength;
+
+    while (true) {
+        ofs = buf.indexOf(ch0, ofs);
+        if (ofs < 0 || ofs >= sz)
+            return -1;
+
+        for (var a = 1; a < ptn.length; a++)
+            if (buf[ofs + a] !== ptn[a])
+                break;
+
+        if (a === ptn.length)
+            return ofs;
+
+        ++ofs;
+    }
+}
+
+async function a_up2k_namefilter(good_files, nil_files, bad_files, hooks) {
+    var t0 = Date.now(),
+        yt_ids = new Set(),
+        textdec = new TextDecoder('latin1'),
+        md_ptn = new TextEncoder().encode('youtube.com/watch?v='),
+        file_ids = [],  // all IDs found for each good_files
+        mofs = 0,
+        mnchk = 0,
+        mfile = '';
+
+    for (var a = 0; a < good_files.length; a++) {
+        var [fobj, name] = good_files[a],
+            sz = fobj.size,
+            ids = [],
+            id_ok = false,
+            m;
+
+        // all IDs found in this file
+        file_ids.push(ids);
+
+        // look for ID in filename; reduce the
+        // metadata-scan intensity if the id looks safe
+        m = /[\[(-]([\w-]{11})[\])]?\.(?:mp4|webm|mkv)$/i.exec(name);
+        id_ok = !!m;
+
+        while (true) {
+            // fuzzy catch-all;
+            // some ytdl fork did %(title)-%(id).%(ext) ...
+            m = /(?:^|[^\w])([\w-]{11})(?:$|[^\w-])/.exec(name);
+            if (!m)
+                break;
+
+            name = name.replace(m[1], '');
+            yt_ids.add(m[1]);
+            ids.push(m[1]);
+        }
+
+        // look for IDs in video metadata,
+        if (/\.(mp4|webm|mkv)$/i.exec(name)) {
+            toast.show('inf r', 0, `analyzing file ${a + 1} / ${good_files.length} :\n${name}\n\nhave analysed ${++mnchk} files in ${(Date.now() - t0) / 1000} seconds, ${humantime((good_files.length - (a + 1)) * (((Date.now() - t0) / 1000) / mnchk))} remaining,\n\nbiggest offset so far is ${mofs}, in this file:\n\n${mfile}`);
+
+            // check first and last 128 MiB;
+            // pWxOroN5WCo.mkv @ 6edb98 (6.92M)
+            // Nf-nN1wF5Xo.mp4 @ 4a98034 (74.6M)
+            var chunksz = 1024 * 1024 * 2,  // byte
+                aspan = id_ok ? 128 : 512;  // MiB
+
+            aspan = parseInt(Math.min(sz / 2, aspan * 1024 * 1024) / chunksz) * chunksz;
+
+            for (var side = 0; side < 2; side++) {
+                var ofs = side ? Math.max(0, sz - aspan) : 0,
+                    nchunks = aspan / chunksz;
+
+                for (var chunk = 0; chunk < nchunks; chunk++) {
+                    var bchunk = await fobj.slice(ofs, ofs + chunksz + 16).arrayBuffer(),
+                        uchunk = new Uint8Array(bchunk, 0, bchunk.byteLength),
+                        bofs = bstrpos(uchunk, md_ptn),
+                        absofs = Math.min(ofs + bofs, (sz - ofs) + bofs),
+                        txt = bofs < 0 ? '' : textdec.decode(uchunk.subarray(bofs)),
+                        m;
+
+                    //console.log(`side ${ side }, chunk ${ chunk }, ofs ${ ofs }, bchunk ${ bchunk.byteLength }, txt ${ txt.length }`);
+                    while (true) {
+                        // mkv/webm have [a-z] immediately after url
+                        m = /(youtube\.com\/watch\?v=[\w-]{11})/.exec(txt);
+                        if (!m)
+                            break;
+
+                        txt = txt.replace(m[1], '');
+                        m = m[1].slice(-11);
+                        console.log(`found ${m} @${bofs}, ${name} `);
+
+                        yt_ids.add(m);
+                        if (!has(ids, m))
+                            ids.push(m);
+
+                        // bail after next iteration
+                        chunk = nchunks - 1;
+                        side = 9;
+
+                        if (mofs < absofs) {
+                            mofs = absofs;
+                            mfile = name;
+                        }
+                    }
+
+                    ofs += chunksz;
+                    if (ofs >= sz)
+                        break;
+                }
+            }
+        }
+    }
+
+    if (false) {
+        var msg = `finished analysing ${mnchk} files in ${(Date.now() - t0) / 1000} seconds,\n\nbiggest offset was ${mofs} in this file:\n\n${mfile}`,
+            mfun = function () { toast.ok(0, msg); };
+
+        mfun();
+        setTimeout(mfun, 200);
+
+        return hooks[0]([], [], [], hooks.slice(1));
+    }
 
     toast.inf(5, `running query for ${yt_ids.size} videos...`);
@@ -36,34 +147,61 @@
         if (this.status != 200)
             return toast.err(0, `sorry, database query failed ;_;\n\nplease let us know so we can look at it, thx!!\n\nerror ${this.status}: ${(this.response && this.response.err) || this.responseText}`);
 
-        var new_lists = [],
-            ptn = new RegExp(this.responseText.trim().split('\n').join('|') || '\n'),
-            nothing_to_do = true,
-            n_skip = 0;
-
-        for (var lst of file_lists) {
-            var keep = [];
-            new_lists.push(keep);
-
-            for (var ent of lst)
-                if (ptn.exec(ent[1]))
-                    keep.push(ent);
-                else
-                    n_skip++;
-
-            if (keep.length)
-                nothing_to_do = false;
-        }
-
-        if (nothing_to_do)
-            return modal.alert('Good news -- turns out we already have all those videos.\n\nBut thank you for checking in!');
-        else if (n_skip)
-            toast.inf(0, `skipped ${n_skip} files which already exist on the server`);
-
-        [good_files, nil_files, bad_files] = new_lists;
-        hooks[0](good_files, nil_files, bad_files, hooks.slice(1));
+        process_id_list(this.responseText);
     };
     xhr.send(Array.from(yt_ids).join('\n'));
+
+    function process_id_list(txt) {
+        var wanted_ids = new Set(txt.trim().split('\n')),
+            wanted_names = new Set(),  // basenames with a wanted ID
+            wanted_files = new Set();  // filedrops
+
+        for (var a = 0; a < good_files.length; a++) {
+            var name = good_files[a][1];
+            for (var b = 0; b < file_ids[a].length; b++)
+                if (wanted_ids.has(file_ids[a][b])) {
+                    wanted_files.add(good_files[a]);
+
+                    var m = /(.*)\.(mp4|webm|mkv)$/i.exec(name);
+                    if (m)
+                        wanted_names.add(m[1]);
+
+                    break;
+                }
+        }
+
+        // add all files with the same basename as each explicitly wanted file
+        // (infojson/chatlog/etc when ID was discovered from metadata)
+        for (var a = 0; a < good_files.length; a++) {
+            var name = good_files[a][1];
+            for (var b = 0; b < 3; b++) {
+                name = name.replace(/\.[^\.]+$/, '');
+                if (wanted_names.has(name)) {
+                    wanted_files.add(good_files[a]);
+                    break;
+                }
+            }
+        }
+
+        function upload_filtered() {
+            if (!wanted_files.size)
+                return modal.alert('Good news -- turns out we already have all those.\n\nBut thank you for checking in!');
+
+            hooks[0](Array.from(wanted_files), nil_files, bad_files, hooks.slice(1));
+        }
+
+        function upload_all() {
+            hooks[0](good_files, nil_files, bad_files, hooks.slice(1));
+        }
+
+        var n_skip = good_files.length - wanted_files.size,
+            msg = `you added ${good_files.length} files; ${good_files.length == n_skip ? 'all' : n_skip} of them were skipped --\neither because we already have them,\nor because there is no youtube-ID in your filenames.\n\n<code>OK</code> / <code>Enter</code> = continue uploading just the ${wanted_files.size} files we definitely need\n\n<code>Cancel</code> / <code>ESC</code> = override the filter; upload ALL the files you added`;
+
+        if (!n_skip)
+            upload_filtered();
+        else
+            modal.confirm(msg, upload_filtered, upload_all);
+    };
 }
 
 up2k_hooks.push(function () {

copyparty/__main__.py

@@ -24,7 +24,18 @@ from .__init__ import ANYWIN, PY2, VT100, WINDOWS, E, unicode
 from .__version__ import CODENAME, S_BUILD_DT, S_VERSION
 from .authsrv import re_vol
 from .svchub import SvcHub
-from .util import IMPLICATIONS, align_tab, ansi_re, min_ex, py_desc, termsize, wrap
+from .util import (
+    IMPLICATIONS,
+    JINJA_VER,
+    PYFTPD_VER,
+    SQLITE_VER,
+    align_tab,
+    ansi_re,
+    min_ex,
+    py_desc,
+    termsize,
+    wrap,
+)
 
 try:
     from types import FrameType
@@ -382,6 +393,7 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
   \033[36mmaxn=250,600\033[35m max 250 uploads over 15min
   \033[36mmaxb=1g,300\033[35m max 1 GiB over 5min (suffixes: b, k, m, g)
   \033[36msz=1k-3m\033[35m allow filesizes between 1 KiB and 3MiB
+  \033[36mdf=1g\033[35m ensure 1 GiB free disk space
 
 \033[0mupload rotation:
   (moves all uploads into the specified folder structure)
@@ -396,10 +408,12 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
   \033[36md2t\033[35m disables metadata collection, overrides -e2t*
   \033[36md2v\033[35m disables file verification, overrides -e2v*
   \033[36md2d\033[35m disables all database stuff, overrides -e2*
-  \033[36mnohash=\\.iso$\033[35m skips hashing file contents if path matches *.iso
-  \033[36mnoidx=\\.iso$\033[35m fully ignores the contents at paths matching *.iso
   \033[36mhist=/tmp/cdb\033[35m puts thumbnails and indexes at that location
   \033[36mscan=60\033[35m scan for new files every 60sec, same as --re-maxage
+  \033[36mnohash=\\.iso$\033[35m skips hashing file contents if path matches *.iso
+  \033[36mnoidx=\\.iso$\033[35m fully ignores the contents at paths matching *.iso
+  \033[36mxdev\033[35m do not descend into other filesystems
+  \033[36mxvol\033[35m skip symlinks leaving the volume root
 
 \033[0mdatabase, audio tags:
   "mte", "mth", "mtp", "mtm" all work the same as -mte, -mth, ...
@@ -482,6 +496,7 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
     ap2.add_argument("--hardlink", action="store_true", help="prefer hardlinks instead of symlinks when possible (within same filesystem)")
     ap2.add_argument("--never-symlink", action="store_true", help="do not fallback to symlinks when a hardlink cannot be made")
     ap2.add_argument("--no-dedup", action="store_true", help="disable symlink/hardlink creation; copy file contents instead")
+    ap2.add_argument("--df", metavar="GiB", type=float, default=0, help="ensure GiB free disk space by rejecting upload requests")
     ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
     ap2.add_argument("--turbo", metavar="LVL", type=int, default=0, help="configure turbo-mode in up2k client; 0 = off and warn if enabled, 1 = off, 2 = on, 3 = on and disable datecheck")
     ap2.add_argument("--u2sort", metavar="TXT", type=u, default="s", help="upload order; s=smallest-first, n=alphabetical, fs=force-s, fn=force-n -- alphabetical is a bit slower on fiber/LAN but makes it easier to eyeball if everything went fine")
@@ -593,7 +608,10 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
     ap2.add_argument("--hist", metavar="PATH", type=u, help="where to store volume data (db, thumbs)")
     ap2.add_argument("--no-hash", metavar="PTN", type=u, help="regex: disable hashing of matching paths during e2ds folder scans")
     ap2.add_argument("--no-idx", metavar="PTN", type=u, help="regex: disable indexing of matching paths during e2ds folder scans")
+    ap2.add_argument("--xdev", action="store_true", help="do not descend into other filesystems (symlink or bind-mount to another HDD, ...)")
+    ap2.add_argument("--xvol", action="store_true", help="skip symlinks leaving the volume root")
     ap2.add_argument("--re-maxage", metavar="SEC", type=int, default=0, help="disk rescan volume interval, 0=off, can be set per-volume with the 'scan' volflag")
+    ap2.add_argument("--db-act", metavar="SEC", type=float, default=10, help="defer any scheduled volume reindexing until SEC seconds after last db write (uploads, renames, ...)")
     ap2.add_argument("--srch-time", metavar="SEC", type=int, default=30, help="search deadline -- terminate searches running for more than SEC seconds")
     ap2.add_argument("--srch-hits", metavar="N", type=int, default=7999, help="max search results to allow clients to fetch; 125 results will be shown initially")
@@ -631,6 +649,7 @@ def run_argparse(argv: list[str], formatter: Any, retry: bool) -> argparse.Names
     ap2.add_argument("--no-htp", action="store_true", help="disable httpserver threadpool, create threads as-needed instead")
     ap2.add_argument("--stackmon", metavar="P,S", type=u, help="write stacktrace to Path every S second")
     ap2.add_argument("--log-thrs", metavar="SEC", type=float, help="list active threads every SEC")
+    ap2.add_argument("--log-fk", metavar="REGEX", type=u, default="", help="log filekey params for files where path matches REGEX; '.' (a single dot) = all files")
     # fmt: on
 
     ap2 = ap.add_argument_group("help sections")
@@ -656,10 +675,17 @@ def main(argv: Optional[list[str]] = None) -> None:
     if argv is None:
         argv = sys.argv
 
-    desc = py_desc().replace("[", "\033[1;30m[")
-
-    f = '\033[36mcopyparty v{} "\033[35m{}\033[36m" ({})\n{}\033[0m\n'
-    lprint(f.format(S_VERSION, CODENAME, S_BUILD_DT, desc))
+    f = '\033[36mcopyparty v{} "\033[35m{}\033[36m" ({})\n{}\033[0;36m\n sqlite v{} | jinja2 v{} | pyftpd v{}\n\033[0m'
+    f = f.format(
+        S_VERSION,
+        CODENAME,
+        S_BUILD_DT,
+        py_desc().replace("[", "\033[1;30m["),
+        SQLITE_VER,
+        JINJA_VER,
+        PYFTPD_VER,
+    )
+    lprint(f)
 
     ensure_locale()
     if HAVE_SSL:

copyparty/__version__.py

@@ -1,8 +1,8 @@
 # coding: utf-8
 
-VERSION = (1, 3, 6)
+VERSION = (1, 3, 10)
 CODENAME = "god dag"
-BUILD_DT = (2022, 7, 16)
+BUILD_DT = (2022, 8, 4)
 
 S_VERSION = ".".join(map(str, VERSION))
 S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

copyparty/authsrv.py

@@ -20,6 +20,8 @@ from .util import (
     Pebkac,
     absreal,
     fsenc,
+    get_df,
+    humansize,
     relchk,
     statdir,
     uncyg,
@@ -72,15 +74,23 @@ class AXS(object):
 
 class Lim(object):
-    def __init__(self) -> None:
+    def __init__(self, log_func: Optional["RootLogger"]) -> None:
+        self.log_func = log_func
+
+        self.reg: Optional[dict[str, dict[str, Any]]] = None  # up2k registry
+
         self.nups: dict[str, list[float]] = {}  # num tracker
         self.bups: dict[str, list[tuple[float, int]]] = {}  # byte tracker list
         self.bupc: dict[str, int] = {}  # byte tracker cache
 
         self.nosub = False  # disallow subdirectories
 
-        self.smin = -1  # filesize min
-        self.smax = -1  # filesize max
+        self.dfl = 0  # free disk space limit
+        self.dft = 0  # last-measured time
+        self.dfv = 0  # currently free
+
+        self.smin = 0  # filesize min
+        self.smax = 0  # filesize max
 
         self.bwin = 0  # bytes window
         self.bmax = 0  # bytes max
@@ -92,18 +102,34 @@
         self.rotf = ""  # rot datefmt
         self.rot_re = re.compile("")  # rotf check
 
+    def log(self, msg: str, c: Union[int, str] = 0) -> None:
+        if self.log_func:
+            self.log_func("up-lim", msg, c)
+
     def set_rotf(self, fmt: str) -> None:
         self.rotf = fmt
         r = re.escape(fmt).replace("%Y", "[0-9]{4}").replace("%j", "[0-9]{3}")
         r = re.sub("%[mdHMSWU]", "[0-9]{2}", r)
         self.rot_re = re.compile("(^|/)" + r + "$")
 
-    def all(self, ip: str, rem: str, sz: float, abspath: str) -> tuple[str, str]:
+    def all(
+        self,
+        ip: str,
+        rem: str,
+        sz: int,
+        abspath: str,
+        reg: Optional[dict[str, dict[str, Any]]] = None,
+    ) -> tuple[str, str]:
+        if reg is not None and self.reg is None:
+            self.reg = reg
+            self.dft = 0
+
         self.chk_nup(ip)
         self.chk_bup(ip)
         self.chk_rem(rem)
         if sz != -1:
             self.chk_sz(sz)
+
+        self.chk_df(abspath, sz)  # side effects; keep last-ish
 
         ap2, vp2 = self.rot(abspath)
         if abspath == ap2:
@@ -111,13 +137,33 @@
         return ap2, ("{}/{}".format(rem, vp2) if rem else vp2)
 
-    def chk_sz(self, sz: float) -> None:
-        if self.smin != -1 and sz < self.smin:
+    def chk_sz(self, sz: int) -> None:
+        if sz < self.smin:
             raise Pebkac(400, "file too small")
 
-        if self.smax != -1 and sz > self.smax:
+        if self.smax and sz > self.smax:
             raise Pebkac(400, "file too big")
 
+    def chk_df(self, abspath: str, sz: int, already_written: bool = False) -> None:
+        if not self.dfl:
+            return
+
+        if self.dft < time.time():
+            self.dft = int(time.time()) + 300
+            self.dfv = get_df(abspath)[0] or 0
+            for j in list(self.reg.values()) if self.reg else []:
+                self.dfv -= int(j["size"] / len(j["hash"]) * len(j["need"]))
+
+        if already_written:
+            sz = 0
+
+        if self.dfv - sz < self.dfl:
+            self.dft = min(self.dft, int(time.time()) + 10)
+            t = "server HDD is full; {} free, need {}"
+            raise Pebkac(500, t.format(humansize(self.dfv - self.dfl), humansize(sz)))
+
+        self.dfv -= int(sz)
+
     def chk_rem(self, rem: str) -> None:
         if self.nosub and rem:
             raise Pebkac(500, "no subdirectories allowed")
@@ -226,7 +272,7 @@ class VFS(object):
     def __init__(
         self,
-        log: Optional[RootLogger],
+        log: Optional["RootLogger"],
         realpath: str,
         vpath: str,
         axs: AXS,
@@ -569,7 +615,7 @@ class AuthSrv(object):
     def __init__(
         self,
         args: argparse.Namespace,
-        log_func: Optional[RootLogger],
+        log_func: Optional["RootLogger"],
         warn_anonwrite: bool = True,
     ) -> None:
         self.args = args
@@ -661,7 +707,7 @@
                     raise Exception('invalid mountpoint "{}"'.format(vol_dst))
 
                 # cfg files override arguments and previous files
-                vol_src = bos.path.abspath(vol_src)
+                vol_src = absreal(vol_src)
                 vol_dst = vol_dst.strip("/")
                 self._map_volume(vol_src, vol_dst, mount, daxs, mflags)
                 continue
@@ -682,12 +728,12 @@
         self, lvl: str, uname: str, axs: AXS, flags: dict[str, Any]
     ) -> None:
         if lvl.strip("crwmdg"):
-            raise Exception("invalid volume flag: {},{}".format(lvl, uname))
+            raise Exception("invalid volflag: {},{}".format(lvl, uname))
 
         if lvl == "c":
             cval: Union[bool, str] = True
             try:
-                # volume flag with arguments, possibly with a preceding list of bools
+                # volflag with arguments, possibly with a preceding list of bools
                 uname, cval = uname.split("=", 1)
             except:
                 # just one or more bools
@@ -772,7 +818,7 @@
             src = uncyg(src)
 
         # print("\n".join([src, dst, perms]))
-        src = bos.path.abspath(src)
+        src = absreal(src)
         dst = dst.strip("/")
         self._map_volume(src, dst, mount, daxs, mflags)
 
@@ -801,7 +847,7 @@
         if not mount:
             # -h says our defaults are CWD at root and read/write for everyone
             axs = AXS(["*"], ["*"], None, None)
-            vfs = VFS(self.log_func, bos.path.abspath("."), "", axs, {})
+            vfs = VFS(self.log_func, absreal("."), "", axs, {})
         elif "" not in mount:
             # there's volumes but no root; make root inaccessible
            vfs = VFS(self.log_func, "", "", AXS(), {})
@@ -917,13 +963,20 @@
         vfs.histtab = {zv.realpath: zv.histpath for zv in vfs.all_vols.values()}
 
         for vol in vfs.all_vols.values():
-            lim = Lim()
+            lim = Lim(self.log_func)
             use = False
 
             if vol.flags.get("nosub"):
                 use = True
                 lim.nosub = True
 
+            zs = vol.flags.get("df") or (
+                "{}g".format(self.args.df) if self.args.df else ""
+            )
+            if zs:
+                use = True
+                lim.dfl = unhumanize(zs)
+
             zs = vol.flags.get("sz")
             if zs:
                 use = True
@@ -976,10 +1029,15 @@
                 vol.flags["dathumb"] = True
                 vol.flags["dithumb"] = True
 
+        have_fk = False
         for vol in vfs.all_vols.values():
             fk = vol.flags.get("fk")
             if fk:
                 vol.flags["fk"] = int(fk) if fk is not True else 8
+                have_fk = True
+
+        if have_fk and re.match(r"^[0-9\.]+$", self.args.fk_salt):
+            self.log("filekey salt: {}".format(self.args.fk_salt))
 
         for vol in vfs.all_vols.values():
             if "pk" in vol.flags and "gz" not in vol.flags and "xz" not in vol.flags:
@@ -1008,7 +1066,7 @@
             if ptn:
                 vol.flags[vf] = re.compile(ptn)
 
-            for k in ["e2t", "e2ts", "e2tsr", "e2v", "e2vu", "e2vp"]:
+            for k in ["e2t", "e2ts", "e2tsr", "e2v", "e2vu", "e2vp", "xdev", "xvol"]:
                 if getattr(self.args, k):
                     vol.flags[k] = True
@@ -1026,7 +1084,7 @@
             if "mth" not in vol.flags:
                 vol.flags["mth"] = self.args.mth
 
-            # append parsers from argv to volume-flags
+            # append parsers from argv to volflags
             self._read_volflag(vol.flags, "mtp", self.args.mtp, True)
 
             # d2d drops all database features for a volume
@@ -1089,7 +1147,7 @@
             for mtp in local_only_mtp:
                 if mtp not in local_mte:
-                    t = 'volume "/{}" defines metadata tag "{}", but doesnt use it in "-mte" (or with "cmte" in its volume-flags)'
+                    t = 'volume "/{}" defines metadata tag "{}", but doesnt use it in "-mte" (or with "cmte" in its volflags)'
                     self.log(t.format(vol.vpath, mtp), 1)
                     errors = True
@@ -1098,7 +1156,7 @@
         tags = [y for x in tags for y in x.split(",")]
         for mtp in tags:
             if mtp not in all_mte:
-                t = 'metadata tag "{}" is defined by "-mtm" or "-mtp", but is not used by "-mte" (or by any "cmte" volume-flag)'
+                t = 'metadata tag "{}" is defined by "-mtm" or "-mtp", but is not used by "-mte" (or by any "cmte" volflag)'
                 self.log(t.format(mtp), 1)
                 errors = True
@@ -1107,6 +1165,7 @@
         vfs.bubble_flags()
 
+        e2vs = []
         t = "volumes and permissions:\n"
         for zv in vfs.all_vols.values():
             if not self.warn_anonwrite:
@@ -1124,8 +1183,16 @@
                 u = ", ".join("\033[35meverybody\033[0m" if x == "*" else x for x in u)
                 u = u if u else "\033[36m--none--\033[0m"
                 t += "\n| {}: {}".format(txt, u)
+            if "e2v" in zv.flags:
+                e2vs.append(zv.vpath or "/")
             t += "\n"
 
+        if e2vs:
+            t += "\n\033[33me2v enabled for the following volumes;\nuploads will be blocked until scan has finished:\n \033[0m"
+            t += " ".join(e2vs) + "\n"
+
         if self.warn_anonwrite and not self.args.no_voldump:
             self.log(t)
@@ -1133,7 +1200,7 @@
             zv, _ = vfs.get("/", "*", False, True)
             if self.warn_anonwrite and os.getcwd() == zv.realpath:
                 self.warn_anonwrite = False
-                t = "anyone can read/write the current directory: {}\n"
+                t = "anyone can write to the current directory: {}\n"
                 self.log(t.format(zv.realpath), c=1)
         except Pebkac:
             self.warn_anonwrite = True

copyparty/broker_util.py

@@ -42,7 +42,7 @@ class BrokerCli(object):
     """
 
     def __init__(self) -> None:
-        self.log: RootLogger = None
+        self.log: "RootLogger" = None
         self.args: argparse.Namespace = None
         self.asrv: AuthSrv = None
         self.httpsrv: "HttpSrv" = None

copyparty/fsutil.py

@@ -1,17 +1,17 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals
 
-import ctypes
 import os
 import re
 import time
 
 from .__init__ import ANYWIN, MACOS
 from .authsrv import AXS, VFS
+from .bos import bos
 from .util import chkcmd, min_ex
 
 try:
-    from typing import Any, Optional, Union
+    from typing import Optional, Union
 
     from .util import RootLogger
 except:
@@ -19,7 +19,7 @@ except:
 
 class Fstab(object):
-    def __init__(self, log: RootLogger):
+    def __init__(self, log: "RootLogger"):
         self.log_func = log
 
         self.trusted = False
@@ -40,13 +40,9 @@
         msg = "failed to determine filesystem at [{}]; assuming {}\n{}"
 
         if ANYWIN:
-            fs = "vfat"  # can smb do sparse files? gonna guess no
+            fs = "vfat"
             try:
-                # good enough
-                disk = path.split(":", 1)[0]
-                disk = "{}:\\".format(disk).lower()
-                assert len(disk) == 3
-                path = disk
+                path = self._winpath(path)
             except:
                 self.log(msg.format(path, fs, min_ex()), 3)
                 return fs
@@ -67,6 +63,19 @@
         self.log("found {} at {}".format(fs, path))
         return fs
 
+    def _winpath(self, path: str) -> str:
+        # try to combine volume-label + st_dev (vsn)
+        path = path.replace("/", "\\")
+        vid = path.split(":", 1)[0].strip("\\").split("\\", 1)[0]
+        try:
+            return "{}*{}".format(vid, bos.stat(path).st_dev)
+        except:
+            return vid
+
+    def build_fallback(self) -> None:
+        self.tab = VFS(self.log_func, "idk", "/", AXS(), {})
+        self.trusted = False
+
     def build_tab(self) -> None:
         self.log("building tab")
@@ -96,6 +105,9 @@
     def relabel(self, path: str, nval: str) -> None:
         assert self.tab
         self.cache = {}
+        if ANYWIN:
+            path = self._winpath(path)
+
         path = path.lstrip("/")
         ptn = re.compile(r"^[^\\/]*")
         vn, rem = self.tab._find(path)
@@ -124,8 +136,7 @@
             except:
                 # prisonparty or other restrictive environment
                 self.log("failed to build tab:\n{}".format(min_ex()), 3)
-                self.tab = VFS(self.log_func, "idk", "/", AXS(), {})
-                self.trusted = False
+                self.build_fallback()
 
         assert self.tab
         ret = self.tab._find(path)[0]
@@ -135,43 +146,9 @@
         return "idk"
 
     def get_w32(self, path: str) -> str:
-        # list mountpoints: fsutil fsinfo drives
-
-        from ctypes.wintypes import BOOL, DWORD, LPCWSTR, LPDWORD, LPWSTR, MAX_PATH
-
-        def echk(rc: int, fun: Any, args: Any) -> None:
-            if not rc:
-                raise ctypes.WinError(ctypes.get_last_error())
-            return None
-
-        k32 = ctypes.WinDLL("kernel32", use_last_error=True)
-        k32.GetVolumeInformationW.errcheck = echk
-        k32.GetVolumeInformationW.restype = BOOL
-        k32.GetVolumeInformationW.argtypes = (
-            LPCWSTR,
-            LPWSTR,
-            DWORD,
-            LPDWORD,
-            LPDWORD,
-            LPDWORD,
-            LPWSTR,
-            DWORD,
-        )
-
-        bvolname = ctypes.create_unicode_buffer(MAX_PATH + 1)
-        bfstype = ctypes.create_unicode_buffer(MAX_PATH + 1)
-        serial = DWORD()
-        max_name_len = DWORD()
-        fs_flags = DWORD()
-
-        k32.GetVolumeInformationW(
-            path,
-            bvolname,
-            ctypes.sizeof(bvolname),
-            ctypes.byref(serial),
-            ctypes.byref(max_name_len),
-            ctypes.byref(fs_flags),
-            bfstype,
-            ctypes.sizeof(bfstype),
-        )
-
-        return bfstype.value
+        if not self.tab:
+            self.build_fallback()
+
+        assert self.tab
+        ret = self.tab._find(path)[0]
+        return ret.realpath

copyparty/httpcli.py

@@ -24,12 +24,7 @@ try:
 except:
     pass
 
-try:
-    import ctypes
-except:
-    pass
-
-from .__init__ import ANYWIN, PY2, TYPE_CHECKING, WINDOWS, E, unicode
+from .__init__ import ANYWIN, PY2, TYPE_CHECKING, E, unicode
 from .authsrv import VFS  # typechk
 from .bos import bos
 from .star import StreamTar
@@ -47,7 +42,9 @@ from .util import (
     exclude_dotfiles,
     fsenc,
     gen_filekey,
+    gen_filekey_dbg,
     gencookie,
+    get_df,
     get_spd,
     guess_mime,
     gzip_orig_sz,
@@ -112,6 +109,7 @@ class HttpCli(object):
         self.u2fh = conn.u2fh  # mypy404
         self.log_func = conn.log_func  # mypy404
         self.log_src = conn.log_src  # mypy404
+        self.gen_fk = self._gen_fk if self.args.log_fk else gen_filekey
         self.tls: bool = hasattr(self.s, "cipher")
 
         # placeholders; assigned by run()
@@ -181,6 +179,9 @@
         if rem.startswith("/") or rem.startswith("../") or "/../" in rem:
             raise Exception("that was close")
 
+    def _gen_fk(self, salt: str, fspath: str, fsize: int, inode: int) -> str:
+        return gen_filekey_dbg(salt, fspath, fsize, inode, self.log, self.args.log_fk)
+
     def j2s(self, name: str, **ka: Any) -> str:
         tpl = self.conn.hsrv.j2[name]
         ka["ts"] = self.conn.hsrv.cachebuster()
@@ -380,13 +381,21 @@
             if not self._check_nonfatal(pex, post):
                 self.keepalive = False
 
-            msg = str(ex) if pex == ex else min_ex()
-            self.log("{}\033[0m, {}".format(msg, self.vpath), 3)
+            em = str(ex)
+            msg = em if pex == ex else min_ex()
+            self.log(
+                "{}\033[0m, {}".format(msg, self.vpath),
+                6 if em.startswith("client d/c ") else 3,
+            )
 
-            msg = "{}\r\nURL: {}\r\n".format(str(ex), self.vpath)
+            msg = "{}\r\nURL: {}\r\n".format(em, self.vpath)
             if self.hint:
                 msg += "hint: {}\r\n".format(self.hint)
 
+            if "database is locked" in em:
+                self.conn.hsrv.broker.say("log_stacks")
+                msg += "hint: important info in the server log\r\n"
+
             msg = "<pre>" + html_escape(msg)
             self.reply(msg.encode("utf-8", "replace"), status=pex.code, volsan=True)
             return self.keepalive
@@ -707,7 +716,7 @@
         reader, remains = self.get_body_reader()
         vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True)
         lim = vfs.get_dbv(rem)[0].lim
-        fdir = os.path.join(vfs.realpath, rem)
+        fdir = vfs.canonical(rem)
         if lim:
             fdir, rem = lim.all(self.ip, rem, remains, fdir)
@@ -809,7 +818,7 @@
         vsuf = ""
         if self.can_read and "fk" in vfs.flags:
-            vsuf = "?k=" + gen_filekey(
+            vsuf = "?k=" + self.gen_fk(
                 self.args.fk_salt,
                 path,
                 post_sz,
@@ -946,7 +955,7 @@
         if rem:
             try:
-                dst = os.path.join(vfs.realpath, rem)
+                dst = vfs.canonical(rem)
                 if not bos.path.isdir(dst):
                     bos.makedirs(dst)
             except OSError as ex:
@@ -1181,7 +1190,7 @@
         sanitized = sanitize_fn(new_dir, "", [])
 
         if not nullwrite:
-            fdir = os.path.join(vfs.realpath, rem)
+            fdir = vfs.canonical(rem)
             fn = os.path.join(fdir, sanitized)
 
             if not bos.path.isdir(fdir):
@@ -1220,7 +1229,7 @@
         sanitized = sanitize_fn(new_file, "", [])
 
         if not nullwrite:
-            fdir = os.path.join(vfs.realpath, rem)
+            fdir = vfs.canonical(rem)
             fn = os.path.join(fdir, sanitized)
 
             if bos.path.exists(fn):
@@ -1241,7 +1250,7 @@
         upload_vpath = self.vpath
         lim = vfs.get_dbv(rem)[0].lim
-        fdir_base = os.path.join(vfs.realpath, rem)
+        fdir_base = vfs.canonical(rem)
         if lim:
             fdir_base, rem = lim.all(self.ip, rem, -1, fdir_base)
             upload_vpath = "{}/{}".format(vfs.vpath, rem).strip("/")
@@ -1282,14 +1291,19 @@
                 else:
                     open_args = {}
                     tnam = fname = os.devnull
-                    fdir = ""
+                    fdir = abspath = ""
 
                 if lim:
                     lim.chk_bup(self.ip)
                     lim.chk_nup(self.ip)
 
                 try:
-                    max_sz = lim.smax if lim else 0
+                    max_sz = 0
+                    if lim:
+                        v1 = lim.smax
+                        v2 = lim.dfv - lim.dfl
+                        max_sz = min(v1, v2) if v1 and v2 else v1 or v2
                     with ren_open(tnam, "wb", 512 * 1024, **open_args) as zfw:
f, tnam = zfw["orz"] f, tnam = zfw["orz"]
tabspath = os.path.join(fdir, tnam) tabspath = os.path.join(fdir, tnam)
@@ -1304,16 +1318,20 @@ class HttpCli(object):
lim.nup(self.ip) lim.nup(self.ip)
lim.bup(self.ip, sz) lim.bup(self.ip, sz)
try: try:
lim.chk_df(tabspath, sz, True)
lim.chk_sz(sz) lim.chk_sz(sz)
lim.chk_bup(self.ip) lim.chk_bup(self.ip)
lim.chk_nup(self.ip) lim.chk_nup(self.ip)
except: except:
if not nullwrite:
bos.unlink(tabspath) bos.unlink(tabspath)
bos.unlink(abspath) bos.unlink(abspath)
fname = os.devnull fname = os.devnull
raise raise
if not nullwrite:
atomic_move(tabspath, abspath) atomic_move(tabspath, abspath)
files.append( files.append(
(sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath) (sz, sha_hex, sha_b64, p_file or "(discarded)", fname, abspath)
) )
@@ -1363,9 +1381,9 @@ class HttpCli(object):
for sz, sha_hex, sha_b64, ofn, lfn, ap in files: for sz, sha_hex, sha_b64, ofn, lfn, ap in files:
vsuf = "" vsuf = ""
if self.can_read and "fk" in vfs.flags: if self.can_read and "fk" in vfs.flags:
vsuf = "?k=" + gen_filekey( vsuf = "?k=" + self.gen_fk(
self.args.fk_salt, self.args.fk_salt,
abspath, ap,
sz, sz,
0 if ANYWIN or not ap else bos.stat(ap).st_ino, 0 if ANYWIN or not ap else bos.stat(ap).st_ino,
)[: vfs.flags["fk"]] )[: vfs.flags["fk"]]
@@ -1443,7 +1461,7 @@ class HttpCli(object):
raise Pebkac(411) raise Pebkac(411)
rp, fn = vsplit(rem) rp, fn = vsplit(rem)
fp = os.path.join(vfs.realpath, rp) fp = vfs.canonical(rp)
lim = vfs.get_dbv(rem)[0].lim lim = vfs.get_dbv(rem)[0].lim
if lim: if lim:
fp, rp = lim.all(self.ip, rp, clen, fp) fp, rp = lim.all(self.ip, rp, clen, fp)
@@ -1917,7 +1935,13 @@ class HttpCli(object):
vstate = {("/" + k).rstrip("/") + "/": v for k, v in vs["volstate"].items()} vstate = {("/" + k).rstrip("/") + "/": v for k, v in vs["volstate"].items()}
else: else:
vstate = {} vstate = {}
vs = {"scanning": None, "hashq": None, "tagq": None, "mtpq": None} vs = {
"scanning": None,
"hashq": None,
"tagq": None,
"mtpq": None,
"dbwt": None,
}
if self.uparam.get("ls") in ["v", "t", "txt"]: if self.uparam.get("ls") in ["v", "t", "txt"]:
if self.uname == "*": if self.uname == "*":
@@ -1927,7 +1951,7 @@ class HttpCli(object):
if vstate: if vstate:
txt += "\nstatus:" txt += "\nstatus:"
for k in ["scanning", "hashq", "tagq", "mtpq"]: for k in ["scanning", "hashq", "tagq", "mtpq", "dbwt"]:
txt += " {}({})".format(k, vs[k]) txt += " {}({})".format(k, vs[k])
if rvol: if rvol:
@@ -1956,6 +1980,7 @@ class HttpCli(object):
hashq=vs["hashq"], hashq=vs["hashq"],
tagq=vs["tagq"], tagq=vs["tagq"],
mtpq=vs["mtpq"], mtpq=vs["mtpq"],
dbwt=vs["dbwt"],
url_suf=suf, url_suf=suf,
k304=self.k304(), k304=self.k304(),
) )
@@ -2293,7 +2318,7 @@ class HttpCli(object):
if not is_dir and (self.can_read or self.can_get): if not is_dir and (self.can_read or self.can_get):
if not self.can_read and "fk" in vn.flags: if not self.can_read and "fk" in vn.flags:
correct = gen_filekey( correct = self.gen_fk(
self.args.fk_salt, abspath, st.st_size, 0 if ANYWIN else st.st_ino self.args.fk_salt, abspath, st.st_size, 0 if ANYWIN else st.st_ino
)[: vn.flags["fk"]] )[: vn.flags["fk"]]
got = self.uparam.get("k") got = self.uparam.get("k")
@@ -2317,26 +2342,14 @@ class HttpCli(object):
except: except:
self.log("#wow #whoa") self.log("#wow #whoa")
try:
# some fuses misbehave
if not self.args.nid: if not self.args.nid:
if WINDOWS: free, total = get_df(abspath)
try: if total is not None:
bfree = ctypes.c_ulonglong(0) h1 = humansize(free or 0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW( # type: ignore h2 = humansize(total)
ctypes.c_wchar_p(abspath), None, None, ctypes.pointer(bfree) srv_info.append("{} free of {}".format(h1, h2))
) elif free is not None:
srv_info.append(humansize(bfree.value) + " free") srv_info.append(humansize(free, True) + " free")
except:
pass
else:
sv = os.statvfs(fsenc(abspath))
free = humansize(sv.f_frsize * sv.f_bfree, True)
total = humansize(sv.f_frsize * sv.f_blocks, True)
srv_info.append("{} free of {}".format(free, total))
except:
pass
srv_infot = "</span> // <span>".join(srv_info) srv_infot = "</span> // <span>".join(srv_info)
@@ -2529,7 +2542,7 @@ class HttpCli(object):
if add_fk: if add_fk:
href = "{}?k={}".format( href = "{}?k={}".format(
quotep(href), quotep(href),
gen_filekey( self.gen_fk(
self.args.fk_salt, fspath, sz, 0 if ANYWIN else inf.st_ino self.args.fk_salt, fspath, sz, 0 if ANYWIN else inf.st_ino
)[:add_fk], )[:add_fk],
) )
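
Note: the recurring join-replacement above (os.path.join(vfs.realpath, rem) -> vfs.canonical(rem)) centralizes path resolution, and the post-upload size check now folds the disk-free budget (lim.dfv - lim.dfl) into max_sz. That expression treats 0 as "no limit" on either side; a worked sketch (helper name is mine):

    def effective_cap(smax, df_budget):
        # stricter of two limits; 0 (or None) means "unlimited" on that side
        return min(smax, df_budget) if smax and df_budget else smax or df_budget

    assert effective_cap(100, 300) == 100  # both set: the smaller wins
    assert effective_cap(0, 300) == 300    # only the disk budget applies
    assert effective_cap(100, 0) == 100    # only the size limit applies
    assert effective_cap(0, 0) == 0        # fully unlimited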
View File
@@ -62,7 +62,7 @@ class HttpConn(object):
self.nreq: int = 0 # mypy404 self.nreq: int = 0 # mypy404
self.nbyte: int = 0 # mypy404 self.nbyte: int = 0 # mypy404
self.u2idx: Optional[U2idx] = None self.u2idx: Optional[U2idx] = None
self.log_func: Util.RootLogger = hsrv.log # mypy404 self.log_func: "Util.RootLogger" = hsrv.log # mypy404
self.log_src: str = "httpconn" # mypy404 self.log_src: str = "httpconn" # mypy404
self.lf_url: Optional[Pattern[str]] = ( self.lf_url: Optional[Pattern[str]] = (
re.compile(self.args.lf_url) if self.args.lf_url else None re.compile(self.args.lf_url) if self.args.lf_url else None
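
Note: the only change here is quoting the annotation. Attribute annotations are evaluated at runtime, so the quoted form avoids a NameError when RootLogger is a typing-only Protocol; minimal sketch (module layout simplified):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from util import RootLogger  # seen by the typechecker, never imported at runtime

    class Conn(object):
        def __init__(self, log):
            # unquoted, the annotation below would be evaluated and crash;
            # as a string it is only read by mypy
            self.log_func: "RootLogger" = log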
View File
@@ -165,7 +165,7 @@ class HttpSrv(object):
"""listens on a shared tcp server""" """listens on a shared tcp server"""
ip, port = srv_sck.getsockname() ip, port = srv_sck.getsockname()
fno = srv_sck.fileno() fno = srv_sck.fileno()
msg = "subscribed @ {}:{} f{}".format(ip, port, fno) msg = "subscribed @ {}:{} f{} p{}".format(ip, port, fno, os.getpid())
self.log(self.name, msg) self.log(self.name, msg)
def fun() -> None: def fun() -> None:
@@ -261,7 +261,10 @@ class HttpSrv(object):
) )
self.thr_client(sck, addr) self.thr_client(sck, addr)
me.name = self.name + "-poolw" me.name = self.name + "-poolw"
except: except Exception as ex:
if str(ex).startswith("client d/c "):
self.log(self.name, "thr_client: " + str(ex), 6)
else:
self.log(self.name, "thr_client: " + min_ex(), 3) self.log(self.name, "thr_client: " + min_ex(), 3)
def shutdown(self) -> None: def shutdown(self) -> None:
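
Note: client disconnects are now logged dimmed (color 6) instead of as warnings with a full traceback; the split keys off the "client d/c " message prefix. The pattern in isolation (function name is mine):

    def report(ex, log):
        em = str(ex)
        if em.startswith("client d/c "):
            log("thr_client: " + em, 6)  # expected hangup; no traceback, dim color
        else:
            log("thr_client: " + em, 3)  # unexpected; the real code logs min_ex() here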
View File
@@ -248,7 +248,7 @@ def parse_ffprobe(txt: str) -> tuple[dict[str, tuple[int, Any]], dict[str, list[
class MTag(object): class MTag(object):
def __init__(self, log_func: RootLogger, args: argparse.Namespace) -> None: def __init__(self, log_func: "RootLogger", args: argparse.Namespace) -> None:
self.log_func = log_func self.log_func = log_func
self.args = args self.args = args
self.usable = True self.usable = True
@@ -437,6 +437,8 @@ class MTag(object):
return r1 return r1
def get_mutagen(self, abspath: str) -> dict[str, Union[str, float]]: def get_mutagen(self, abspath: str) -> dict[str, Union[str, float]]:
ret: dict[str, tuple[int, Any]] = {}
if not bos.path.isfile(abspath): if not bos.path.isfile(abspath):
return {} return {}
@@ -450,7 +452,10 @@ class MTag(object):
return self.get_ffprobe(abspath) if self.can_ffprobe else {} return self.get_ffprobe(abspath) if self.can_ffprobe else {}
sz = bos.path.getsize(abspath) sz = bos.path.getsize(abspath)
ret = {".q": (0, int((sz / md.info.length) / 128))} try:
ret[".q"] = (0, int((sz / md.info.length) / 128))
except:
pass
for attr, k, norm in [ for attr, k, norm in [
["codec", "ac", unicode], ["codec", "ac", unicode],
View File
@@ -44,7 +44,7 @@ class StreamTar(StreamArc):
def __init__( def __init__(
self, self,
log: NamedLogger, log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None], fgen: Generator[dict[str, Any], None, None],
**kwargs: Any **kwargs: Any
): ):
@@ -65,6 +65,7 @@ class StreamTar(StreamArc):
w.start() w.start()
def gen(self) -> Generator[Optional[bytes], None, None]: def gen(self) -> Generator[Optional[bytes], None, None]:
try:
while True: while True:
buf = self.qfile.q.get() buf = self.qfile.q.get()
if not buf: if not buf:
@@ -74,6 +75,7 @@ class StreamTar(StreamArc):
yield buf yield buf
yield None yield None
finally:
if self.errf: if self.errf:
bos.unlink(self.errf["ap"]) bos.unlink(self.errf["ap"])
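
Note: moving the unlink into a finally means the temporary error-report file is removed even when the client aborts the download mid-stream. Python guarantees a generator's finally runs when the generator is closed; tiny demo:

    def stream():
        try:
            for chunk in (b"a", b"b", b"c"):
                yield chunk
        finally:
            print("cleanup")  # runs on normal exhaustion AND on early abort

    it = stream()
    next(it)    # consume one chunk...
    it.close()  # ...then hang up: GeneratorExit fires inside, finally still runs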
View File
@@ -17,7 +17,7 @@ except:
class StreamArc(object): class StreamArc(object):
def __init__( def __init__(
self, self,
log: NamedLogger, log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None], fgen: Generator[dict[str, Any], None, None],
**kwargs: Any **kwargs: Any
): ):
View File
@@ -2,8 +2,11 @@
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import argparse import argparse
import base64
import calendar import calendar
import gzip
import os import os
import re
import shlex import shlex
import signal import signal
import socket import socket
@@ -17,7 +20,7 @@ try:
from types import FrameType from types import FrameType
import typing import typing
from typing import Optional, Union from typing import Any, Optional, Union
except: except:
pass pass
@@ -27,7 +30,15 @@ from .mtag import HAVE_FFMPEG, HAVE_FFPROBE
from .tcpsrv import TcpSrv from .tcpsrv import TcpSrv
from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv from .th_srv import HAVE_PIL, HAVE_VIPS, HAVE_WEBP, ThumbSrv
from .up2k import Up2k from .up2k import Up2k
from .util import ansi_re, min_ex, mp, start_log_thrs, start_stackmon from .util import (
VERSIONS,
alltrace,
ansi_re,
min_ex,
mp,
start_log_thrs,
start_stackmon,
)
class SvcHub(object): class SvcHub(object):
@@ -47,15 +58,18 @@ class SvcHub(object):
self.logf: Optional[typing.TextIO] = None self.logf: Optional[typing.TextIO] = None
self.logf_base_fn = "" self.logf_base_fn = ""
self.stop_req = False self.stop_req = False
self.reload_req = False
self.stopping = False self.stopping = False
self.stopped = False
self.reload_req = False
self.reloading = False self.reloading = False
self.stop_cond = threading.Condition() self.stop_cond = threading.Condition()
self.nsigs = 3
self.retcode = 0 self.retcode = 0
self.httpsrv_up = 0 self.httpsrv_up = 0
self.log_mutex = threading.Lock() self.log_mutex = threading.Lock()
self.next_day = 0 self.next_day = 0
self.tstack = 0.0
if args.sss or args.s >= 3: if args.sss or args.s >= 3:
args.ss = True args.ss = True
@@ -110,6 +124,9 @@ class SvcHub(object):
if not args.hardlink and args.never_symlink: if not args.hardlink and args.never_symlink:
args.no_dedup = True args.no_dedup = True
if args.log_fk:
args.log_fk = re.compile(args.log_fk)
# initiate all services to manage # initiate all services to manage
self.asrv = AuthSrv(self.args, self.log) self.asrv = AuthSrv(self.args, self.log)
if args.ls: if args.ls:
@@ -280,7 +297,9 @@ class SvcHub(object):
pass pass
self.shutdown() self.shutdown()
thr.join() # cant join; eats signals on win10
while not self.stopped:
time.sleep(0.1)
else: else:
self.stop_thr() self.stop_thr()
@@ -316,9 +335,22 @@ class SvcHub(object):
def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None: def signal_handler(self, sig: int, frame: Optional[FrameType]) -> None:
if self.stopping: if self.stopping:
if self.nsigs <= 0:
try:
threading.Thread(target=self.pr, args=("OMBO BREAKER",)).start()
time.sleep(0.1)
except:
pass
if ANYWIN:
os.system("taskkill /f /pid {}".format(os.getpid()))
else:
os.kill(os.getpid(), signal.SIGKILL)
else:
self.nsigs -= 1
return return
if sig == signal.SIGUSR1: if not ANYWIN and sig == signal.SIGUSR1:
self.reload_req = True self.reload_req = True
else: else:
self.stop_req = True self.stop_req = True
@@ -339,9 +371,7 @@ class SvcHub(object):
ret = 1 ret = 1
try: try:
with self.log_mutex: self.pr("OPYTHAT")
print("OPYTHAT")
self.tcpsrv.shutdown() self.tcpsrv.shutdown()
self.broker.shutdown() self.broker.shutdown()
self.up2k.shutdown() self.up2k.shutdown()
@@ -354,22 +384,23 @@ class SvcHub(object):
break break
if n == 3: if n == 3:
print("waiting for thumbsrv (10sec)...") self.pr("waiting for thumbsrv (10sec)...")
print("nailed it", end="") self.pr("nailed it", end="")
ret = self.retcode ret = self.retcode
except: except:
print("\033[31m[ error during shutdown ]\n{}\033[0m".format(min_ex())) self.pr("\033[31m[ error during shutdown ]\n{}\033[0m".format(min_ex()))
raise raise
finally: finally:
if self.args.wintitle: if self.args.wintitle:
print("\033]0;\033\\", file=sys.stderr, end="") print("\033]0;\033\\", file=sys.stderr, end="")
sys.stderr.flush() sys.stderr.flush()
print("\033[0m") self.pr("\033[0m")
if self.logf: if self.logf:
self.logf.close() self.logf.close()
self.stopped = True
sys.exit(ret) sys.exit(ret)
def _log_disabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None: def _log_disabled(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
@@ -436,6 +467,10 @@ class SvcHub(object):
if self.logf: if self.logf:
self.logf.write(msg) self.logf.write(msg)
def pr(self, *a: Any, **ka: Any) -> None:
with self.log_mutex:
print(*a, **ka)
def check_mp_support(self) -> str: def check_mp_support(self) -> str:
vmin = sys.version_info[1] vmin = sys.version_info[1]
if WINDOWS: if WINDOWS:
@@ -500,3 +535,16 @@ class SvcHub(object):
sck.sendall(b"READY=1") sck.sendall(b"READY=1")
except: except:
self.log("sd_notify", min_ex()) self.log("sd_notify", min_ex())
def log_stacks(self) -> None:
td = time.time() - self.tstack
if td < 300:
self.log("stacks", "cooldown {}".format(td))
return
self.tstack = time.time()
zs = "{}\n{}".format(VERSIONS, alltrace())
zb = zs.encode("utf-8", "replace")
zb = gzip.compress(zb)
zs = base64.b64encode(zb).decode("ascii")
self.log("stacks", zs)
View File
@@ -218,7 +218,7 @@ def gen_ecdr64_loc(ecdr64_pos: int) -> bytes:
class StreamZip(StreamArc): class StreamZip(StreamArc):
def __init__( def __init__(
self, self,
log: NamedLogger, log: "NamedLogger",
fgen: Generator[dict[str, Any], None, None], fgen: Generator[dict[str, Any], None, None],
utf8: bool = False, utf8: bool = False,
pre_crc: bool = False, pre_crc: bool = False,
@@ -272,6 +272,7 @@ class StreamZip(StreamArc):
def gen(self) -> Generator[bytes, None, None]: def gen(self) -> Generator[bytes, None, None]:
errors = [] errors = []
try:
for f in self.fgen: for f in self.fgen:
if "err" in f: if "err" in f:
errors.append((f["vp"], f["err"])) errors.append((f["vp"], f["err"]))
@@ -280,6 +281,8 @@ class StreamZip(StreamArc):
try: try:
for x in self.ser(f): for x in self.ser(f):
yield x yield x
except GeneratorExit:
raise
except: except:
ex = min_ex(5, True).replace("\n", "\n-- ") ex = min_ex(5, True).replace("\n", "\n-- ")
errors.append((f["vp"], ex)) errors.append((f["vp"], ex))
@@ -307,6 +310,6 @@ class StreamZip(StreamArc):
ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end) ecdr, _ = gen_ecdr(self.items, cdir_pos, cdir_end)
yield self._ct(ecdr) yield self._ct(ecdr)
finally:
if errors: if errors:
bos.unlink(errf["ap"]) bos.unlink(errf["ap"])
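
Note: the new `except GeneratorExit: raise` matters because a bare except catches BaseException, and GeneratorExit is one; without the re-raise, a client aborting the zip would be mis-filed as a per-file error and the stream would grind on. The pattern in isolation:

    def ser_all(fgen, errors):
        for vp in fgen:
            try:
                yield b"...file data for " + vp.encode("utf-8")
            except GeneratorExit:
                raise                          # consumer hung up; stop the whole archive
            except Exception as ex:
                errors.append((vp, repr(ex)))  # one bad file; keep zipping the rest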
View File
@@ -1,6 +1,7 @@
# coding: utf-8 # coding: utf-8
from __future__ import print_function, unicode_literals from __future__ import print_function, unicode_literals
import os
import re import re
import socket import socket
import sys import sys
@@ -128,7 +129,7 @@ class TcpSrv(object):
srv.listen(self.args.nc) srv.listen(self.args.nc)
ip, port = srv.getsockname() ip, port = srv.getsockname()
fno = srv.fileno() fno = srv.fileno()
msg = "listening @ {}:{} f{}".format(ip, port, fno) msg = "listening @ {}:{} f{} p{}".format(ip, port, fno, os.getpid())
self.log("tcpsrv", msg) self.log("tcpsrv", msg)
if self.args.q: if self.args.q:
print(msg) print(msg)
View File
@@ -12,6 +12,7 @@ import shutil
import signal import signal
import stat import stat
import subprocess as sp import subprocess as sp
import tempfile
import threading import threading
import time import time
import traceback import traceback
@@ -31,6 +32,8 @@ from .util import (
ProgressPrinter, ProgressPrinter,
absreal, absreal,
atomic_move, atomic_move,
db_ex_chk,
djoin,
fsenc, fsenc,
min_ex, min_ex,
quotep, quotep,
@@ -69,6 +72,8 @@ class Dbw(object):
class Mpqe(object): class Mpqe(object):
"""pending files to tag-scan"""
def __init__( def __init__(
self, self,
mtp: dict[str, MParser], mtp: dict[str, MParser],
@@ -98,9 +103,11 @@ class Up2k(object):
self.gid = 0 self.gid = 0
self.stop = False self.stop = False
self.mutex = threading.Lock() self.mutex = threading.Lock()
self.blocked: Optional[str] = None
self.pp: Optional[ProgressPrinter] = None self.pp: Optional[ProgressPrinter] = None
self.rescan_cond = threading.Condition() self.rescan_cond = threading.Condition()
self.need_rescan: set[str] = set() self.need_rescan: set[str] = set()
self.db_act = 0.0
self.registry: dict[str, dict[str, dict[str, Any]]] = {} self.registry: dict[str, dict[str, dict[str, Any]]] = {}
self.flags: dict[str, dict[str, Any]] = {} self.flags: dict[str, dict[str, Any]] = {}
@@ -126,6 +133,8 @@ class Up2k(object):
self.mem_cur = None self.mem_cur = None
self.sqlite_ver = None self.sqlite_ver = None
self.no_expr_idx = False self.no_expr_idx = False
self.timeout = int(max(self.args.srch_time, 5) * 1.2) + 1
self.spools: set[tempfile.SpooledTemporaryFile[bytes]] = set()
if HAVE_SQLITE3: if HAVE_SQLITE3:
# mojibake detector # mojibake detector
self.mem_cur = self._orz(":memory:") self.mem_cur = self._orz(":memory:")
@@ -134,7 +143,8 @@ class Up2k(object):
if self.sqlite_ver < (3, 9): if self.sqlite_ver < (3, 9):
self.no_expr_idx = True self.no_expr_idx = True
else: else:
self.log("could not initialize sqlite3, will use in-memory registry only") t = "could not initialize sqlite3, will use in-memory registry only"
self.log(t, 3)
if ANYWIN: if ANYWIN:
# usually fails to set lastmod too quickly # usually fails to set lastmod too quickly
@@ -193,6 +203,16 @@ class Up2k(object):
def log(self, msg: str, c: Union[int, str] = 0) -> None: def log(self, msg: str, c: Union[int, str] = 0) -> None:
self.log_func("up2k", msg + "\033[K", c) self.log_func("up2k", msg + "\033[K", c)
def _block(self, why: str) -> None:
self.blocked = why
self.log("uploads temporarily blocked due to " + why, 3)
def _unblock(self) -> None:
if self.blocked is not None:
self.blocked = None
if not self.stop:
self.log("uploads are now possible", 2)
def get_state(self) -> str: def get_state(self) -> str:
mtpq: Union[int, str] = 0 mtpq: Union[int, str] = 0
q = "select count(w) from mt where k = 't:mtp'" q = "select count(w) from mt where k = 't:mtp'"
@@ -213,6 +233,9 @@ class Up2k(object):
"hashq": self.n_hashq, "hashq": self.n_hashq,
"tagq": self.n_tagq, "tagq": self.n_tagq,
"mtpq": mtpq, "mtpq": mtpq,
"dbwt": "{:.2f}".format(
min(1000 * 24 * 60 * 60 - 1, time.time() - self.db_act)
),
} }
return json.dumps(ret, indent=4) return json.dumps(ret, indent=4)
@@ -245,10 +268,15 @@ class Up2k(object):
continue continue
if self.pp: if self.pp:
cooldown = now + 5 cooldown = now + 1
continue continue
if self.args.no_lifetime:
timeout = now + 9001 timeout = now + 9001
else:
# important; not deferred by db_act
timeout = self._check_lifetimes()
with self.mutex: with self.mutex:
for vp, vol in sorted(self.asrv.vfs.all_vols.items()): for vp, vol in sorted(self.asrv.vfs.all_vols.items()):
maxage = vol.flags.get("scan") maxage = vol.flags.get("scan")
@@ -264,6 +292,20 @@ class Up2k(object):
timeout = min(timeout, deadline) timeout = min(timeout, deadline)
if self.db_act > now - self.args.db_act:
# recent db activity; defer volume rescan
act_timeout = self.db_act + self.args.db_act
if self.need_rescan:
timeout = now
if timeout < act_timeout:
timeout = act_timeout
t = "volume rescan deferred {:.1f} sec, due to database activity"
self.log(t.format(timeout - now))
continue
with self.mutex:
vols = list(sorted(self.need_rescan)) vols = list(sorted(self.need_rescan))
self.need_rescan.clear() self.need_rescan.clear()
@@ -279,9 +321,10 @@ class Up2k(object):
for v in vols: for v in vols:
volage[v] = now volage[v] = now
if self.args.no_lifetime: def _check_lifetimes(self) -> float:
continue now = time.time()
timeout = now + 9001
if now: # diff-golf
for vp, vol in sorted(self.asrv.vfs.all_vols.items()): for vp, vol in sorted(self.asrv.vfs.all_vols.items()):
lifetime = vol.flags.get("lifetime") lifetime = vol.flags.get("lifetime")
if not lifetime: if not lifetime:
@@ -328,6 +371,8 @@ class Up2k(object):
if hits: if hits:
timeout = min(timeout, now + lifetime - (now - hits[0])) timeout = min(timeout, now + lifetime - (now - hits[0]))
return timeout
def _vis_job_progress(self, job: dict[str, Any]) -> str: def _vis_job_progress(self, job: dict[str, Any]) -> str:
perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"])) perc = 100 - (len(job["need"]) * 100.0 / len(job["hash"]))
path = os.path.join(job["ptop"], job["prel"], job["name"]) path = os.path.join(job["ptop"], job["prel"], job["name"])
@@ -416,6 +461,9 @@ class Up2k(object):
self.mtag = None self.mtag = None
# e2ds(a) volumes first # e2ds(a) volumes first
if next((zv for zv in vols if "e2ds" in zv.flags), None):
self._block("indexing")
for vol in vols: for vol in vols:
if self.stop: if self.stop:
break break
@@ -444,6 +492,8 @@ class Up2k(object):
self.volstate[vol.vpath] = t self.volstate[vol.vpath] = t
self._unblock()
# file contents verification # file contents verification
for vol in vols: for vol in vols:
if self.stop: if self.stop:
@@ -623,10 +673,15 @@ class Up2k(object):
top = vol.realpath top = vol.realpath
rei = vol.flags.get("noidx") rei = vol.flags.get("noidx")
reh = vol.flags.get("nohash") reh = vol.flags.get("nohash")
dev = 0
if vol.flags.get("xdev"):
dev = bos.stat(top).st_dev
with self.mutex: with self.mutex:
reg = self.register_vpath(top, vol.flags) reg = self.register_vpath(top, vol.flags)
assert reg and self.pp assert reg and self.pp
cur, _ = reg cur, db_path = reg
db = Dbw(cur, 0, time.time()) db = Dbw(cur, 0, time.time())
self.pp.n = next(db.c.execute("select count(w) from up"))[0] self.pp.n = next(db.c.execute("select count(w) from up"))[0]
@@ -640,15 +695,31 @@ class Up2k(object):
excl += list(self.asrv.vfs.histtab.values()) excl += list(self.asrv.vfs.histtab.values())
if WINDOWS: if WINDOWS:
excl = [x.replace("/", "\\") for x in excl] excl = [x.replace("/", "\\") for x in excl]
else:
# ~/.wine/dosdevices/z:/ and such
excl += ["/dev", "/proc", "/run", "/sys"]
rtop = absreal(top) rtop = absreal(top)
n_add = n_rm = 0 n_add = n_rm = 0
try: try:
n_add = self._build_dir(db, top, set(excl), top, rtop, rei, reh, []) n_add = self._build_dir(
n_rm = self._drop_lost(db.c, top) db,
except: top,
set(excl),
top,
rtop,
rei,
reh,
[],
dev,
bool(vol.flags.get("xvol")),
)
n_rm = self._drop_lost(db.c, top, excl)
except Exception as ex:
t = "failed to index volume [{}]:\n{}" t = "failed to index volume [{}]:\n{}"
self.log(t.format(top, min_ex()), c=1) self.log(t.format(top, min_ex()), c=1)
if db_ex_chk(self.log, ex, db_path):
self.hub.log_stacks()
if db.n: if db.n:
self.log("commit {} new files".format(db.n)) self.log("commit {} new files".format(db.n))
@@ -667,7 +738,13 @@ class Up2k(object):
rei: Optional[Pattern[str]], rei: Optional[Pattern[str]],
reh: Optional[Pattern[str]], reh: Optional[Pattern[str]],
seen: list[str], seen: list[str],
dev: int,
xvol: bool,
) -> int: ) -> int:
if xvol and not rcdir.startswith(top):
self.log("skip xvol: [{}] -> [{}]".format(top, rcdir), 6)
return 0
if rcdir in seen: if rcdir in seen:
t = "bailing from symlink loop,\n prev: {}\n curr: {}\n from: {}" t = "bailing from symlink loop,\n prev: {}\n curr: {}\n from: {}"
self.log(t.format(seen[-1], rcdir, cdir), 3) self.log(t.format(seen[-1], rcdir, cdir), 3)
@@ -677,6 +754,7 @@ class Up2k(object):
assert self.pp and self.mem_cur assert self.pp and self.mem_cur
self.pp.msg = "a{} {}".format(self.pp.n, cdir) self.pp.msg = "a{} {}".format(self.pp.n, cdir)
ret = 0 ret = 0
unreg: list[str] = []
seen_files = {} # != inames; files-only for dropcheck seen_files = {} # != inames; files-only for dropcheck
g = statdir(self.log_func, not self.args.no_scandir, False, cdir) g = statdir(self.log_func, not self.args.no_scandir, False, cdir)
gl = sorted(g) gl = sorted(g)
@@ -686,7 +764,12 @@ class Up2k(object):
return -1 return -1
abspath = os.path.join(cdir, iname) abspath = os.path.join(cdir, iname)
rp = abspath[len(top) :].lstrip("/")
if WINDOWS:
rp = rp.replace("\\", "/").strip("/")
if rei and rei.search(abspath): if rei and rei.search(abspath):
unreg.append(rp)
continue continue
nohash = reh.search(abspath) if reh else False nohash = reh.search(abspath) if reh else False
@@ -694,14 +777,20 @@ class Up2k(object):
sz = inf.st_size sz = inf.st_size
if stat.S_ISDIR(inf.st_mode): if stat.S_ISDIR(inf.st_mode):
rap = absreal(abspath) rap = absreal(abspath)
if dev and inf.st_dev != dev:
self.log("skip xdev {}->{}: {}".format(dev, inf.st_dev, abspath), 6)
continue
if abspath in excl or rap in excl: if abspath in excl or rap in excl:
unreg.append(rp)
continue continue
if iname == ".th" and bos.path.isdir(os.path.join(abspath, "top")): if iname == ".th" and bos.path.isdir(os.path.join(abspath, "top")):
# abandoned or foreign, skip # abandoned or foreign, skip
continue continue
# self.log(" dir: {}".format(abspath)) # self.log(" dir: {}".format(abspath))
try: try:
ret += self._build_dir(db, top, excl, abspath, rap, rei, reh, seen) ret += self._build_dir(
db, top, excl, abspath, rap, rei, reh, seen, dev, xvol
)
except: except:
t = "failed to index subdir [{}]:\n{}" t = "failed to index subdir [{}]:\n{}"
self.log(t.format(abspath, min_ex()), c=1) self.log(t.format(abspath, min_ex()), c=1)
@@ -710,10 +799,6 @@ class Up2k(object):
else: else:
# self.log("file: {}".format(abspath)) # self.log("file: {}".format(abspath))
seen_files[iname] = 1 seen_files[iname] = 1
rp = abspath[len(top) :].lstrip("/")
if WINDOWS:
rp = rp.replace("\\", "/").strip("/")
if rp.endswith(".PARTIAL") and time.time() - lmod < 60: if rp.endswith(".PARTIAL") and time.time() - lmod < 60:
# rescan during upload # rescan during upload
continue continue
@@ -770,6 +855,9 @@ class Up2k(object):
self.log("hash: {} @ [{}]".format(repr(ex), abspath)) self.log("hash: {} @ [{}]".format(repr(ex), abspath))
continue continue
if not hashes:
return -1
wark = up2k_wark_from_hashlist(self.salt, sz, hashes) wark = up2k_wark_from_hashlist(self.salt, sz, hashes)
self.db_add(db.c, wark, rd, fn, lmod, sz, "", 0) self.db_add(db.c, wark, rd, fn, lmod, sz, "", 0)
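
Note on the xdev guard added above: the volume root's st_dev is sampled once, and any directory whose st_dev differs sits on another filesystem (a mountpoint), so indexing skips it. Standalone illustration (path is hypothetical):

    import os

    root = "/srv/media"
    root_dev = os.stat(root).st_dev
    for name in sorted(os.listdir(root)):
        ap = os.path.join(root, name)
        if os.stat(ap).st_dev != root_dev:
            print("skip xdev:", ap)  # e.g. an NFS mount grafted into the volume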
@@ -785,6 +873,25 @@ class Up2k(object):
if self.stop: if self.stop:
return -1 return -1
# drop shadowed folders
for rd in unreg:
n = 0
q = "select count(w) from up where (rd = ? or rd like ?||'%') and at == 0"
for erd in [rd, "//" + w8b64enc(rd)]:
try:
n = db.c.execute(q, (erd, erd + "/")).fetchone()[0]
break
except:
pass
if n:
t = "forgetting {} shadowed autoindexed files in [{}] > [{}]"
self.log(t.format(n, top, rd))
q = "delete from up where (rd = ? or rd like ?||'%') and at == 0"
db.c.execute(q, (erd, erd + "/"))
ret += n
# drop missing files # drop missing files
rd = cdir[len(top) + 1 :].strip("/") rd = cdir[len(top) + 1 :].strip("/")
if WINDOWS: if WINDOWS:
@@ -807,12 +914,13 @@ class Up2k(object):
return ret return ret
def _drop_lost(self, cur: "sqlite3.Cursor", top: str) -> int: def _drop_lost(self, cur: "sqlite3.Cursor", top: str, excl: list[str]) -> int:
rm = [] rm = []
n_rm = 0 n_rm = 0
nchecked = 0 nchecked = 0
assert self.pp assert self.pp
# `_build_dir` did all the files, now do dirs
# `_build_dir` did all unshadowed files; first do dirs:
ndirs = next(cur.execute("select count(distinct rd) from up"))[0] ndirs = next(cur.execute("select count(distinct rd) from up"))[0]
c = cur.execute("select distinct rd from up order by rd desc") c = cur.execute("select distinct rd from up order by rd desc")
for (drd,) in c: for (drd,) in c:
@@ -832,9 +940,7 @@ class Up2k(object):
rm.append(drd) rm.append(drd)
if not rm: if rm:
return 0
q = "select count(w) from up where rd = ?" q = "select count(w) from up where rd = ?"
for rd in rm: for rd in rm:
n_rm += next(cur.execute(q, (rd,)))[0] n_rm += next(cur.execute(q, (rd,)))[0]
@@ -843,12 +949,46 @@ class Up2k(object):
for rd in rm: for rd in rm:
cur.execute("delete from up where rd = ?", (rd,)) cur.execute("delete from up where rd = ?", (rd,))
return n_rm # then shadowed deleted files
n_rm2 = 0
c2 = cur.connection.cursor()
excl = [x[len(top) + 1 :] for x in excl if x.startswith(top + "/")]
q = "select rd, fn from up where (rd = ? or rd like ?||'%') order by rd"
for rd in excl:
for erd in [rd, "//" + w8b64enc(rd)]:
try:
c = cur.execute(q, (erd, erd + "/"))
break
except:
pass
crd = "///"
cdc: set[str] = set()
for drd, dfn in c:
rd, fn = s3dec(drd, dfn)
if crd != rd:
crd = rd
try:
cdc = set(os.listdir(os.path.join(top, rd)))
except:
cdc.clear()
if fn not in cdc:
q = "delete from up where rd = ? and fn = ?"
c2.execute(q, (drd, dfn))
n_rm2 += 1
if n_rm2:
self.log("forgetting {} shadowed deleted files".format(n_rm2))
c2.close()
return n_rm + n_rm2
def _verify_integrity(self, vol: VFS) -> int: def _verify_integrity(self, vol: VFS) -> int:
"""expensive; blocks database access until finished""" """expensive; blocks database access until finished"""
ptop = vol.realpath ptop = vol.realpath
assert self.pp and self.mtag assert self.pp
cur = self.cur[ptop] cur = self.cur[ptop]
rei = vol.flags.get("noidx") rei = vol.flags.get("noidx")
@@ -867,7 +1007,7 @@ class Up2k(object):
qexa.append("up.rd != ? and not up.rd like ?||'%'") qexa.append("up.rd != ? and not up.rd like ?||'%'")
pexa.extend([vpath, vpath]) pexa.extend([vpath, vpath])
pex = tuple(pexa) pex: tuple[Any, ...] = tuple(pexa)
qex = " and ".join(qexa) qex = " and ".join(qexa)
if qex: if qex:
qex = " where " + qex qex = " where " + qex
@@ -882,11 +1022,22 @@ class Up2k(object):
b_left += sz # sum() can overflow according to docs b_left += sz # sum() can overflow according to docs
n_left += 1 n_left += 1
q = "select w, mt, sz, rd, fn from up" + qex tf, _ = self._spool_warks(cur, "select w, rd, fn from up" + qex, pex, 0)
for w, mt, sz, drd, dfn in cur.execute(q, pex):
with gzip.GzipFile(mode="rb", fileobj=tf) as gf:
for zb in gf:
if self.stop: if self.stop:
return -1 return -1
w, drd, dfn = zb[:-1].decode("utf-8").split("\x00")
with self.mutex:
q = "select mt, sz from up where w = ? and rd = ? and fn = ?"
try:
mt, sz = cur.execute(q, (w, drd, dfn)).fetchone()
except:
# file moved/deleted since spooling
continue
n_left -= 1 n_left -= 1
b_left -= sz b_left -= sz
if drd.startswith("//") or dfn.startswith("//"): if drd.startswith("//") or dfn.startswith("//"):
@@ -920,6 +1071,9 @@ class Up2k(object):
self.log("hash: {} @ [{}]".format(repr(ex), abspath)) self.log("hash: {} @ [{}]".format(repr(ex), abspath))
continue continue
if not hashes:
return -1
w2 = up2k_wark_from_hashlist(self.salt, sz2, hashes) w2 = up2k_wark_from_hashlist(self.salt, sz2, hashes)
if w == w2: if w == w2:
@@ -936,13 +1090,16 @@ class Up2k(object):
os.kill(os.getpid(), signal.SIGTERM) os.kill(os.getpid(), signal.SIGTERM)
raise Exception("{} files have incorrect hashes".format(len(rewark))) raise Exception("{} files have incorrect hashes".format(len(rewark)))
if not e2vu: if not e2vu or not rewark:
return 0 return 0
with self.mutex:
for rd, fn, w, sz, mt in rewark: for rd, fn, w, sz, mt in rewark:
q = "update up set w = ?, sz = ?, mt = ? where rd = ? and fn = ? limit 1" q = "update up set w = ?, sz = ?, mt = ? where rd = ? and fn = ? limit 1"
cur.execute(q, (w, sz, int(mt), rd, fn)) cur.execute(q, (w, sz, int(mt), rd, fn))
cur.connection.commit()
return len(rewark) return len(rewark)
def _build_tags_index(self, vol: VFS) -> tuple[int, int, bool]: def _build_tags_index(self, vol: VFS) -> tuple[int, int, bool]:
@@ -950,17 +1107,12 @@ class Up2k(object):
with self.mutex: with self.mutex:
reg = self.register_vpath(ptop, vol.flags) reg = self.register_vpath(ptop, vol.flags)
assert reg and self.pp and self.mtag assert reg and self.pp
_, db_path = reg
entags = self.entags[ptop] entags = self.entags[ptop]
flags = self.flags[ptop] flags = self.flags[ptop]
cur = self.cur[ptop] cur = self.cur[ptop]
n_add = 0
n_rm = 0 n_rm = 0
n_buf = 0
last_write = time.time()
if "e2tsr" in flags: if "e2tsr" in flags:
with self.mutex: with self.mutex:
n_rm = cur.execute("select count(w) from mt").fetchone()[0] n_rm = cur.execute("select count(w) from mt").fetchone()[0]
@@ -971,92 +1123,164 @@ class Up2k(object):
# integrity: drop tags for tracks that were deleted # integrity: drop tags for tracks that were deleted
if "e2t" in flags: if "e2t" in flags:
with self.mutex: with self.mutex:
drops = [] n = 0
c2 = cur.connection.cursor() c2 = cur.connection.cursor()
up_q = "select w from up where substr(w,1,16) = ?" up_q = "select w from up where substr(w,1,16) = ?"
rm_q = "delete from mt where w = ?"
for (w,) in cur.execute("select w from mt"): for (w,) in cur.execute("select w from mt"):
if not c2.execute(up_q, (w,)).fetchone(): if not c2.execute(up_q, (w,)).fetchone():
drops.append(w[:16]) c2.execute(rm_q, (w[:16],))
n += 1
c2.close() c2.close()
if n:
t = "discarded media tags for {} deleted files"
self.log(t.format(n))
n_rm += n
if drops: with self.mutex:
msg = "discarding media tags for {} deleted files" cur.connection.commit()
self.log(msg.format(len(drops)))
n_rm += len(drops)
for w in drops:
cur.execute("delete from mt where w = ?", (w,))
# bail if a volume flag disables indexing # bail if a volflag disables indexing
if "d2t" in flags or "d2d" in flags: if "d2t" in flags or "d2d" in flags:
return n_add, n_rm, True return 0, n_rm, True
# add tags for new files # add tags for new files
gcur = cur
with self.mutex:
gcur.connection.commit()
if "e2ts" in flags: if "e2ts" in flags:
if not self.mtag: if not self.mtag:
return n_add, n_rm, False return 0, n_rm, False
nq = 0
with self.mutex:
tf, nq = self._spool_warks(
cur, "select w from up order by rd, fn", (), 1
)
if not nq:
# self.log("tags ok")
self._unspool(tf)
return 0, n_rm, True
if nq == -1:
return -1, -1, True
with gzip.GzipFile(mode="rb", fileobj=tf) as gf:
n_add = self._e2ts_q(gf, nq, cur, ptop, entags)
self._unspool(tf)
return n_add, n_rm, True
def _e2ts_q(
self,
qf: gzip.GzipFile,
nq: int,
cur: "sqlite3.Cursor",
ptop: str,
entags: set[str],
) -> int:
assert self.pp and self.mtag
flags = self.flags[ptop]
mpool: Optional[Queue[Mpqe]] = None mpool: Optional[Queue[Mpqe]] = None
if self.mtag.prefer_mt and self.args.mtag_mt > 1: if self.mtag.prefer_mt and self.args.mtag_mt > 1:
mpool = self._start_mpool() mpool = self._start_mpool()
# TODO blocks writes to registry cursor; do chunks instead n_add = 0
conn = sqlite3.connect(db_path, timeout=15) n_buf = 0
cur = conn.cursor() last_write = time.time()
c2 = conn.cursor() for bw in qf:
c3 = conn.cursor()
n_left = cur.execute("select count(w) from up").fetchone()[0]
for w, rd, fn in cur.execute("select w, rd, fn from up order by rd, fn"):
if self.stop: if self.stop:
return -1, -1, False return -1
n_left -= 1 w = bw[:-1].decode("ascii")
q = "select w from mt where w = ?"
if c2.execute(q, (w[:16],)).fetchone(): with self.mutex:
try:
q = "select rd, fn from up where substr(w,1,16)=? and +w=?"
rd, fn = cur.execute(q, (w[:16], w)).fetchone()
except:
# file modified/deleted since spooling
continue continue
if "mtp" in flags:
q = "insert into mt values (?,'t:mtp','a')"
c2.execute(q, (w[:16],))
if rd.startswith("//") or fn.startswith("//"): if rd.startswith("//") or fn.startswith("//"):
rd, fn = s3dec(rd, fn) rd, fn = s3dec(rd, fn)
if "mtp" in flags:
q = "insert into mt values (?,'t:mtp','a')"
cur.execute(q, (w[:16],))
abspath = os.path.join(ptop, rd, fn) abspath = os.path.join(ptop, rd, fn)
self.pp.msg = "c{} {}".format(n_left, abspath) self.pp.msg = "c{} {}".format(nq, abspath)
if not mpool: if not mpool:
n_tags = self._tag_file(c3, entags, w, abspath) n_tags = self._tagscan_file(cur, entags, w, abspath)
else: else:
mpool.put(Mpqe({}, entags, w, abspath, {})) mpool.put(Mpqe({}, entags, w, abspath, {}))
# not registry cursor; do not self.mutex: with self.mutex:
n_tags = len(self._flush_mpool(c3)) n_tags = len(self._flush_mpool(cur))
n_add += n_tags n_add += n_tags
n_buf += n_tags n_buf += n_tags
nq -= 1
td = time.time() - last_write td = time.time() - last_write
if n_buf >= 4096 or td >= 60: if n_buf >= 4096 or td >= max(1, self.timeout - 1):
self.log("commit {} new tags".format(n_buf)) self.log("commit {} new tags".format(n_buf))
with self.mutex:
cur.connection.commit() cur.connection.commit()
last_write = time.time() last_write = time.time()
n_buf = 0 n_buf = 0
if mpool: if mpool:
self._stop_mpool(mpool) self._stop_mpool(mpool)
with self.mutex: with self.mutex:
n_add += len(self._flush_mpool(c3)) n_add += len(self._flush_mpool(cur))
with self.mutex:
cur.connection.commit()
return n_add
def _spool_warks(
self,
cur: "sqlite3.Cursor",
q: str,
params: tuple[Any, ...],
flt: int,
) -> tuple[tempfile.SpooledTemporaryFile[bytes], int]:
"""mutex me"""
n = 0
c2 = cur.connection.cursor()
tf = tempfile.SpooledTemporaryFile(1024 * 1024 * 8, "w+b", prefix="cpp-tq-")
with gzip.GzipFile(mode="wb", fileobj=tf) as gf:
for row in cur.execute(q, params):
if self.stop:
return tf, -1
if flt == 1:
q = "select w from mt where w = ?"
if c2.execute(q, (row[0][:16],)).fetchone():
continue
gf.write("{}\n".format("\x00".join(row)).encode("utf-8"))
n += 1
conn.commit()
c3.close()
c2.close() c2.close()
cur.close() tf.seek(0)
conn.close() self.spools.add(tf)
return tf, n
return n_add, n_rm, True def _unspool(self, tf: tempfile.SpooledTemporaryFile[bytes]) -> None:
try:
self.spools.remove(tf)
except:
return
try:
tf.close()
except Exception as ex:
self.log("failed to delete spool: {}".format(ex), 3)
def _flush_mpool(self, wcur: "sqlite3.Cursor") -> list[str]: def _flush_mpool(self, wcur: "sqlite3.Cursor") -> list[str]:
ret = [] ret = []
@@ -1317,21 +1541,38 @@ class Up2k(object):
msg = "{} failed to read tags from {}:\n{}".format(parser, abspath, ex) msg = "{} failed to read tags from {}:\n{}".format(parser, abspath, ex)
self.log(msg.lstrip(), c=1 if "<Signals.SIG" in msg else 3) self.log(msg.lstrip(), c=1 if "<Signals.SIG" in msg else 3)
def _tagscan_file(
self,
write_cur: "sqlite3.Cursor",
entags: set[str],
wark: str,
abspath: str,
) -> int:
"""will mutex"""
assert self.mtag
if not bos.path.isfile(abspath):
return 0
try:
tags = self.mtag.get(abspath)
except Exception as ex:
self._log_tag_err("", abspath, ex)
return 0
with self.mutex:
return self._tag_file(write_cur, entags, wark, abspath, tags)
def _tag_file( def _tag_file(
self, self,
write_cur: "sqlite3.Cursor", write_cur: "sqlite3.Cursor",
entags: set[str], entags: set[str],
wark: str, wark: str,
abspath: str, abspath: str,
tags: Optional[dict[str, Union[str, float]]] = None, tags: dict[str, Union[str, float]],
) -> int: ) -> int:
"""mutex me"""
assert self.mtag assert self.mtag
if tags is None:
try:
tags = self.mtag.get(abspath)
except Exception as ex:
self._log_tag_err("", abspath, ex)
return 0
if not bos.path.isfile(abspath): if not bos.path.isfile(abspath):
return 0 return 0
@@ -1361,8 +1602,7 @@ class Up2k(object):
return ret return ret
def _orz(self, db_path: str) -> "sqlite3.Cursor": def _orz(self, db_path: str) -> "sqlite3.Cursor":
timeout = int(max(self.args.srch_time, 5) * 1.2) return sqlite3.connect(db_path, self.timeout, check_same_thread=False).cursor()
return sqlite3.connect(db_path, timeout, check_same_thread=False).cursor()
# x.set_trace_callback(trace) # x.set_trace_callback(trace)
def _open_db(self, db_path: str) -> "sqlite3.Cursor": def _open_db(self, db_path: str) -> "sqlite3.Cursor":
@@ -1485,18 +1725,30 @@ class Up2k(object):
cur.connection.commit() cur.connection.commit()
def handle_json(self, cj: dict[str, Any]) -> dict[str, Any]: def _job_volchk(self, cj: dict[str, Any]) -> None:
with self.mutex:
if not self.register_vpath(cj["ptop"], cj["vcfg"]): if not self.register_vpath(cj["ptop"], cj["vcfg"]):
if cj["ptop"] not in self.registry: if cj["ptop"] not in self.registry:
raise Pebkac(410, "location unavailable") raise Pebkac(410, "location unavailable")
def handle_json(self, cj: dict[str, Any]) -> dict[str, Any]:
try:
# bit expensive; 3.9=10x 3.11=2x
if self.mutex.acquire(timeout=10):
self._job_volchk(cj)
self.mutex.release()
else:
t = "cannot receive uploads right now;\nserver busy with {}.\nPlease wait; the client will retry..."
raise Pebkac(503, t.format(self.blocked or "[unknown]"))
except TypeError:
# py2
with self.mutex:
self._job_volchk(cj)
cj["name"] = sanitize_fn(cj["name"], "", [".prologue.html", ".epilogue.html"]) cj["name"] = sanitize_fn(cj["name"], "", [".prologue.html", ".epilogue.html"])
cj["poke"] = time.time() cj["poke"] = now = self.db_act = time.time()
wark = self._get_wark(cj) wark = self._get_wark(cj)
now = time.time()
job = None job = None
pdir = os.path.join(cj["ptop"], cj["prel"]) pdir = djoin(cj["ptop"], cj["prel"])
try: try:
dev = bos.stat(pdir).st_dev dev = bos.stat(pdir).st_dev
except: except:
@@ -1608,7 +1860,7 @@ class Up2k(object):
for k in ["ptop", "vtop", "prel"]: for k in ["ptop", "vtop", "prel"]:
job[k] = cj[k] job[k] = cj[k]
pdir = os.path.join(cj["ptop"], cj["prel"]) pdir = djoin(cj["ptop"], cj["prel"])
job["name"] = self._untaken(pdir, cj["name"], now, cj["addr"]) job["name"] = self._untaken(pdir, cj["name"], now, cj["addr"])
dst = os.path.join(job["ptop"], job["prel"], job["name"]) dst = os.path.join(job["ptop"], job["prel"], job["name"])
if not self.args.nw: if not self.args.nw:
@@ -1624,9 +1876,9 @@ class Up2k(object):
if not job: if not job:
vfs = self.asrv.vfs.all_vols[cj["vtop"]] vfs = self.asrv.vfs.all_vols[cj["vtop"]]
if vfs.lim: if vfs.lim:
ap1 = os.path.join(cj["ptop"], cj["prel"]) ap1 = djoin(cj["ptop"], cj["prel"])
ap2, cj["prel"] = vfs.lim.all( ap2, cj["prel"] = vfs.lim.all(
cj["addr"], cj["prel"], cj["size"], ap1 cj["addr"], cj["prel"], cj["size"], ap1, reg
) )
bos.makedirs(ap2) bos.makedirs(ap2)
vfs.lim.nup(cj["addr"]) vfs.lim.nup(cj["addr"])
@@ -1662,7 +1914,11 @@ class Up2k(object):
job["need"].append(k) job["need"].append(k)
lut[k] = 1 lut[k] = 1
try:
self._new_upload(job) self._new_upload(job)
except:
self.registry[job["ptop"]].pop(job["wark"], None)
raise
purl = "{}/{}".format(job["vtop"], job["prel"]).strip("/") purl = "{}/{}".format(job["vtop"], job["prel"]).strip("/")
purl = "/{}/".format(purl) if purl else "/" purl = "/{}/".format(purl) if purl else "/"
@@ -1754,6 +2010,7 @@ class Up2k(object):
self, ptop: str, wark: str, chash: str self, ptop: str, wark: str, chash: str
) -> tuple[int, list[int], str, float, bool]: ) -> tuple[int, list[int], str, float, bool]:
with self.mutex: with self.mutex:
self.db_act = time.time()
job = self.registry[ptop].get(wark) job = self.registry[ptop].get(wark)
if not job: if not job:
known = " ".join([x for x in self.registry[ptop].keys()]) known = " ".join([x for x in self.registry[ptop].keys()])
@@ -1804,6 +2061,7 @@ class Up2k(object):
def confirm_chunk(self, ptop: str, wark: str, chash: str) -> tuple[int, str]: def confirm_chunk(self, ptop: str, wark: str, chash: str) -> tuple[int, str]:
with self.mutex: with self.mutex:
self.db_act = time.time()
try: try:
job = self.registry[ptop][wark] job = self.registry[ptop][wark]
pdir = os.path.join(job["ptop"], job["prel"]) pdir = os.path.join(job["ptop"], job["prel"])
@@ -1838,6 +2096,7 @@ class Up2k(object):
self._finish_upload(ptop, wark) self._finish_upload(ptop, wark)
def _finish_upload(self, ptop: str, wark: str) -> None: def _finish_upload(self, ptop: str, wark: str) -> None:
self.db_act = time.time()
try: try:
job = self.registry[ptop][wark] job = self.registry[ptop][wark]
pdir = os.path.join(job["ptop"], job["prel"]) pdir = os.path.join(job["ptop"], job["prel"])
@@ -1914,9 +2173,15 @@ class Up2k(object):
if not cur: if not cur:
return False return False
try:
self.db_rm(cur, rd, fn) self.db_rm(cur, rd, fn)
self.db_add(cur, wark, rd, fn, lmod, sz, ip, at) self.db_add(cur, wark, rd, fn, lmod, sz, ip, at)
cur.connection.commit() cur.connection.commit()
except Exception as ex:
x = self.register_vpath(ptop, {})
assert x
db_ex_chk(self.log, ex, x[1])
raise
if "e2t" in self.flags[ptop]: if "e2t" in self.flags[ptop]:
self.tagq.put((ptop, wark, rd, fn)) self.tagq.put((ptop, wark, rd, fn))
@@ -1974,6 +2239,7 @@ class Up2k(object):
def _handle_rm( def _handle_rm(
self, uname: str, ip: str, vpath: str self, uname: str, ip: str, vpath: str
) -> tuple[int, list[str], list[str]]: ) -> tuple[int, list[str], list[str]]:
self.db_act = time.time()
try: try:
permsets = [[True, False, False, True]] permsets = [[True, False, False, True]]
vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0]) vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0])
@@ -2058,6 +2324,7 @@ class Up2k(object):
return n_files, ok + ok2, ng + ng2 return n_files, ok + ok2, ng + ng2
def handle_mv(self, uname: str, svp: str, dvp: str) -> str: def handle_mv(self, uname: str, svp: str, dvp: str) -> str:
self.db_act = time.time()
svn, srem = self.asrv.vfs.get(svp, uname, True, False, True) svn, srem = self.asrv.vfs.get(svp, uname, True, False, True)
svn, srem = svn.get_dbv(srem) svn, srem = svn.get_dbv(srem)
sabs = svn.canonical(srem, False) sabs = svn.canonical(srem, False)
@@ -2360,6 +2627,9 @@ class Up2k(object):
ret = [] ret = []
with open(fsenc(path), "rb", 512 * 1024) as f: with open(fsenc(path), "rb", 512 * 1024) as f:
while fsz > 0: while fsz > 0:
if self.stop:
return []
if self.pp: if self.pp:
mb = int(fsz / 1024 / 1024) mb = int(fsz / 1024 / 1024)
self.pp.msg = "{}{} MB, {}".format(prefix, mb, path) self.pp.msg = "{}{} MB, {}".format(prefix, mb, path)
@@ -2382,7 +2652,7 @@ class Up2k(object):
return ret return ret
def _new_upload(self, job: dict[str, Any]) -> None: def _new_upload(self, job: dict[str, Any]) -> None:
pdir = os.path.join(job["ptop"], job["prel"]) pdir = djoin(job["ptop"], job["prel"])
if not job["size"] and bos.path.isfile(os.path.join(pdir, job["name"])): if not job["size"] and bos.path.isfile(os.path.join(pdir, job["name"])):
return return
@@ -2604,6 +2874,9 @@ class Up2k(object):
self.log("hashing " + abspath) self.log("hashing " + abspath)
inf = bos.stat(abspath) inf = bos.stat(abspath)
hashes = self._hashlist_from_file(abspath) hashes = self._hashlist_from_file(abspath)
if not hashes:
return
wark = up2k_wark_from_hashlist(self.salt, inf.st_size, hashes) wark = up2k_wark_from_hashlist(self.salt, inf.st_size, hashes)
with self.mutex: with self.mutex:
self.idx_wark(ptop, wark, rd, fn, inf.st_mtime, inf.st_size, ip, at) self.idx_wark(ptop, wark, rd, fn, inf.st_mtime, inf.st_size, ip, at)
@@ -2619,6 +2892,10 @@ class Up2k(object):
def shutdown(self) -> None: def shutdown(self) -> None:
self.stop = True self.stop = True
for x in list(self.spools):
self._unspool(x)
self.log("writing snapshot") self.log("writing snapshot")
self.do_snapshot() self.do_snapshot()
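
Note: handle_json no longer queues forever behind an indexing pass; it tries the mutex with a 10-second deadline and answers 503 (which up2k clients retry) while the server is still blocked. py2's lock has no timeout kwarg, hence the TypeError fallback. The pattern, reduced (names are mine):

    import threading

    mutex = threading.Lock()

    def with_deadline(fn):
        try:
            ok = mutex.acquire(timeout=10)  # py2 raises TypeError on the kwarg
        except TypeError:
            ok = mutex.acquire()            # py2: block with no deadline
        if not ok:
            raise RuntimeError("server busy; retry later")  # real code: Pebkac(503)
        try:
            return fn()
        finally:
            mutex.release()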
View File
@@ -22,8 +22,14 @@ from collections import Counter
from datetime import datetime from datetime import datetime
from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, WINDOWS from .__init__ import ANYWIN, PY2, TYPE_CHECKING, VT100, WINDOWS
from .__version__ import S_BUILD_DT, S_VERSION
from .stolen import surrogateescape from .stolen import surrogateescape
try:
import ctypes
except:
pass
try: try:
HAVE_SQLITE3 = True HAVE_SQLITE3 = True
import sqlite3 # pylint: disable=unused-import # typechk import sqlite3 # pylint: disable=unused-import # typechk
@@ -41,7 +47,7 @@ try:
from collections.abc import Callable, Iterable from collections.abc import Callable, Iterable
import typing import typing
from typing import Any, Generator, Optional, Protocol, Union from typing import Any, Generator, Optional, Pattern, Protocol, Union
class RootLogger(Protocol): class RootLogger(Protocol):
def __call__(self, src: str, msg: str, c: Union[int, str] = 0) -> None: def __call__(self, src: str, msg: str, c: Union[int, str] = 0) -> None:
@@ -78,8 +84,6 @@ else:
from urllib import quote # pylint: disable=no-name-in-module from urllib import quote # pylint: disable=no-name-in-module
from urllib import unquote # pylint: disable=no-name-in-module from urllib import unquote # pylint: disable=no-name-in-module
_: Any = (mp, BytesIO, quote, unquote)
__all__ = ["mp", "BytesIO", "quote", "unquote"]
try: try:
struct.unpack(b">i", b"idgi") struct.unpack(b">i", b"idgi")
@@ -208,6 +212,54 @@ REKOBO_KEY = {
REKOBO_LKEY = {k.lower(): v for k, v in REKOBO_KEY.items()} REKOBO_LKEY = {k.lower(): v for k, v in REKOBO_KEY.items()}
def py_desc() -> str:
interp = platform.python_implementation()
py_ver = ".".join([str(x) for x in sys.version_info])
ofs = py_ver.find(".final.")
if ofs > 0:
py_ver = py_ver[:ofs]
try:
bitness = struct.calcsize(b"P") * 8
except:
bitness = struct.calcsize("P") * 8
host_os = platform.system()
compiler = platform.python_compiler()
m = re.search(r"([0-9]+\.[0-9\.]+)", platform.version())
os_ver = m.group(1) if m else ""
return "{:>9} v{} on {}{} {} [{}]".format(
interp, py_ver, host_os, bitness, os_ver, compiler
)
try:
from sqlite3 import sqlite_version as SQLITE_VER
except:
SQLITE_VER = "(None)"
try:
from jinja2 import __version__ as JINJA_VER
except:
JINJA_VER = "(None)"
try:
from pyftpdlib.__init__ import __ver__ as PYFTPD_VER
except:
PYFTPD_VER = "(None)"
VERSIONS = "copyparty v{} ({})\n{}\n sqlite v{} | jinja v{} | pyftpd v{}".format(
S_VERSION, S_BUILD_DT, py_desc(), SQLITE_VER, JINJA_VER, PYFTPD_VER
)
_: Any = (mp, BytesIO, quote, unquote, SQLITE_VER, JINJA_VER, PYFTPD_VER)
__all__ = ["mp", "BytesIO", "quote", "unquote", "SQLITE_VER", "JINJA_VER", "PYFTPD_VER"]
class Cooldown(object): class Cooldown(object):
def __init__(self, maxage: float) -> None: def __init__(self, maxage: float) -> None:
self.maxage = maxage self.maxage = maxage
@@ -243,7 +295,7 @@ class _Unrecv(object):
undo any number of socket recv ops undo any number of socket recv ops
""" """
def __init__(self, s: socket.socket, log: Optional[NamedLogger]) -> None: def __init__(self, s: socket.socket, log: Optional["NamedLogger"]) -> None:
self.s = s self.s = s
self.log = log self.log = log
self.buf: bytes = b"" self.buf: bytes = b""
@@ -287,7 +339,7 @@ class _LUnrecv(object):
with expensive debug logging with expensive debug logging
""" """
def __init__(self, s: socket.socket, log: Optional[NamedLogger]) -> None: def __init__(self, s: socket.socket, log: Optional["NamedLogger"]) -> None:
self.s = s self.s = s
self.log = log self.log = log
self.buf = b"" self.buf = b""
@@ -662,7 +714,9 @@ def ren_open(
class MultipartParser(object): class MultipartParser(object):
def __init__(self, log_func: NamedLogger, sr: Unrecv, http_headers: dict[str, str]): def __init__(
self, log_func: "NamedLogger", sr: Unrecv, http_headers: dict[str, str]
):
self.sr = sr self.sr = sr
self.log = log_func self.log = log_func
self.headers = http_headers self.headers = http_headers
@@ -925,6 +979,24 @@ def gen_filekey(salt: str, fspath: str, fsize: int, inode: int) -> str:
).decode("ascii") ).decode("ascii")
def gen_filekey_dbg(
salt: str,
fspath: str,
fsize: int,
inode: int,
log: "NamedLogger",
log_ptn: Optional[Pattern[str]],
) -> str:
ret = gen_filekey(salt, fspath, fsize, inode)
assert log_ptn
if log_ptn.search(fspath):
t = "fk({}) salt({}) size({}) inode({}) fspath({})"
log(t.format(ret[:8], salt, fsize, inode, fspath))
return ret
def gencookie(k: str, v: str, dur: Optional[int]) -> str: def gencookie(k: str, v: str, dur: Optional[int]) -> str:
v = v.replace(";", "") v = v.replace(";", "")
if dur: if dur:
@@ -982,6 +1054,11 @@ def s2hms(s: float, optional_h: bool = False) -> str:
return "{}:{:02}:{:02}".format(h, m, s) return "{}:{:02}:{:02}".format(h, m, s)
def djoin(*paths: str) -> str:
"""joins without adding a trailing slash on blank args"""
return os.path.join(*[x for x in paths if x])
def uncyg(path: str) -> str: def uncyg(path: str) -> str:
if len(path) < 2 or not path.startswith("/"): if len(path) < 2 or not path.startswith("/"):
return path return path
@@ -1184,15 +1261,30 @@ def s3enc(mem_cur: "sqlite3.Cursor", rd: str, fn: str) -> tuple[str, str]:
def s3dec(rd: str, fn: str) -> tuple[str, str]: def s3dec(rd: str, fn: str) -> tuple[str, str]:
ret = [] return (
for v in [rd, fn]: w8b64dec(rd[2:]) if rd.startswith("//") else rd,
if v.startswith("//"): w8b64dec(fn[2:]) if fn.startswith("//") else fn,
ret.append(w8b64dec(v[2:])) )
# self.log("mojide [{}] {}".format(ret[-1], v[2:]))
else:
ret.append(v)
return ret[0], ret[1]
def db_ex_chk(log: "NamedLogger", ex: Exception, db_path: str) -> bool:
if str(ex) != "database is locked":
return False
thr = threading.Thread(target=lsof, args=(log, db_path))
thr.daemon = True
thr.start()
return True
def lsof(log: "NamedLogger", abspath: str) -> None:
try:
rc, so, se = runcmd([b"lsof", b"-R", fsenc(abspath)], timeout=5)
zs = (so.strip() + "\n" + se.strip()).strip()
log("lsof {} = {}\n{}".format(abspath, rc, zs), 3)
except:
log("lsof failed; " + min_ex(), 3)
def atomic_move(usrc: str, udst: str) -> None: def atomic_move(usrc: str, udst: str) -> None:
@@ -1207,6 +1299,24 @@ def atomic_move(usrc: str, udst: str) -> None:
os.rename(src, dst) os.rename(src, dst)
def get_df(abspath: str) -> tuple[Optional[int], Optional[int]]:
try:
# some fuses misbehave
if ANYWIN:
bfree = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW( # type: ignore
ctypes.c_wchar_p(abspath), None, None, ctypes.pointer(bfree)
)
return (bfree.value, None)
else:
sv = os.statvfs(fsenc(abspath))
free = sv.f_frsize * sv.f_bfree
total = sv.f_frsize * sv.f_blocks
return (free, total)
except:
return (None, None)
def read_socket(sr: Unrecv, total_size: int) -> Generator[bytes, None, None]: def read_socket(sr: Unrecv, total_size: int) -> Generator[bytes, None, None]:
remains = total_size remains = total_size
while remains > 0: while remains > 0:
@@ -1233,7 +1343,7 @@ def read_socket_unbounded(sr: Unrecv) -> Generator[bytes, None, None]:
def read_socket_chunked( def read_socket_chunked(
sr: Unrecv, log: Optional[NamedLogger] = None sr: Unrecv, log: Optional["NamedLogger"] = None
) -> Generator[bytes, None, None]: ) -> Generator[bytes, None, None]:
err = "upload aborted: expected chunk length, got [{}] |{}| instead" err = "upload aborted: expected chunk length, got [{}] |{}| instead"
while True: while True:
@@ -1311,7 +1421,7 @@ def hashcopy(
def sendfile_py( def sendfile_py(
log: NamedLogger, log: "NamedLogger",
lower: int, lower: int,
upper: int, upper: int,
f: typing.BinaryIO, f: typing.BinaryIO,
@@ -1339,7 +1449,7 @@ def sendfile_py(
def sendfile_kern( def sendfile_kern(
log: NamedLogger, log: "NamedLogger",
lower: int, lower: int,
upper: int, upper: int,
f: typing.BinaryIO, f: typing.BinaryIO,
@@ -1380,7 +1490,7 @@ def sendfile_kern(
def statdir( def statdir(
logger: Optional[RootLogger], scandir: bool, lstat: bool, top: str logger: Optional["RootLogger"], scandir: bool, lstat: bool, top: str
) -> Generator[tuple[str, os.stat_result], None, None]: ) -> Generator[tuple[str, os.stat_result], None, None]:
if lstat and ANYWIN: if lstat and ANYWIN:
lstat = False lstat = False
@@ -1423,7 +1533,7 @@ def statdir(
def rmdirs( def rmdirs(
logger: RootLogger, scandir: bool, lstat: bool, top: str, depth: int logger: "RootLogger", scandir: bool, lstat: bool, top: str, depth: int
) -> tuple[list[str], list[str]]: ) -> tuple[list[str], list[str]]:
"""rmdir all descendants, then self""" """rmdir all descendants, then self"""
if not os.path.isdir(fsenc(top)): if not os.path.isdir(fsenc(top)):
@@ -1644,7 +1754,7 @@ def retchk(
rc: int, rc: int,
cmd: Union[list[bytes], list[str]], cmd: Union[list[bytes], list[str]],
serr: str, serr: str,
logger: Optional[NamedLogger] = None, logger: Optional["NamedLogger"] = None,
color: Union[int, str] = 0, color: Union[int, str] = 0,
verbose: bool = False, verbose: bool = False,
) -> None: ) -> None:
@@ -1696,29 +1806,6 @@ def gzip_orig_sz(fn: str) -> int:
return sunpack(b"I", rv)[0] # type: ignore return sunpack(b"I", rv)[0] # type: ignore
def py_desc() -> str:
interp = platform.python_implementation()
py_ver = ".".join([str(x) for x in sys.version_info])
ofs = py_ver.find(".final.")
if ofs > 0:
py_ver = py_ver[:ofs]
try:
bitness = struct.calcsize(b"P") * 8
except:
bitness = struct.calcsize("P") * 8
host_os = platform.system()
compiler = platform.python_compiler()
m = re.search(r"([0-9]+\.[0-9\.]+)", platform.version())
os_ver = m.group(1) if m else ""
return "{:>9} v{} on {}{} {} [{}]".format(
interp, py_ver, host_os, bitness, os_ver, compiler
)
def align_tab(lines: list[str]) -> list[str]: def align_tab(lines: list[str]) -> list[str]:
rows = [] rows = []
ncols = 0 ncols = 0


@@ -224,6 +224,7 @@ window.baguetteBox = (function () {
['space, P, K', 'video: play / pause'], ['space, P, K', 'video: play / pause'],
['U', 'video: seek 10sec back'], ['U', 'video: seek 10sec back'],
['P', 'video: seek 10sec ahead'], ['P', 'video: seek 10sec ahead'],
['0..9', 'video: seek 0%..90%'],
['M', 'video: toggle mute'], ['M', 'video: toggle mute'],
['V', 'video: toggle loop'], ['V', 'video: toggle loop'],
['C', 'video: toggle auto-next'], ['C', 'video: toggle auto-next'],
@@ -248,7 +249,7 @@ window.baguetteBox = (function () {
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing || modal.busy) if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing || modal.busy)
return; return;
var k = e.code + '', v = vid(); var k = e.code + '', v = vid(), pos = -1;
if (k == "ArrowLeft" || k == "KeyJ") if (k == "ArrowLeft" || k == "KeyJ")
showPreviousImage(); showPreviousImage();
@@ -264,6 +265,8 @@ window.baguetteBox = (function () {
playpause(); playpause();
else if (k == "KeyU" || k == "KeyO") else if (k == "KeyU" || k == "KeyO")
relseek(k == "KeyU" ? -10 : 10); relseek(k == "KeyU" ? -10 : 10);
else if (k.indexOf('Digit') === 0)
vid().currentTime = vid().duration * parseInt(k.slice(-1)) * 0.1;
else if (k == "KeyM" && v) { else if (k == "KeyM" && v) {
v.muted = vmute = !vmute; v.muted = vmute = !vmute;
mp_ctl(); mp_ctl();
@@ -696,18 +699,12 @@ window.baguetteBox = (function () {
showOverlay(index); showOverlay(index);
return true; return true;
} }
if (index < 0) {
if (options.animation)
bounceAnimation('left');
return false; if (index < 0)
} return bounceAnimation('left');
if (index >= imagesElements.length) {
if (options.animation)
bounceAnimation('right');
return false; if (index >= imagesElements.length)
} return bounceAnimation('right');
var v = vid(); var v = vid();
if (v) { if (v) {
@@ -890,10 +887,11 @@ window.baguetteBox = (function () {
} }
function bounceAnimation(direction) { function bounceAnimation(direction) {
slider.className = 'bounce-from-' + direction; slider.className = options.animation == 'slideIn' ? 'bounce-from-' + direction : 'eog';
setTimeout(function () { setTimeout(function () {
slider.className = ''; slider.className = '';
}, 400); }, 300);
return false;
} }
function updateOffset() { function updateOffset() {


@@ -259,7 +259,7 @@ html.bz {
--bg-d2: #34384e; --bg-d2: #34384e;
--bg-d3: #34384e; --bg-d3: #34384e;
--row-alt: rgba(139, 150, 205, 0.06); --row-alt: #181a27;
--btn-bg: #202231; --btn-bg: #202231;
--btn-h-bg: #2d2f45; --btn-h-bg: #2d2f45;
@@ -309,7 +309,7 @@ html.c {
--a-gray: #0ae; --a-gray: #0ae;
--tab-alt: #6ef; --tab-alt: #6ef;
--row-alt: rgba(180,0,255,0.3); --row-alt: #47237d;
--scroll: #ff0; --scroll: #ff0;
--btn-fg: #fff; --btn-fg: #fff;
@@ -544,6 +544,9 @@ html.dy {
--tree-bg: #fff; --tree-bg: #fff;
--g-sel-bg: #000;
--g-fsel-bg: #444;
--g-fsel-ts: #000;
--g-fg: a; --g-fg: a;
--g-bg: a; --g-bg: a;
--g-b1: a; --g-b1: a;
@@ -707,6 +710,7 @@ html.y #files thead th {
#files td { #files td {
margin: 0; margin: 0;
padding: .3em .5em; padding: .3em .5em;
background: var(--bg);
} }
#files tr:nth-child(2n) td { #files tr:nth-child(2n) td {
background: var(--row-alt); background: var(--row-alt);
@@ -1595,9 +1599,6 @@ html.y #tree.nowrap .ntree a+a:hover {
margin: .7em 0 .7em .5em; margin: .7em 0 .7em .5em;
padding-left: .5em; padding-left: .5em;
} }
.opwide>div.fill {
display: block;
}
.opwide>div>div>a { .opwide>div>div>a {
line-height: 2em; line-height: 2em;
} }
@@ -1908,10 +1909,13 @@ html.y #bbox-overlay figcaption a {
transition: left .2s ease, transform .2s ease; transition: left .2s ease, transform .2s ease;
} }
.bounce-from-right { .bounce-from-right {
animation: bounceFromRight .4s ease-out; animation: bounceFromRight .3s ease-out;
} }
.bounce-from-left { .bounce-from-left {
animation: bounceFromLeft .4s ease-out; animation: bounceFromLeft .3s ease-out;
}
.eog {
animation: eog .2s;
} }
@keyframes bounceFromRight { @keyframes bounceFromRight {
0% {margin-left: 0} 0% {margin-left: 0}
@@ -1923,6 +1927,9 @@ html.y #bbox-overlay figcaption a {
50% {margin-left: 30px} 50% {margin-left: 30px}
100% {margin-left: 0} 100% {margin-left: 0}
} }
@keyframes eog {
0% {filter: brightness(1.5)}
}
#bbox-next, #bbox-next,
#bbox-prev { #bbox-prev {
top: 50%; top: 50%;
@@ -2244,6 +2251,7 @@ html.y #bbox-overlay figcaption a {
max-width: none; max-width: none;
} }
#u2tab td { #u2tab td {
word-wrap: break-word;
border: 1px solid rgba(128,128,128,0.8); border: 1px solid rgba(128,128,128,0.8);
border-width: 0 0px 1px 0; border-width: 0 0px 1px 0;
padding: .2em .3em; padding: .2em .3em;
@@ -2258,7 +2266,19 @@ html.y #bbox-overlay figcaption a {
#u2tab.up.ok td:nth-child(3), #u2tab.up.ok td:nth-child(3),
#u2tab.up.bz td:nth-child(3), #u2tab.up.bz td:nth-child(3),
#u2tab.up.q td:nth-child(3) { #u2tab.up.q td:nth-child(3) {
width: 19em; width: 18em;
}
@media (max-width: 65em) {
#u2tab {
font-size: .9em;
}
}
@media (max-width: 50em) {
#u2tab.up.ok td:nth-child(3),
#u2tab.up.bz td:nth-child(3),
#u2tab.up.q td:nth-child(3) {
width: 16em;
}
} }
#op_up2k.srch td.prog { #op_up2k.srch td.prog {
font-family: sans-serif; font-family: sans-serif;


@@ -106,6 +106,7 @@ var Ls = {
"ct_thumb": "in icon view, toggle icons or thumbnails$NHotkey: T", "ct_thumb": "in icon view, toggle icons or thumbnails$NHotkey: T",
"ct_dots": "show hidden files (if server permits)", "ct_dots": "show hidden files (if server permits)",
"ct_dir1st": "sort folders before files",
"ct_readme": "show README.md in folder listings", "ct_readme": "show README.md in folder listings",
"cut_turbo": "the yolo button, you probably DO NOT want to enable this:$N$Nuse this if you were uploading a huge amount of files and had to restart for some reason, and want to continue the upload ASAP$N$Nthis replaces the hash-check with a simple <em>&quot;does this have the same filesize on the server?&quot;</em> so if the file contents are different it will NOT be uploaded$N$Nyou should turn this off when the upload is done, and then &quot;upload&quot; the same files again to let the client verify them", "cut_turbo": "the yolo button, you probably DO NOT want to enable this:$N$Nuse this if you were uploading a huge amount of files and had to restart for some reason, and want to continue the upload ASAP$N$Nthis replaces the hash-check with a simple <em>&quot;does this have the same filesize on the server?&quot;</em> so if the file contents are different it will NOT be uploaded$N$Nyou should turn this off when the upload is done, and then &quot;upload&quot; the same files again to let the client verify them",
@@ -146,7 +147,7 @@ var Ls = {
"mt_caac": "convert aac / m4a to opus\">aac", "mt_caac": "convert aac / m4a to opus\">aac",
"mt_coth": "convert all others (not mp3) to opus\">oth", "mt_coth": "convert all others (not mp3) to opus\">oth",
"mt_tint": "background level (0-100) on the seekbar$Nto make buffering less distracting", "mt_tint": "background level (0-100) on the seekbar$Nto make buffering less distracting",
"mt_eq": "enables the equalizer and gain control;$Nboost 0 = unmodified 100% volume$N$Nenabling the equalizer makes gapless albums fully gapless, so leave it on with all the values at zero if you care about that", "mt_eq": "enables the equalizer and gain control;$N$Nboost &lt;code&gt;0&lt;/code&gt; = standard 100% volume (unmodified)$N$Nwidth &lt;code&gt;1 &nbsp;&lt;/code&gt; = standard stereo (unmodified)$Nwidth &lt;code&gt;0.5&lt;/code&gt; = 50% left-right crossfeed$Nwidth &lt;code&gt;0 &nbsp;&lt;/code&gt; = mono$N$Nboost &lt;code&gt;-0.8&lt;/code&gt; &amp; width &lt;code&gt;10&lt;/code&gt; = vocal removal :^)$N$Nenabling the equalizer makes gapless albums fully gapless, so leave it on with all the values at zero (except width = 1) if you care about that",
"mb_play": "play", "mb_play": "play",
"mm_hashplay": "play this audio file?", "mm_hashplay": "play this audio file?",
@@ -308,9 +309,11 @@ var Ls = {
"u_upping": 'uploading', "u_upping": 'uploading',
"u_cuerr": "failed to upload chunk {0} of {1};\nprobably harmless, continuing\n\nfile: {2}", "u_cuerr": "failed to upload chunk {0} of {1};\nprobably harmless, continuing\n\nfile: {2}",
"u_cuerr2": "server rejected upload (chunk {0} of {1});\n\nfile: {2}\n\nerror ", "u_cuerr2": "server rejected upload (chunk {0} of {1});\n\nfile: {2}\n\nerror ",
"u_ehstmp": "will retry; see bottom-right",
"u_ehsfin": "server rejected the request to finalize upload", "u_ehsfin": "server rejected the request to finalize upload",
"u_ehssrch": "server rejected the request to perform search", "u_ehssrch": "server rejected the request to perform search",
"u_ehsinit": "server rejected the request to initiate upload", "u_ehsinit": "server rejected the request to initiate upload",
"u_ehsdf": "server ran out of disk space!\n\nwill keep retrying, in case someone\nfrees up enough space to continue",
"u_s404": "not found on server", "u_s404": "not found on server",
"u_expl": "explain", "u_expl": "explain",
"u_tu": '<p class="warn">WARNING: turbo enabled, <span>&nbsp;client may not detect and resume incomplete uploads; see turbo-button tooltip</span></p>', "u_tu": '<p class="warn">WARNING: turbo enabled, <span>&nbsp;client may not detect and resume incomplete uploads; see turbo-button tooltip</span></p>',
@@ -437,6 +440,7 @@ var Ls = {
"ct_thumb": "vis miniatyrbilder istedenfor ikoner$NSnarvei: T", "ct_thumb": "vis miniatyrbilder istedenfor ikoner$NSnarvei: T",
"ct_dots": "vis skjulte filer (gitt at serveren tillater det)", "ct_dots": "vis skjulte filer (gitt at serveren tillater det)",
"ct_dir1st": "sorter slik at mapper kommer foran filer",
"ct_readme": "vis README.md nedenfor filene", "ct_readme": "vis README.md nedenfor filene",
"cut_turbo": "forenklet befaring ved opplastning; bør sannsynlig <em>ikke</em> skrus på:$N$Nnyttig dersom du var midt i en svær opplastning som måtte restartes av en eller annen grunn, og du vil komme igang igjen så raskt som overhodet mulig.$N$Nnår denne er skrudd på så forenkles befaringen kraftig; istedenfor å utføre en trygg sjekk på om filene finnes på serveren i god stand, så sjekkes kun om <em>filstørrelsen</em> stemmer. Så dersom en korrupt fil skulle befinne seg på serveren allerede, på samme sted med samme størrelse og navn, så blir det <em>ikke oppdaget</em>.$N$Ndet anbefales å kun benytte denne funksjonen for å komme seg raskt igjennom selve opplastningen, for så å skru den av, og til slutt &quot;laste opp&quot; de samme filene én gang til -- slik at integriteten kan verifiseres", "cut_turbo": "forenklet befaring ved opplastning; bør sannsynlig <em>ikke</em> skrus på:$N$Nnyttig dersom du var midt i en svær opplastning som måtte restartes av en eller annen grunn, og du vil komme igang igjen så raskt som overhodet mulig.$N$Nnår denne er skrudd på så forenkles befaringen kraftig; istedenfor å utføre en trygg sjekk på om filene finnes på serveren i god stand, så sjekkes kun om <em>filstørrelsen</em> stemmer. Så dersom en korrupt fil skulle befinne seg på serveren allerede, på samme sted med samme størrelse og navn, så blir det <em>ikke oppdaget</em>.$N$Ndet anbefales å kun benytte denne funksjonen for å komme seg raskt igjennom selve opplastningen, for så å skru den av, og til slutt &quot;laste opp&quot; de samme filene én gang til -- slik at integriteten kan verifiseres",
@@ -477,7 +481,7 @@ var Ls = {
"mt_caac": "konverter aac / m4a-filer til to opus\">aac", "mt_caac": "konverter aac / m4a-filer til to opus\">aac",
"mt_coth": "konverter alt annet (men ikke mp3) til opus\">andre", "mt_coth": "konverter alt annet (men ikke mp3) til opus\">andre",
"mt_tint": "nivå av bakgrunnsfarge på søkestripa (0-100),$Ngjør oppdateringer mindre distraherende", "mt_tint": "nivå av bakgrunnsfarge på søkestripa (0-100),$Ngjør oppdateringer mindre distraherende",
"mt_eq": "aktiver tonekontroll og forsterker;$Nboost 0 = normal volumskala$N$Nreduserer også dødtid imellom sangfiler", "mt_eq": "aktiver tonekontroll og forsterker;$N$Nboost &lt;code&gt;0&lt;/code&gt; = normal volumskala$N$Nwidth &lt;code&gt;1 &nbsp;&lt;/code&gt; = normal stereo$Nwidth &lt;code&gt;0.5&lt;/code&gt; = 50% blanding venstre-høyre$Nwidth &lt;code&gt;0 &nbsp;&lt;/code&gt; = mono$N$Nboost &lt;code&gt;-0.8&lt;/code&gt; &amp; width &lt;code&gt;10&lt;/code&gt; = instrumental :^)$N$Nreduserer også dødtid imellom sangfiler",
"mb_play": "lytt", "mb_play": "lytt",
"mm_hashplay": "spill denne sangen?", "mm_hashplay": "spill denne sangen?",
@@ -639,9 +643,11 @@ var Ls = {
"u_upping": 'sender', "u_upping": 'sender',
"u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert harmløst, fortsetter\n\nfil: {2}", "u_cuerr": "kunne ikke laste opp del {0} av {1};\nsikkert harmløst, fortsetter\n\nfil: {2}",
"u_cuerr2": "server nektet opplastningen (del {0} av {1});\n\nfile: {2}\n\nerror ", "u_cuerr2": "server nektet opplastningen (del {0} av {1});\n\nfile: {2}\n\nerror ",
"u_ehstmp": "prøver igjen; se mld nederst",
"u_ehsfin": "server nektet forespørselen om å ferdigstille filen", "u_ehsfin": "server nektet forespørselen om å ferdigstille filen",
"u_ehssrch": "server nektet forespørselen om å utføre søk", "u_ehssrch": "server nektet forespørselen om å utføre søk",
"u_ehsinit": "server nektet forespørselen om å begynne en ny opplastning", "u_ehsinit": "server nektet forespørselen om å begynne en ny opplastning",
"u_ehsdf": "serveren er full!\n\nprøver igjen regelmessig,\ni tilfelle noen rydder litt...",
"u_s404": "ikke funnet på serveren", "u_s404": "ikke funnet på serveren",
"u_expl": "forklar", "u_expl": "forklar",
"u_tu": '<p class="warn">ADVARSEL: turbo er på, <span>&nbsp;avbrutte opplastninger vil muligens ikke oppdages og gjenopptas; hold musepekeren over turbo-knappen for mer info</span></p>', "u_tu": '<p class="warn">ADVARSEL: turbo er på, <span>&nbsp;avbrutte opplastninger vil muligens ikke oppdages og gjenopptas; hold musepekeren over turbo-knappen for mer info</span></p>',
@@ -818,6 +824,7 @@ ebi('op_cfg').innerHTML = (
' <a id="griden" class="tgl btn" href="#" tt="' + L.wt_grid + '">田 the grid</a>\n' + ' <a id="griden" class="tgl btn" href="#" tt="' + L.wt_grid + '">田 the grid</a>\n' +
' <a id="thumbs" class="tgl btn" href="#" tt="' + L.ct_thumb + '">🖼️ thumbs</a>\n' + ' <a id="thumbs" class="tgl btn" href="#" tt="' + L.ct_thumb + '">🖼️ thumbs</a>\n' +
' <a id="dotfiles" class="tgl btn" href="#" tt="' + L.ct_dots + '">dotfiles</a>\n' + ' <a id="dotfiles" class="tgl btn" href="#" tt="' + L.ct_dots + '">dotfiles</a>\n' +
' <a id="dir1st" class="tgl btn" href="#" tt="' + L.ct_dir1st + '">📁 first</a>\n' +
' <a id="ireadme" class="tgl btn" href="#" tt="' + L.ct_readme + '">📜 readme</a>\n' + ' <a id="ireadme" class="tgl btn" href="#" tt="' + L.ct_readme + '">📜 readme</a>\n' +
' </div>\n' + ' </div>\n' +
'</div>\n' + '</div>\n' +
@@ -854,7 +861,7 @@ ebi('op_cfg').innerHTML = (
' </div>\n' + ' </div>\n' +
'</div>\n' + '</div>\n' +
'<div><h3>' + L.cl_keytype + '</h3><div id="key_notation"></div></div>\n' + '<div><h3>' + L.cl_keytype + '</h3><div id="key_notation"></div></div>\n' +
'<div class="fill"><h3>' + L.cl_hiddenc + ' <a href="#" id="hcolsr">' + L.cl_reset + '</h3><div id="hcols"></div></div>' '<div><h3>' + L.cl_hiddenc + ' <a href="#" id="hcolsr">' + L.cl_reset + '</h3><div id="hcols"></div></div>'
); );
@@ -1890,6 +1897,7 @@ var audio_eq = (function () {
"gains": [4, 3, 2, 1, 0, 0, 1, 2, 3, 4], "gains": [4, 3, 2, 1, 0, 0, 1, 2, 3, 4],
"filters": [], "filters": [],
"amp": 0, "amp": 0,
"chw": 1,
"last_au": null, "last_au": null,
"acst": {} "acst": {}
}; };
@@ -1941,6 +1949,7 @@ var audio_eq = (function () {
try { try {
r.amp = fcfg_get('au_eq_amp', r.amp); r.amp = fcfg_get('au_eq_amp', r.amp);
r.chw = fcfg_get('au_eq_chw', r.chw);
var gains = jread('au_eq_gain', r.gains); var gains = jread('au_eq_gain', r.gains);
if (r.gains.length == gains.length) if (r.gains.length == gains.length)
r.gains = gains; r.gains = gains;
@@ -1950,12 +1959,14 @@ var audio_eq = (function () {
r.draw = function () { r.draw = function () {
jwrite('au_eq_gain', r.gains); jwrite('au_eq_gain', r.gains);
swrite('au_eq_amp', r.amp); swrite('au_eq_amp', r.amp);
swrite('au_eq_chw', r.chw);
var txt = QSA('input.eq_gain'); var txt = QSA('input.eq_gain');
for (var a = 0; a < r.bands.length; a++) for (var a = 0; a < r.bands.length; a++)
txt[a].value = r.gains[a]; txt[a].value = r.gains[a];
QS('input.eq_gain[band="amp"]').value = r.amp; QS('input.eq_gain[band="amp"]').value = r.amp;
QS('input.eq_gain[band="chw"]').value = r.chw;
}; };
r.stop = function () { r.stop = function () {
@@ -2025,16 +2036,47 @@ var audio_eq = (function () {
for (var a = r.filters.length - 1; a >= 0; a--) for (var a = r.filters.length - 1; a >= 0; a--)
r.filters[a].connect(a > 0 ? r.filters[a - 1] : actx.destination); r.filters[a].connect(a > 0 ? r.filters[a - 1] : actx.destination);
if (Math.round(r.chw * 25) != 25) {
var split = actx.createChannelSplitter(2),
merge = actx.createChannelMerger(2),
lg1 = actx.createGain(),
lg2 = actx.createGain(),
rg1 = actx.createGain(),
rg2 = actx.createGain(),
vg1 = 1 - (1 - r.chw) / 2,
vg2 = 1 - vg1;
console.log('chw', vg1, vg2);
merge.connect(r.filters[r.filters.length - 1]);
lg1.gain.value = rg2.gain.value = vg1;
lg2.gain.value = rg1.gain.value = vg2;
lg1.connect(merge, 0, 0);
rg1.connect(merge, 0, 0);
lg2.connect(merge, 0, 1);
rg2.connect(merge, 0, 1);
split.connect(lg1, 0);
split.connect(lg2, 0);
split.connect(rg1, 1);
split.connect(rg2, 1);
r.filters.push(split);
mp.acs.channelCountMode = 'explicit';
}
mp.acs.connect(r.filters[r.filters.length - 1]); mp.acs.connect(r.filters[r.filters.length - 1]);
} }
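
the channel-width rewiring above is just a 2×2 mix matrix: same-side gain `vg1` and cross-side gain `vg2` always sum to 1, and widths above 1 push the cross side negative, which is what makes the vocal-removal trick work. a quick numeric check in python (hypothetical helper, mirroring the `vg1`/`vg2` math above):

```python
def xfeed_gains(chw: float) -> tuple[float, float]:
    # vg1 = same-side gain, vg2 = cross-side gain; vg1 + vg2 == 1
    vg1 = 1 - (1 - chw) / 2
    return vg1, 1 - vg1

print(xfeed_gains(1))    # (1.0, 0.0)   standard stereo
print(xfeed_gains(0.5))  # (0.75, 0.25) 50% crossfeed
print(xfeed_gains(0))    # (0.5, 0.5)   mono
print(xfeed_gains(10))   # (5.5, -4.5)  L minus R; cancels centered vocals
```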
function eq_step(e) { function eq_step(e) {
ev(e); ev(e);
var band = parseInt(this.getAttribute('band')), var sb = this.getAttribute('band'),
band = parseInt(sb),
step = parseFloat(this.getAttribute('step')); step = parseFloat(this.getAttribute('step'));
if (isNaN(band)) if (sb == 'amp')
r.amp = Math.round((r.amp + step * 0.2) * 100) / 100; r.amp = Math.round((r.amp + step * 0.2) * 100) / 100;
else if (sb == 'chw')
r.chw = Math.round((r.chw + step * 0.2) * 100) / 100;
else else
r.gains[band] += step; r.gains[band] += step;
@@ -2044,15 +2086,18 @@ var audio_eq = (function () {
function adj_band(that, step) { function adj_band(that, step) {
var err = false; var err = false;
try { try {
var band = parseInt(that.getAttribute('band')), var sb = that.getAttribute('band'),
band = parseInt(sb),
vs = that.value, vs = that.value,
v = parseFloat(vs); v = parseFloat(vs);
if (isNaN(v) || v + '' != vs) if (isNaN(v) || v + '' != vs)
throw new Error('inval band'); throw new Error('inval band');
if (isNaN(band)) if (sb == 'amp')
r.amp = Math.round((v + step * 0.2) * 100) / 100; r.amp = Math.round((v + step * 0.2) * 100) / 100;
else if (sb == 'chw')
r.chw = Math.round((v + step * 0.2) * 100) / 100;
else else
r.gains[band] = v + step; r.gains[band] = v + step;
@@ -2089,6 +2134,7 @@ var audio_eq = (function () {
vs.push([a, hz, r.gains[a]]); vs.push([a, hz, r.gains[a]]);
} }
vs.push(["amp", "boost", r.amp]); vs.push(["amp", "boost", r.amp]);
vs.push(["chw", "width", r.chw]);
for (var a = 0; a < vs.length; a++) { for (var a = 0; a < vs.length; a++) {
var b = vs[a][0]; var b = vs[a][0];
@@ -2423,7 +2469,7 @@ function eval_hash() {
if (a) if (a)
QS(treectl.hidden ? '#path a:nth-last-child(2)' : '#treeul a.hl').focus(); QS(treectl.hidden ? '#path a:nth-last-child(2)' : '#treeul a.hl').focus();
else else
QS(thegrid.en ? '#ggrid a' : '#files tbody a').focus(); QS(thegrid.en ? '#ggrid a' : '#files tbody tr[tabindex]').focus();
}; };
})(a); })(a);
@@ -2436,7 +2482,8 @@ function sortfiles(nodes) {
if (!nodes.length) if (!nodes.length)
return nodes; return nodes;
var sopts = jread('fsort', [["href", 1, ""]]); var sopts = jread('fsort', [["href", 1, ""]]),
dir1st = sread('dir1st') !== '0';
try { try {
var is_srch = false; var is_srch = false;
@@ -2467,14 +2514,10 @@ function sortfiles(nodes) {
if ((v + '').indexOf('<a ') === 0) if ((v + '').indexOf('<a ') === 0)
v = v.split('>')[1]; v = v.split('>')[1];
else if (name == "href" && v) { else if (name == "href" && v)
if (v.split('?')[0].slice(-1) == '/')
v = '\t' + v;
v = uricom_dec(v)[0]; v = uricom_dec(v)[0];
}
nodes[b]._sv = v; nodes[b]._sv = v
} }
} }
@@ -2503,6 +2546,13 @@ function sortfiles(nodes) {
if (is_srch) if (is_srch)
delete nodes[b].ext; delete nodes[b].ext;
} }
if (dir1st) {
var r1 = [], r2 = [];
for (var b = 0, bb = nodes.length; b < bb; b++)
(nodes[b].href.split('?')[0].slice(-1) == '/' ? r1 : r2).push(nodes[b]);
nodes = r1.concat(r2);
}
} }
catch (ex) { catch (ex) {
console.log("failed to apply sort config: " + ex); console.log("failed to apply sort config: " + ex);
@@ -3955,6 +4005,9 @@ document.onkeydown = function (e) {
} }
} }
if (k == 'Enter' && ae && (ae.onclick || ae.hasAttribute('tabIndex')))
return ev(e) && ae.click() || true;
if (aet && aet != 'a' && aet != 'tr' && aet != 'pre') if (aet && aet != 'a' && aet != 'tr' && aet != 'pre')
return; return;
@@ -4393,6 +4446,9 @@ var treectl = (function () {
bcfg_bind(r, 'dots', 'dotfiles', false, function (v) { bcfg_bind(r, 'dots', 'dotfiles', false, function (v) {
r.goto(get_evpath()); r.goto(get_evpath());
}); });
bcfg_bind(r, 'dir1st', 'dir1st', true, function (v) {
treectl.gentab(get_evpath(), treectl.lsc);
});
setwrap(bcfg_bind(r, 'wtree', 'wraptree', true, setwrap)); setwrap(bcfg_bind(r, 'wtree', 'wraptree', true, setwrap));
setwrap(bcfg_bind(r, 'parpane', 'parpane', true, onscroll)); setwrap(bcfg_bind(r, 'parpane', 'parpane', true, onscroll));
bcfg_bind(r, 'htree', 'hovertree', false, reload_tree); bcfg_bind(r, 'htree', 'hovertree', false, reload_tree);
@@ -4839,6 +4895,7 @@ var treectl = (function () {
} }
r.gentab = function (top, res) { r.gentab = function (top, res) {
r.lsc = res;
var nodes = res.dirs.concat(res.files), var nodes = res.dirs.concat(res.files),
html = mk_files_header(res.taglist), html = mk_files_header(res.taglist),
seen = {}; seen = {};
@@ -4851,7 +4908,6 @@ var treectl = (function () {
bhref = tn.href.split('?')[0], bhref = tn.href.split('?')[0],
fname = uricom_dec(bhref)[0], fname = uricom_dec(bhref)[0],
hname = esc(fname), hname = esc(fname),
sortv = (bhref.slice(-1) == '/' ? '\t' : '') + hname,
id = 'f-' + ('00000000' + crc32(fname)).slice(-8), id = 'f-' + ('00000000' + crc32(fname)).slice(-8),
lang = showfile.getlang(fname); lang = showfile.getlang(fname);
@@ -4866,8 +4922,8 @@ var treectl = (function () {
tn.lead = '<a href="?doc=' + tn.href + '" class="doc' + (lang ? ' bri' : '') + tn.lead = '<a href="?doc=' + tn.href + '" class="doc' + (lang ? ' bri' : '') +
'" hl="' + id + '" name="' + hname + '">-txt-</a>'; '" hl="' + id + '" name="' + hname + '">-txt-</a>';
var ln = ['<tr><td>' + tn.lead + '</td><td sortv="' + sortv + var ln = ['<tr><td>' + tn.lead + '</td><td><a href="' +
'"><a href="' + top + tn.href + '" id="' + id + '">' + hname + '</a>', tn.sz]; top + tn.href + '" id="' + id + '">' + hname + '</a>', tn.sz];
for (var b = 0; b < res.taglist.length; b++) { for (var b = 0; b < res.taglist.length; b++) {
var k = res.taglist[b], var k = res.taglist[b],


@@ -36,6 +36,7 @@
<tr><td>hash-q</td><td>{{ hashq }}</td></tr> <tr><td>hash-q</td><td>{{ hashq }}</td></tr>
<tr><td>tag-q</td><td>{{ tagq }}</td></tr> <tr><td>tag-q</td><td>{{ tagq }}</td></tr>
<tr><td>mtp-q</td><td>{{ mtpq }}</td></tr> <tr><td>mtp-q</td><td>{{ mtpq }}</td></tr>
<tr><td>db-act</td><td id="u">{{ dbwt }}</td></tr>
</table> </table>
</td><td> </td><td>
<table class="vols"> <table class="vols">
@@ -50,8 +51,8 @@
</table> </table>
</td></tr></table> </td></tr></table>
<div class="btns"> <div class="btns">
<a id="d" href="/?stack" tt="shows the state of all active threads">dump stack</a> <a id="d" href="/?stack">dump stack</a>
<a id="e" href="/?reload=cfg" tt="reload config files (accounts/volumes/volflags),$Nand rescan all e2ds volumes">reload cfg</a> <a id="e" href="/?reload=cfg">reload cfg</a>
</div> </div>
{%- endif %} {%- endif %}


@@ -23,6 +23,12 @@ var Ls = {
"r1": "gå hjem", "r1": "gå hjem",
".s1": "kartlegg", ".s1": "kartlegg",
"t1": "handling", "t1": "handling",
"u2": "tid siden noen sist skrev til serveren$N( opplastning / navneendring / ... )$N$N17d = 17 dager$N1h23 = 1 time 23 minutter$N4m56 = 4 minuter 56 sekunder",
},
"eng": {
"d2": "shows the state of all active threads",
"e2": "reload config files (accounts/volumes/volflags),$Nand rescan all e2ds volumes",
"u2": "time since the last server write$N( upload / rename / ... )$N$N17d = 17 days$N1h23 = 1 hour 23 minutes$N4m56 = 4 minutes 56 seconds",
} }
}, },
d = Ls[sread("lang") || lang]; d = Ls[sread("lang") || lang];
@@ -40,5 +46,10 @@ for (var k in (d || {})) {
} }
tt.init(); tt.init();
if (!ebi('c')) var o = QS('input[name="cppwd"]');
QS('input[name="cppwd"]').focus(); if (!ebi('c') && o.offsetTop + o.offsetHeight < window.innerHeight)
o.focus();
o = ebi('u');
if (o && /[0-9]+$/.exec(o.innerHTML))
o.innerHTML = shumantime(o.innerHTML);


@@ -205,7 +205,7 @@ function U2pvis(act, btns, uc, st) {
if (!r.is_act(fo.in)) if (!r.is_act(fo.in))
return; return;
var k = 'f{0}{1}'.format(nfile, field.slice(1)), var k = 'f' + nfile + '' + field.slice(1),
obj = ebi(k); obj = ebi(k);
obj.innerHTML = field == 'ht' ? (markup[html] || html) : html; obj.innerHTML = field == 'ht' ? (markup[html] || html) : html;
@@ -250,9 +250,7 @@ function U2pvis(act, btns, uc, st) {
nb = fo.bt * (++fo.nh / fo.cb.length), nb = fo.bt * (++fo.nh / fo.cb.length),
p = r.perc(nb, 0, fobj.size, fobj.t_hashing); p = r.perc(nb, 0, fobj.size, fobj.t_hashing);
fo.hp = '{0}%, {1}, {2} MB/s'.format( fo.hp = f2f(p[0], 2) + '%, ' + p[1] + ', ' + f2f(p[2], 2) + ' MB/s';
f2f(p[0], 2), p[1], f2f(p[2], 2)
);
if (!r.is_act(fo.in)) if (!r.is_act(fo.in))
return; return;
@@ -269,14 +267,12 @@ function U2pvis(act, btns, uc, st) {
fo.bd += delta; fo.bd += delta;
var p = r.perc(fo.bd, fo.bd0, fo.bt, fobj.t_uploading); var p = r.perc(fo.bd, fo.bd0, fo.bt, fobj.t_uploading);
fo.hp = '{0}%, {1}, {2} MB/s'.format( fo.hp = f2f(p[0], 2) + '%, ' + p[1] + ', ' + f2f(p[2], 2) + ' MB/s';
f2f(p[0], 2), p[1], f2f(p[2], 2)
);
if (!r.is_act(fo.in)) if (!r.is_act(fo.in))
return; return;
var obj = ebi('f{0}p'.format(fobj.n)), var obj = ebi('f' + fobj.n + 'p'),
o1 = p[0] - 2, o2 = p[0] - 0.1, o3 = p[0]; o1 = p[0] - 2, o2 = p[0] - 0.1, o3 = p[0];
if (!obj) { if (!obj) {
@@ -446,8 +442,8 @@ function U2pvis(act, btns, uc, st) {
r.npotato = 0; r.npotato = 0;
var html = [ var html = [
"<p>files: &nbsp; <b>{0}</b> finished, &nbsp; <b>{1}</b> failed, &nbsp; <b>{2}</b> busy, &nbsp; <b>{3}</b> queued</p>".format(r.ctr.ok, r.ctr.ng, r.ctr.bz, r.ctr.q), "<p>files: &nbsp; <b>{0}</b> finished, &nbsp; <b>{1}</b> failed, &nbsp; <b>{2}</b> busy, &nbsp; <b>{3}</b> queued</p>".format(
]; r.ctr.ok, r.ctr.ng, r.ctr.bz, r.ctr.q)];
while (r.head < r.tab.length && has(["ok", "ng"], r.tab[r.head].in)) while (r.head < r.tab.length && has(["ok", "ng"], r.tab[r.head].in))
r.head++; r.head++;
@@ -457,7 +453,8 @@ function U2pvis(act, btns, uc, st) {
act = r.tab[r.head]; act = r.tab[r.head];
if (act) if (act)
html.push("<p>file {0} of {1} : &nbsp; {2} &nbsp; <code>{3}</code></p>\n<div>{4}</div>".format(r.head + 1, r.tab.length, act.ht, act.hp, act.hn)); html.push("<p>file {0} of {1} : &nbsp; {2} &nbsp; <code>{3}</code></p>\n<div>{4}</div>".format(
r.head + 1, r.tab.length, act.ht, act.hp, act.hn));
html = html.join('\n'); html = html.join('\n');
if (r.hpotato == html) if (r.hpotato == html)
@@ -470,7 +467,7 @@ function U2pvis(act, btns, uc, st) {
function apply_html() { function apply_html() {
var oq = {}, n = 0; var oq = {}, n = 0;
for (var k in r.hq) { for (var k in r.hq) {
var o = ebi('f{0}p'.format(k)); var o = ebi('f' + k + 'p');
if (!o) if (!o)
continue; continue;
@@ -682,8 +679,8 @@ function Donut(uc, st) {
} }
if (++r.tc >= 10) { if (++r.tc >= 10) {
wintitle("{0}%, {1}s, #{2}, ".format( wintitle("{0}%, {1}, #{2}, ".format(
f2f(v * 100 / t, 1), r.eta, st.files.length - st.nfile.upload), true); f2f(v * 100 / t, 1), shumantime(r.eta), st.files.length - st.nfile.upload), true);
r.tc = 0; r.tc = 0;
} }
@@ -835,6 +832,11 @@ function up2k_init(subtle) {
"uploading": 0, "uploading": 0,
"busy": 0 "busy": 0
}, },
"eta": {
"h": "",
"u": "",
"t": ""
},
"car": 0, "car": 0,
"modn": 0, "modn": 0,
"modv": 0, "modv": 0,
@@ -919,8 +921,14 @@ function up2k_init(subtle) {
catch (ex) { } catch (ex) { }
ev(e); ev(e);
try {
e.dataTransfer.dropEffect = 'copy'; e.dataTransfer.dropEffect = 'copy';
e.dataTransfer.effectAllowed = 'copy'; e.dataTransfer.effectAllowed = 'copy';
}
catch (ex) {
document.body.ondragenter = document.body.ondragleave = document.body.ondragover = null;
return modal.alert('your browser does not support drag-and-drop uploading');
}
clmod(ebi('drops'), 'vis', 1); clmod(ebi('drops'), 'vis', 1);
var v = this.getAttribute('v'); var v = this.getAttribute('v');
if (v) if (v)
@@ -1278,12 +1286,21 @@ function up2k_init(subtle) {
ebi('u2tabw').style.minHeight = utw_minh + 'px'; ebi('u2tabw').style.minHeight = utw_minh + 'px';
} }
if (!nhash) if (!nhash) {
ebi('u2etah').innerHTML = L.u_etadone.format(humansize(st.bytes.hashed), pvis.ctr.ok + pvis.ctr.ng); var h = L.u_etadone.format(humansize(st.bytes.hashed), pvis.ctr.ok + pvis.ctr.ng);
if (st.eta.h !== h)
st.eta.h = ebi('u2etah').innerHTML = h;
}
if (!nsend && !nhash) if (!nsend && !nhash) {
ebi('u2etau').innerHTML = ebi('u2etat').innerHTML = ( var h = L.u_etadone.format(humansize(st.bytes.uploaded), pvis.ctr.ok + pvis.ctr.ng);
L.u_etadone.format(humansize(st.bytes.uploaded), pvis.ctr.ok + pvis.ctr.ng));
if (st.eta.u !== h)
st.eta.u = ebi('u2etau').innerHTML = h;
if (st.eta.t !== h)
st.eta.t = ebi('u2etat').innerHTML = h;
}
if (!st.busy.hash.length && !hashing_permitted()) if (!st.busy.hash.length && !hashing_permitted())
nhash = 0; nhash = 0;
@@ -1314,19 +1331,21 @@ function up2k_init(subtle) {
for (var a = 0; a < t.length; a++) { for (var a = 0; a < t.length; a++) {
var rem = st.bytes.total - t[a][2], var rem = st.bytes.total - t[a][2],
bps = t[a][1] / t[a][3], bps = t[a][1] / t[a][3],
hid = t[a][0],
eid = hid.slice(-1),
eta = Math.floor(rem / bps); eta = Math.floor(rem / bps);
if (t[a][1] < 1024 || t[a][3] < 0.1) { if (t[a][1] < 1024 || t[a][3] < 0.1) {
ebi(t[a][0]).innerHTML = L.u_etaprep; ebi(hid).innerHTML = L.u_etaprep;
continue; continue;
} }
donut.eta = eta; donut.eta = eta;
if (etaskip) st.eta[eid] = '{0}, {1}/s, {2}'.format(
continue;
ebi(t[a][0]).innerHTML = '{0}, {1}/s, {2}'.format(
humansize(rem), humansize(bps, 1), humantime(eta)); humansize(rem), humansize(bps, 1), humantime(eta));
if (!etaskip)
ebi(hid).innerHTML = st.eta[eid];
} }
if (++etaskip > 2) if (++etaskip > 2)
etaskip = 0; etaskip = 0;
@@ -1356,6 +1375,10 @@ function up2k_init(subtle) {
st.busy.handshake.length) st.busy.handshake.length)
return false; return false;
if (t.n - st.car > 8)
// prevent runahead from a stuck upload (slow server hdd)
return false;
if ((uc.multitask ? 1 : 0) < if ((uc.multitask ? 1 : 0) <
st.todo.upload.length + st.todo.upload.length +
st.busy.upload.length) st.busy.upload.length)
@@ -1977,6 +2000,9 @@ function up2k_init(subtle) {
tasker(); tasker();
} }
else { else {
pvis.seth(t.n, 1, "ERROR");
pvis.seth(t.n, 2, L.u_ehstmp);
var err = "", var err = "",
rsp = (xhr.responseText + ''), rsp = (xhr.responseText + ''),
ofs = rsp.lastIndexOf('\nURL: '); ofs = rsp.lastIndexOf('\nURL: ');
@@ -2011,6 +2037,9 @@ function up2k_init(subtle) {
t.want_recheck = true; t.want_recheck = true;
} }
} }
if (rsp.indexOf('server HDD is full') + 1)
return toast.err(0, L.u_ehsdf + "\n\n" + rsp.replace(/.*; /, ''));
if (err != "") { if (err != "") {
pvis.seth(t.n, 1, "ERROR"); pvis.seth(t.n, 1, "ERROR");
pvis.seth(t.n, 2, err); pvis.seth(t.n, 2, err);
@@ -2135,8 +2164,9 @@ function up2k_init(subtle) {
xhr.open('POST', t.purl, true); xhr.open('POST', t.purl, true);
xhr.setRequestHeader("X-Up2k-Hash", t.hash[npart]); xhr.setRequestHeader("X-Up2k-Hash", t.hash[npart]);
xhr.setRequestHeader("X-Up2k-Wark", t.wark); xhr.setRequestHeader("X-Up2k-Wark", t.wark);
xhr.setRequestHeader("X-Up2k-Stat", "{0}/{1}/{2}/{3} {4}/{5}".format( xhr.setRequestHeader("X-Up2k-Stat", "{0}/{1}/{2}/{3} {4}/{5} {6}".format(
pvis.ctr.ok, pvis.ctr.ng, pvis.ctr.bz, pvis.ctr.q, btot, btot - bfin)); pvis.ctr.ok, pvis.ctr.ng, pvis.ctr.bz, pvis.ctr.q, btot, btot - bfin,
st.eta.t.split(' ').pop()));
xhr.setRequestHeader('Content-Type', 'application/octet-stream'); xhr.setRequestHeader('Content-Type', 'application/octet-stream');
if (xhr.overrideMimeType) if (xhr.overrideMimeType)
xhr.overrideMimeType('Content-Type', 'application/octet-stream'); xhr.overrideMimeType('Content-Type', 'application/octet-stream');


@@ -7,6 +7,7 @@ if (!window['console'])
var wah = '', var wah = '',
HALFMAX = 8192 * 8192 * 8192 * 8192,
is_touch = 'ontouchstart' in window, is_touch = 'ontouchstart' in window,
is_https = (window.location + '').indexOf('https:') === 0, is_https = (window.location + '').indexOf('https:') === 0,
IPHONE = is_touch && /iPhone|iPad|iPod/i.test(navigator.userAgent), IPHONE = is_touch && /iPhone|iPad|iPod/i.test(navigator.userAgent),
@@ -459,6 +460,16 @@ function sortTable(table, col, cb) {
} }
return reverse * (a.localeCompare(b)); return reverse * (a.localeCompare(b));
}); });
if (sread('dir1st') !== '0') {
var r1 = [], r2 = [];
for (var i = 0; i < tr.length; i++) {
var cell = tr[vl[i][1]].cells[1],
href = cell.getAttribute('sortv') || cell.textContent.trim();
(href.split('?')[0].slice(-1) == '/' ? r1 : r2).push(vl[i]);
}
vl = r1.concat(r2);
}
for (i = 0; i < tr.length; ++i) tb.appendChild(tr[vl[i][1]]); for (i = 0; i < tr.length; ++i) tb.appendChild(tr[vl[i][1]]);
if (cb) cb(); if (cb) cb();
} }
@@ -642,7 +653,7 @@ function humansize(b, terse) {
function humantime(v) { function humantime(v) {
if (v >= 60 * 60 * 24) if (v >= 60 * 60 * 24)
return v; return shumantime(v);
try { try {
return /.*(..:..:..).*/.exec(new Date(v * 1000).toUTCString())[1]; return /.*(..:..:..).*/.exec(new Date(v * 1000).toUTCString())[1];
@@ -653,12 +664,39 @@ function humantime(v) {
} }
function shumantime(v) {
if (v < 10)
return f2f(v, 2) + 's';
if (v < 60)
return f2f(v, 1) + 's';
v = parseInt(v);
var st = [[60 * 60 * 24, 60 * 60, 'd'], [60 * 60, 60, 'h'], [60, 1, 'm']];
for (var a = 0; a < st.length; a++) {
var m1 = st[a][0],
m2 = st[a][1],
ch = st[a][2];
if (v < m1)
continue;
var v1 = parseInt(v / m1),
v2 = ('0' + parseInt((v % m1) / m2)).slice(-2);
return v1 + ch + (v1 >= 10 ? '' : v2);
}
}
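
for reference, a rough python port of `shumantime` above, with expected outputs; the two-digit minor unit is dropped once the major unit reaches double digits:

```python
def shumantime(v: float) -> str:
    # short human-readable duration: "1m23" = 1 min 23 sec, "17d" = 17 days
    if v < 10:
        return "%.2fs" % (v,)
    if v < 60:
        return "%.1fs" % (v,)
    v = int(v)
    for m1, m2, ch in ((86400, 3600, "d"), (3600, 60, "h"), (60, 1, "m")):
        if v >= m1:
            v1, v2 = v // m1, (v % m1) // m2
            return "%d%s%s" % (v1, ch, "" if v1 >= 10 else "%02d" % (v2,))

print(shumantime(83))       # 1m23
print(shumantime(4980))     # 1h23
print(shumantime(1468800))  # 17d
```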
function clamp(v, a, b) { function clamp(v, a, b) {
return Math.min(Math.max(v, a), b); return Math.min(Math.max(v, a), b);
} }
function has(haystack, needle) { function has(haystack, needle) {
try { return haystack.includes(needle); } catch (ex) { }
for (var a = 0; a < haystack.length; a++) for (var a = 0; a < haystack.length; a++)
if (haystack[a] == needle) if (haystack[a] == needle)
return true; return true;


@@ -1,3 +1,133 @@
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-0727-1407 `v1.3.8` more async
* read-only demo server at https://a.ocv.me/pub/demo/
* latest gzip edition of the sfx: [v1.0.14](https://github.com/9001/copyparty/releases/tag/v1.0.14#:~:text=release-specific%20notes)
## new features
* new arg `--df 4` and volflag `:c,df=4g` to guarantee 4 GiB free disk space by rejecting uploads (see the sketch after this list)
* some features no longer block new uploads while they're processing
* `-e2v` file integrity checker
* `-e2ts` initial tag scanner
* hopefully fixes a [deadlock](https://www.youtube.com/watch?v=DkKoMveT_jo&t=3s) someone ran into (but probably doesn't)
* (the "deadlock" link is an addictive demoscene banger -- the actual issue is #10)
* reduced the impact of some features which still do
* defer `--re-maxage` reindexing if there was a write (upload/rename/...) recently
* `--db-act` sets minimum idle period before reindex can start (default 10sec)
* bbox / image-viewer: add video hotkeys 0..9 to seek 0%..90%
* audio-player: add audio crossfeed (left-right channel mixer / vocal isolation)
* splashpage (`/?h`) shows time since the most recent write
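
a minimal sketch of what the `--df` guard boils down to, assuming a posix host and a hypothetical 4 GiB threshold; `has_free_space` below mirrors `get_df` in util.py (which additionally supports windows through `GetDiskFreeSpaceExW`):

```python
import os

def has_free_space(abspath: str, min_free: int = 4 * 1024 ** 3) -> bool:
    # statvfs: fragment size * free blocks = free bytes
    sv = os.statvfs(abspath)
    return sv.f_frsize * sv.f_bfree >= min_free

# reject the upload before writing anything; up2k.js sees the
# error and keeps retrying in case space is freed up later
if not has_free_space("/srv/uploads"):
    raise Exception("server HDD is full")
```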
## bugfixes
* a11y:
* enter-key should always trigger onclick
* only focus password box if in-bounds
* improve skip-to-files
* prisonparty: volume labeling in root folders
* other minor stuff
* forget deleted shadowed files from the db
* be less noisy if a client disconnects mid-reply
* up2k.js less eager to thrash slow server HDDs
## other changes
* show client's upload ETA in server log
* dump stacks and issue `lsof` on the db if a transaction is stuck
* will hopefully help if there's any more deadlocks
* [up2k-hook-ytid](https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/up2k-hook-ytid.js) (the overengineered up2k.js plugin example) now has an mp4/webm/mkv metadata parser
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-0716-1848 `v1.3.7` faster
* read-only demo server at https://a.ocv.me/pub/demo/
* latest gzip edition of the sfx: [v1.0.14](https://github.com/9001/copyparty/releases/tag/v1.0.14#:~:text=release-specific%20notes)
## new features
* `up2k.js`: **improved upload speeds!**
* **...when there's many small files** (or the browser is slow)
* add [potato mode](https://user-images.githubusercontent.com/241032/179336639-8ecc01ea-2662-4cb6-8048-5be3ad599f33.png) -- lightweight UI for faster uploads from slow boxes
* enables automatically if it detects a cpu bottleneck (not very accurate)
* **...on really fast connections (LAN / fiber)**
* batch progress updates to reduce repaints
* **...when there is a mix of big and small files**
* sort the uploads by size, smallest first, for optimal cpu/network usage
* can be overridden to alphabetical order in the settings tab
* new arg `--u2sort` changes the default + overrides the override button
* improve upload pacing when alphabetical order is enabled
* mainly affecting single files that are 300 GiB +
* `up2k.js`: add [up2k hooks](https://github.com/9001/copyparty/blob/hovudstraum/contrib/plugins/up2k-hooks.js)
* specify *client-side* rules to reject files as they are dropped into the browser
* not a hard-reject since people can use [up2k.py](https://github.com/9001/copyparty/blob/hovudstraum/bin/up2k.py) and whatnot, more like a hint
* `up2k.py`: add file integrity checker
* new arg `-e2v` to scan volumes and verify file checksums on startup (see the sketch after this list)
* `-e2vu` updates the db on mismatch, `-e2vp` panics
* uploads are blocked while the scan is running -- might get fixed at some point
* for now it prints a warning
* bbox / image-viewer: doubletap a picture to enter fullscreen mode
* md-editor: `ctrl-c/x` affects current line if no selection, and `ctrl-e` is fullscreen
* tag-parser plugins:
* add support for passing metadata from one mtp to another (parser dependencies)
* the `p` flag in [vidchk](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/vidchk.py) usage makes it run after the base parser, eating its output
* add [rclone uploader](https://github.com/9001/copyparty/blob/hovudstraum/bin/mtag/rclone-upload.py) which optionally and by default depends on vidchk
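
a minimal sketch of what `-e2v`-style verification amounts to, assuming a hypothetical `db` of relpath→checksum rows and plain sha512 (the real scanner hashes in chunks and uses copyparty's own digest format):

```python
import hashlib
import os

def verify_volume(db: dict[str, str], root: str, update: bool, panic: bool) -> None:
    for rp, want in sorted(db.items()):
        ap = os.path.join(root, rp)
        with open(ap, "rb") as f:
            got = hashlib.sha512(f.read()).hexdigest()
        if got == want:
            continue
        if panic:
            raise Exception("hash mismatch: " + ap)  # -e2vp
        if update:
            db[rp] = got  # -e2vu: trust the filesystem, fix the db
```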
## bugfixes
* sfx would crash if it got the same PID as recently (for example across two reboots)
* audio equalizer on recent chromes
* still can't figure out why chrome sometimes drops the mediasession
* bbox: don't attach click events to videos
* up2k.py:
* more sensible behavior w/ blank files
* avoid some extra directory scans when deleting files
* faster shutdown on `ctrl-c` during volume indexing
* warning from the thumbnail cleaner if the volume has no thumbnails
* `>fixing py2 support` `>2022`
## other changes
* up2k.js:
* sends a summary of the upload queue to [the server log](https://github.com/9001/copyparty#up2k)
* shows a toast while loading huge filedrops to indicate it's still alive
* sfx: disable guru meditation unless running on windows
* avoids hanging systemd on certain crashes
* logs the state of all threads if sqlite hits a timeout
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-0706-0029 `v1.3.5` sup cloudflare
* read-only demo server at https://a.ocv.me/pub/demo/
* latest gzip edition of the sfx: [v1.0.14](https://github.com/9001/copyparty/releases/tag/v1.0.14#:~:text=release-specific%20notes)
## new features
* detect + recover from cloudflare ddos-protection memes during upload (see the sketch after this list)
* while carefully avoiding any mention of "DDoS" in the JS because enterprise firewalls do not enjoy that
* new option `--favico` to specify a default favicon
* set to `🎉` by default, which also enables the fancy upload progress donut 👌
* baguettebox (image/video viewer):
* toolbar button `⛶` to enter fullscreen mode (same as hotkey `F`)
* tap middle of screen to show/hide toolbar
* tap left/right-side of pics to navigate prev/next
* hotkeys `[` and `]` to set A-B loop in videos
* and [URL parameters](https://a.ocv.me/pub/demo/pics-vids/#gf-e2e482ae&t=4.2-6) for that + [initial seekpoint](https://a.ocv.me/pub/demo/pics-vids/#gf-c04bb0f6&t=26s) (same as the audio player)
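
the recovery is not cloudflare-specific; it boils down to noticing that an upload response is a full html page (an interception / challenge page) rather than the expected reply, then backing off and retrying. a hypothetical sketch, not the actual client logic:

```python
import time

def intercepted(status: int, body: str) -> bool:
    # the server replies json/plaintext, so a whole html document
    # means some middlebox answered instead of copyparty
    return status in (403, 503) and "<html" in body[:512].lower()

def send_with_retry(post) -> str:
    for backoff in (5, 15, 30, 60):
        status, body = post()
        if not intercepted(status, body):
            return body
        time.sleep(backoff)  # wait out the challenge, then retry
    raise Exception("middlebox keeps intercepting; giving up")
```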
## bugfixes
* when a tag-parser hits the timeout, `pkill` all its descendants too
* and a [new mtp flag](https://github.com/9001/copyparty/#file-parser-plugins) to override that; `kt` (kill tree, default), `km` (kill main, old default), `kn` (kill none)
* cpu-wasting spin while waiting for the final handful of files to finish tag-scraping
* detection of sparse-files support inside [prisonparty](https://github.com/9001/copyparty/tree/hovudstraum/bin#prisonpartysh) and other strict jails
* baguettebox (image/video viewer):
* crash on swipe during close
* didn't reset terminal color at the end of `?ls=v`
* don't try to thumbnail empty files (harmless but dumb)
## other changes
* ux improvements
* hide the uploads table until something happens
* bump codemirror to 5.65.6
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2022-0627-2057 `v1.3.3` sdcardfs # 2022-0627-2057 `v1.3.3` sdcardfs

docs/notes.md (new file)

@@ -0,0 +1,10 @@
# up2k.js
## potato detection
* tsk 0.25/8.4/31.5 vs. 1.27/22.9/18 = 77% (38.4s, 49.7s)
* 4c locale #1313, ff-102,deb-11 @ ryzen4500u wifi -> win10
* profiling shows 2sec heavy gc every 2sec
* tsk 0.41/4.1/10 vs. 1.41/9.9/7 = 73% (13.3s, 18.2s)
* 4c locale #1313, ch-103,deb-11 @ ryzen4500u wifi -> win10


@@ -185,7 +185,7 @@ brew install python@2
pip install virtualenv pip install virtualenv
# readme toc # readme toc
cat README.md | awk 'function pr() { if (!h) {return}; if (/^ *[*!#|]/||!s) {printf "%s\n",h;h=0;return}; if (/.../) {printf "%s - %s\n",h,$0;h=0}; }; /^#/{s=1;pr()} /^#* *(file indexing|install on android|dev env setup|just the sfx|complete release|optional gpl stuff)|`$/{s=0} /^#/{lv=length($1);sub(/[^ ]+ /,"");bab=$0;gsub(/ /,"-",bab); h=sprintf("%" ((lv-1)*4+1) "s [%s](#%s)", "*",$0,bab);next} !h{next} {sub(/ .*/,"");sub(/[:,]$/,"")} {pr()}' > toc; grep -E '^## readme toc' -B1000 -A2 <README.md >p1; grep -E '^## quickstart' -B2 -A999999 <README.md >p2; (cat p1; grep quickstart -A1000 <toc; cat p2) >README.md; rm p1 p2 toc cat README.md | awk 'function pr() { if (!h) {return}; if (/^ *[*!#|]/||!s) {printf "%s\n",h;h=0;return}; if (/.../) {printf "%s - %s\n",h,$0;h=0}; }; /^#/{s=1;pr()} /^#* *(install on android|dev env setup|just the sfx|complete release|optional gpl stuff)|`$/{s=0} /^#/{lv=length($1);sub(/[^ ]+ /,"");bab=$0;gsub(/ /,"-",bab); h=sprintf("%" ((lv-1)*4+1) "s [%s](#%s)", "*",$0,bab);next} !h{next} {sub(/ .*/,"");sub(/[:;,]$/,"")} {pr()}' > toc; grep -E '^## readme toc' -B1000 -A2 <README.md >p1; grep -E '^## quickstart' -B2 -A999999 <README.md >p2; (cat p1; grep quickstart -A1000 <toc; cat p2) >README.md; rm p1 p2 toc
# fix firefox phantom breakpoints, # fix firefox phantom breakpoints,
# suggestions from bugtracker, doesnt work (debugger is not attachable) # suggestions from bugtracker, doesnt work (debugger is not attachable)


@@ -2,9 +2,9 @@ FROM alpine:3.16
WORKDIR /z WORKDIR /z
ENV ver_asmcrypto=5b994303a9d3e27e0915f72a10b6c2c51535a4dc \ ENV ver_asmcrypto=5b994303a9d3e27e0915f72a10b6c2c51535a4dc \
ver_hashwasm=4.9.0 \ ver_hashwasm=4.9.0 \
ver_marked=4.0.17 \ ver_marked=4.0.18 \
ver_mde=2.16.1 \ ver_mde=2.16.1 \
ver_codemirror=5.65.6 \ ver_codemirror=5.65.7 \
ver_fontawesome=5.13.0 \ ver_fontawesome=5.13.0 \
ver_zopfli=1.0.3 ver_zopfli=1.0.3


@@ -26,6 +26,11 @@ help() { exec cat <<'EOF'
# (browsers will try to use 'Consolas' instead) # (browsers will try to use 'Consolas' instead)
# #
# `no-dd` saves ~2k by removing the mouse cursor # `no-dd` saves ~2k by removing the mouse cursor
#
# ---------------------------------------------------------------------
#
# if you are on windows, you can use msys2:
# PATH=/c/Users/$USER/AppData/Local/Programs/Python/Python310:"$PATH" ./make-sfx.sh fast
EOF EOF
} }
@@ -190,7 +195,7 @@ tmpdir="$(
done done
# remove type hints before build instead # remove type hints before build instead
(cd copyparty; python3 ../../scripts/strip_hints/a.py; rm uh) (cd copyparty; "$pybin" ../../scripts/strip_hints/a.py; rm uh)
} }
ver= ver=


@@ -8,7 +8,7 @@ cmd = sys.argv[1]
if cmd == "cpp": if cmd == "cpp":
from copyparty.__main__ import main from copyparty.__main__ import main
argv = ["__main__", "-v", "srv::r", "-v", "../../yt:yt:r"] argv = ["__main__", "-vsrv::r:c,e2ds,e2ts"]
main(argv=argv) main(argv=argv)
elif cmd == "test": elif cmd == "test":
@@ -29,6 +29,6 @@ else:
# #
# python -m vmprof -o prof --lines ./scripts/profile.py test # python -m vmprof -o prof --lines ./scripts/profile.py test
# linux: ~/.local/bin/vmprofshow prof tree | grep -vF '[1m 0.' # linux: ~/.local/bin/vmprofshow prof tree | awk '$2>1{n=5} !n{next} 1;{n--} !n{print""}'
# macos: ~/Library/Python/3.9/bin/vmprofshow prof tree | grep -vF '[1m 0.' # macos: ~/Library/Python/3.9/bin/vmprofshow prof tree
# win: %appdata%\..\Roaming\Python\Python39\Scripts\vmprofshow.exe prof tree # win: %appdata%\..\Roaming\Python\Python39\Scripts\vmprofshow.exe prof tree


@@ -0,0 +1,4 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
from Queue import Queue, LifoQueue, PriorityQueue, Empty, Full


@@ -10,9 +10,10 @@ import pprint
import tarfile import tarfile
import tempfile import tempfile
import unittest import unittest
from argparse import Namespace
from tests import util as tu from tests import util as tu
from tests.util import Cfg
from copyparty.authsrv import AuthSrv from copyparty.authsrv import AuthSrv
from copyparty.httpcli import HttpCli from copyparty.httpcli import HttpCli
@@ -22,39 +23,6 @@ def hdr(query):
return h.format(query).encode("utf-8") return h.format(query).encode("utf-8")
class Cfg(Namespace):
def __init__(self, a=None, v=None, c=None):
ka = {}
ex = "e2d e2ds e2dsa e2t e2ts e2tsr ed emp force_js ihead no_acode no_athumb no_del no_logues no_mv no_readme no_robots no_scandir no_thumb no_vthumb no_zip nw"
ka.update(**{k: False for k in ex.split()})
ex = "nih no_rescan no_sendfile no_voldump"
ka.update(**{k: True for k in ex.split()})
ex = "css_browser hist js_browser no_hash no_idx"
ka.update(**{k: None for k in ex.split()})
ex = "re_maxage rproxy rsp_slp s_wr_slp theme themes turbo"
ka.update(**{k: 0 for k in ex.split()})
ex = "doctitle favico html_head mth textfiles"
ka.update(**{k: "" for k in ex.split()})
super(Cfg, self).__init__(
a=a or [],
v=v or [],
c=c,
s_wr_sz=512 * 1024,
unpost=600,
mtp=[],
mte="a",
lang="eng",
logout=573,
**ka
)
class TestHttpCli(unittest.TestCase): class TestHttpCli(unittest.TestCase):
def setUp(self): def setUp(self):
self.td = tu.get_ramdisk() self.td = tu.get_ramdisk()


@@ -8,44 +8,14 @@ import shutil
import tempfile import tempfile
import unittest import unittest
from textwrap import dedent from textwrap import dedent
from argparse import Namespace
from tests import util as tu from tests import util as tu
from tests.util import Cfg
from copyparty.authsrv import AuthSrv, VFS from copyparty.authsrv import AuthSrv, VFS
from copyparty import util from copyparty import util
class Cfg(Namespace):
def __init__(self, a=None, v=None, c=None):
ex = "nw e2d e2ds e2dsa e2t e2ts e2tsr no_logues no_readme no_acode force_js no_robots no_thumb no_athumb no_vthumb"
ex = {k: False for k in ex.split()}
ex2 = {
"mtp": [],
"mte": "a",
"mth": "",
"doctitle": "",
"html_head": "",
"hist": None,
"no_idx": None,
"no_hash": None,
"js_browser": None,
"css_browser": None,
"no_voldump": True,
"re_maxage": 0,
"rproxy": 0,
"rsp_slp": 0,
"s_wr_slp": 0,
"s_wr_sz": 512 * 1024,
"lang": "eng",
"theme": 0,
"themes": 0,
"turbo": 0,
"logout": 573,
}
ex.update(ex2)
super(Cfg, self).__init__(a=a or [], v=v or [], c=c, **ex)
class TestVFS(unittest.TestCase): class TestVFS(unittest.TestCase):
def setUp(self): def setUp(self):
self.td = tu.get_ramdisk() self.td = tu.get_ramdisk()


@@ -7,6 +7,7 @@ import threading
import tempfile import tempfile
import platform import platform
import subprocess as sp import subprocess as sp
from argparse import Namespace
WINDOWS = platform.system() == "Windows" WINDOWS = platform.system() == "Windows"
@@ -89,6 +90,40 @@ def get_ramdisk():
return subdir(ret) return subdir(ret)
class Cfg(Namespace):
def __init__(self, a=None, v=None, c=None):
ka = {}
ex = "e2d e2ds e2dsa e2t e2ts e2tsr e2v e2vu e2vp xdev xvol ed emp force_js ihead no_acode no_athumb no_del no_logues no_mv no_readme no_robots no_scandir no_thumb no_vthumb no_zip nid nih nw"
ka.update(**{k: False for k in ex.split()})
ex = "no_rescan no_sendfile no_voldump"
ka.update(**{k: True for k in ex.split()})
ex = "css_browser hist js_browser no_hash no_idx"
ka.update(**{k: None for k in ex.split()})
ex = "re_maxage rproxy rsp_slp s_wr_slp theme themes turbo df"
ka.update(**{k: 0 for k in ex.split()})
ex = "doctitle favico html_head mth textfiles log_fk"
ka.update(**{k: "" for k in ex.split()})
super(Cfg, self).__init__(
a=a or [],
v=v or [],
c=c,
s_wr_sz=512 * 1024,
unpost=600,
u2sort="s",
mtp=[],
mte="a",
lang="eng",
logout=573,
**ka
)
class NullBroker(object): class NullBroker(object):
def say(*args): def say(*args):
pass pass