Compare commits

30 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 016dba4ca9 | |
| | 39c7ef305f | |
| | 849c1dc848 | |
| | 61414014fe | |
| | 578a915884 | |
| | eacafb8a63 | |
| | 4446760f74 | |
| | 6da2a083f9 | |
| | 8837c8f822 | |
| | bac301ed66 | |
| | 061db3906d | |
| | fd7df5c952 | |
| | a270019147 | |
| | 55e0209901 | |
| | 2b255fbbed | |
| | 8a2345a0fb | |
| | bfa9f535aa | |
| | f757623ad8 | |
| | 3c7465e268 | |
| | 108665fc4f | |
| | ed519c9138 | |
| | 2dd2e2c57e | |
| | 6c3a976222 | |
| | 80cc26bd95 | |
| | 970fb84fd8 | |
| | 20cbcf6931 | |
| | 8fcde2a579 | |
| | b32d1f8ad3 | |
| | 03513e0cb1 | |
| | e041a2b197 | |
README.md (28)

@@ -46,6 +46,7 @@ turn your phone or raspi into a portable file server with resumable uploads/down
* [browser support](#browser-support)
* [client examples](#client-examples)
* [up2k](#up2k)
* [performance](#performance)
* [dependencies](#dependencies)
* [optional dependencies](#optional-dependencies)
* [install recommended deps](#install-recommended-deps)

@@ -199,10 +200,16 @@ the browser has the following hotkeys
* `G` toggle list / grid view
* `T` toggle thumbnails / icons
* when playing audio:
  * `0..9` jump to 10%..90%
  * `U/O` skip 10sec back/forward
  * `J/L` prev/next song
  * `U/O` skip 10sec back/forward
  * `0..9` jump to 10%..90%
  * `P` play/pause (also starts playing the folder)
* when viewing images / playing videos:
  * `J/L, Left/Right` prev/next file
  * `Home/End` first/last file
  * `U/O` skip 10sec back/forward
  * `P/K/Space` play/pause video
  * `Esc` close viewer
* when tree-sidebar is open:
  * `A/D` adjust tree width
* in the grid view:

@@ -494,6 +501,23 @@ quick outline of the up2k protocol, see [uploading](#uploading) for the web-clie
* client does another handshake with the hashlist; server replies with OK or a list of chunks to reupload

# performance

defaults are good for most cases, don't mind the `cannot efficiently use multiple CPU cores` message, it's very unlikely to be a problem

below are some tweaks roughly ordered by usefulness:

* `-q` disables logging and can help a bunch, even when combined with `-lo` to redirect logs to file
* `--http-only` or `--https-only` (unless you want to support both protocols) will reduce the delay before a new connection is established
* `--hist` pointing to a fast location (ssd) will make directory listings and searches faster when `-e2d` or `-e2t` is set
* `--no-hash` when indexing a networked disk if you don't care about the actual filehashes and only want the names/tags searchable
* `-j` enables multiprocessing (actual multithreading) and can make copyparty perform better in cpu-intensive workloads, for example:
  * huge amount of short-lived connections
  * really heavy traffic (downloads/uploads)

  ...however it adds an overhead to internal communication so it might be a net loss, see if it works 4 u

# dependencies

* `jinja2` (is built into the SFX)
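The new `# performance` section and the up2k outline above end on the retry step ("client does another handshake with the hashlist; server replies with OK or a list of chunks to reupload"). A rough client-side sketch of that loop follows; the endpoint name, the JSON fields `hash`/`need`, and `upload_chunk()` are assumptions for illustration only, not copyparty's actual wire format.

```python
import requests

def up2k_retry_loop(base_url, file_meta, chunks, upload_chunk):
    """file_meta: dict describing the file; chunks: {chunk_hash: chunk_bytes}"""
    while True:
        # handshake advertises every chunk hash we have locally
        handshake = dict(file_meta, hash=list(chunks))
        reply = requests.post(base_url + "/handshake", json=handshake).json()
        missing = reply.get("need", [])
        if not missing:
            return  # server said OK; every chunk is accounted for
        for chunk_hash in missing:
            upload_chunk(base_url, chunk_hash, chunks[chunk_hash])
```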
@@ -345,7 +345,7 @@ class Gateway(object):
        except:
            pass

    def sendreq(self, *args, headers={}, **kwargs):
    def sendreq(self, meth, path, headers, **kwargs):
        if self.password:
            headers["Cookie"] = "=".join(["cppwd", self.password])

@@ -354,21 +354,21 @@ class Gateway(object):
            if c.rx_path:
                raise Exception()

            c.request(*list(args), headers=headers, **kwargs)
            c.request(meth, path, headers=headers, **kwargs)
            c.rx = c.getresponse()
            return c
        except:
            tid = threading.current_thread().ident
            dbg(
                "\033[1;37;44mbad conn {:x}\n {}\n {}\033[0m".format(
                    tid, " ".join(str(x) for x in args), c.rx_path if c else "(null)"
                "\033[1;37;44mbad conn {:x}\n {} {}\n {}\033[0m".format(
                    tid, meth, path, c.rx_path if c else "(null)"
                )
            )

            self.closeconn(c)
            c = self.getconn()
            try:
                c.request(*list(args), headers=headers, **kwargs)
                c.request(meth, path, headers=headers, **kwargs)
                c.rx = c.getresponse()
                return c
            except:

@@ -386,7 +386,7 @@ class Gateway(object):
        path = dewin(path)

        web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
        c = self.sendreq("GET", web_path)
        c = self.sendreq("GET", web_path, {})
        if c.rx.status != 200:
            self.closeconn(c)
            log(

@@ -440,7 +440,7 @@ class Gateway(object):
            )
        )

        c = self.sendreq("GET", web_path, headers={"Range": hdr_range})
        c = self.sendreq("GET", web_path, {"Range": hdr_range})
        if c.rx.status != http.client.PARTIAL_CONTENT:
            self.closeconn(c)
            raise Exception(
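A recurring change in this diff is dropping mutable default arguments: `sendreq(self, *args, headers={}, **kwargs)` above, and later `VFS.__init__`, `send_headers`, `reply`, `urlq`, and `sanitize_fn`, all switch to explicit parameters. A minimal sketch of the pitfall being avoided (plain Python behavior, not copyparty code):

```python
# A default value is evaluated once, at definition time, so a dict/list
# default is shared by every call that omits the argument.
def sendreq_old(path, headers={}):
    headers[path] = True          # mutates the one shared default dict
    return headers

print(sendreq_old("/a"))          # {'/a': True}
print(sendreq_old("/b"))          # {'/a': True, '/b': True}  <- '/a' leaked in
```

Passing the dict explicitly (or defaulting to `None` and creating a fresh one inside the function, as the new `send_headers` does) avoids that cross-call leakage.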
@@ -54,10 +54,13 @@ MACOS = platform.system() == "Darwin"
|
||||
info = log = dbg = None
|
||||
|
||||
|
||||
print("{} v{} @ {}".format(
|
||||
platform.python_implementation(),
|
||||
".".join([str(x) for x in sys.version_info]),
|
||||
sys.executable))
|
||||
print(
|
||||
"{} v{} @ {}".format(
|
||||
platform.python_implementation(),
|
||||
".".join([str(x) for x in sys.version_info]),
|
||||
sys.executable,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
try:
|
||||
@@ -299,14 +302,14 @@ class Gateway(object):
|
||||
except:
|
||||
pass
|
||||
|
||||
def sendreq(self, *args, headers={}, **kwargs):
|
||||
def sendreq(self, meth, path, headers, **kwargs):
|
||||
tid = get_tid()
|
||||
if self.password:
|
||||
headers["Cookie"] = "=".join(["cppwd", self.password])
|
||||
|
||||
try:
|
||||
c = self.getconn(tid)
|
||||
c.request(*list(args), headers=headers, **kwargs)
|
||||
c.request(meth, path, headers=headers, **kwargs)
|
||||
return c.getresponse()
|
||||
except:
|
||||
dbg("bad conn")
|
||||
@@ -314,7 +317,7 @@ class Gateway(object):
|
||||
self.closeconn(tid)
|
||||
try:
|
||||
c = self.getconn(tid)
|
||||
c.request(*list(args), headers=headers, **kwargs)
|
||||
c.request(meth, path, headers=headers, **kwargs)
|
||||
return c.getresponse()
|
||||
except:
|
||||
info("http connection failed:\n" + traceback.format_exc())
|
||||
@@ -331,7 +334,7 @@ class Gateway(object):
|
||||
path = dewin(path)
|
||||
|
||||
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots&ls"
|
||||
r = self.sendreq("GET", web_path)
|
||||
r = self.sendreq("GET", web_path, {})
|
||||
if r.status != 200:
|
||||
self.closeconn()
|
||||
log(
|
||||
@@ -368,7 +371,7 @@ class Gateway(object):
|
||||
)
|
||||
)
|
||||
|
||||
r = self.sendreq("GET", web_path, headers={"Range": hdr_range})
|
||||
r = self.sendreq("GET", web_path, {"Range": hdr_range})
|
||||
if r.status != http.client.PARTIAL_CONTENT:
|
||||
self.closeconn()
|
||||
raise Exception(
|
||||
|
||||
@@ -1,7 +1,15 @@
# when running copyparty behind a reverse-proxy,
# make sure that copyparty allows at least as many clients as the proxy does,
# so run copyparty with -nc 512 if your nginx has the default limits
# (worker_processes 1, worker_connections 512)
# when running copyparty behind a reverse proxy,
# the following arguments are recommended:
#
# -nc 512       important, see next paragraph
# --http-only   lower latency on initial connection
# -i 127.0.0.1  only accept connections from nginx
#
# -nc must match or exceed the webserver's max number of concurrent clients;
# nginx default is 512 (worker_processes 1, worker_connections 512)
#
# you may also consider adding -j0 for CPU-intensive configurations
# (not that i can really think of any good examples)

upstream cpp {
    server 127.0.0.1:3923;
@@ -23,7 +23,7 @@ from textwrap import dedent
|
||||
from .__init__ import E, WINDOWS, VT100, PY2, unicode
|
||||
from .__version__ import S_VERSION, S_BUILD_DT, CODENAME
|
||||
from .svchub import SvcHub
|
||||
from .util import py_desc, align_tab, IMPLICATIONS, alltrace
|
||||
from .util import py_desc, align_tab, IMPLICATIONS
|
||||
|
||||
HAVE_SSL = True
|
||||
try:
|
||||
@@ -191,16 +191,6 @@ def sighandler(sig=None, frame=None):
|
||||
print("\n".join(msg))
|
||||
|
||||
|
||||
def stackmon(fp, ival):
|
||||
ctr = 0
|
||||
while True:
|
||||
ctr += 1
|
||||
time.sleep(ival)
|
||||
st = "{}, {}\n{}".format(ctr, time.time(), alltrace())
|
||||
with open(fp, "wb") as f:
|
||||
f.write(st.encode("utf-8", "replace"))
|
||||
|
||||
|
||||
def run_argparse(argv, formatter):
|
||||
ap = argparse.ArgumentParser(
|
||||
formatter_class=formatter,
|
||||
@@ -258,31 +248,32 @@ def run_argparse(argv, formatter):
|
||||
),
|
||||
)
|
||||
# fmt: off
|
||||
u = unicode
|
||||
ap2 = ap.add_argument_group('general options')
|
||||
ap2.add_argument("-c", metavar="PATH", type=str, action="append", help="add config file")
|
||||
ap2.add_argument("-c", metavar="PATH", type=u, action="append", help="add config file")
|
||||
ap2.add_argument("-nc", metavar="NUM", type=int, default=64, help="max num clients")
|
||||
ap2.add_argument("-j", metavar="CORES", type=int, default=1, help="max num cpu cores")
|
||||
ap2.add_argument("-a", metavar="ACCT", type=str, action="append", help="add account, USER:PASS; example [ed:wark")
|
||||
ap2.add_argument("-v", metavar="VOL", type=str, action="append", help="add volume, SRC:DST:FLAG; example [.::r], [/mnt/nas/music:/music:r:aed")
|
||||
ap2.add_argument("-a", metavar="ACCT", type=u, action="append", help="add account, USER:PASS; example [ed:wark")
|
||||
ap2.add_argument("-v", metavar="VOL", type=u, action="append", help="add volume, SRC:DST:FLAG; example [.::r], [/mnt/nas/music:/music:r:aed")
|
||||
ap2.add_argument("-ed", action="store_true", help="enable ?dots")
|
||||
ap2.add_argument("-emp", action="store_true", help="enable markdown plugins")
|
||||
ap2.add_argument("-mcr", metavar="SEC", type=int, default=60, help="md-editor mod-chk rate")
|
||||
ap2.add_argument("--dotpart", action="store_true", help="dotfile incomplete uploads")
|
||||
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="up2k min.size threshold (mswin-only)")
|
||||
ap2.add_argument("--urlform", metavar="MODE", type=str, default="print,get", help="how to handle url-forms; examples: [stash], [save,get]")
|
||||
ap2.add_argument("--urlform", metavar="MODE", type=u, default="print,get", help="how to handle url-forms; examples: [stash], [save,get]")
|
||||
|
||||
ap2 = ap.add_argument_group('network options')
|
||||
ap2.add_argument("-i", metavar="IP", type=str, default="0.0.0.0", help="ip to bind (comma-sep.)")
|
||||
ap2.add_argument("-p", metavar="PORT", type=str, default="3923", help="ports to bind (comma/range)")
|
||||
ap2.add_argument("-i", metavar="IP", type=u, default="0.0.0.0", help="ip to bind (comma-sep.)")
|
||||
ap2.add_argument("-p", metavar="PORT", type=u, default="3923", help="ports to bind (comma/range)")
|
||||
ap2.add_argument("--rproxy", metavar="DEPTH", type=int, default=1, help="which ip to keep; 0 = tcp, 1 = origin (first x-fwd), 2 = cloudflare, 3 = nginx, -1 = closest proxy")
|
||||
|
||||
ap2 = ap.add_argument_group('SSL/TLS options')
|
||||
ap2.add_argument("--http-only", action="store_true", help="disable ssl/tls")
|
||||
ap2.add_argument("--https-only", action="store_true", help="disable plaintext")
|
||||
ap2.add_argument("--ssl-ver", metavar="LIST", type=str, help="set allowed ssl/tls versions; [help] shows available versions; default is what your python version considers safe")
|
||||
ap2.add_argument("--ciphers", metavar="LIST", help="set allowed ssl/tls ciphers; [help] shows available ciphers")
|
||||
ap2.add_argument("--ssl-ver", metavar="LIST", type=u, help="set allowed ssl/tls versions; [help] shows available versions; default is what your python version considers safe")
|
||||
ap2.add_argument("--ciphers", metavar="LIST", type=u, help="set allowed ssl/tls ciphers; [help] shows available ciphers")
|
||||
ap2.add_argument("--ssl-dbg", action="store_true", help="dump some tls info")
|
||||
ap2.add_argument("--ssl-log", metavar="PATH", help="log master secrets")
|
||||
ap2.add_argument("--ssl-log", metavar="PATH", type=u, help="log master secrets")
|
||||
|
||||
ap2 = ap.add_argument_group('opt-outs')
|
||||
ap2.add_argument("-nw", action="store_true", help="disable writes (benchmark)")
|
||||
@@ -291,15 +282,16 @@ def run_argparse(argv, formatter):
|
||||
ap2.add_argument("--no-zip", action="store_true", help="disable download as zip/tar")
|
||||
|
||||
ap2 = ap.add_argument_group('safety options')
|
||||
ap2.add_argument("--ls", metavar="U[,V[,F]]", help="scan all volumes; arguments USER,VOL,FLAGS; example [**,*,ln,p,r]")
|
||||
ap2.add_argument("--salt", type=str, default="hunter2", help="up2k file-hash salt")
|
||||
ap2.add_argument("--ls", metavar="U[,V[,F]]", type=u, help="scan all volumes; arguments USER,VOL,FLAGS; example [**,*,ln,p,r]")
|
||||
ap2.add_argument("--salt", type=u, default="hunter2", help="up2k file-hash salt")
|
||||
|
||||
ap2 = ap.add_argument_group('logging options')
|
||||
ap2.add_argument("-q", action="store_true", help="quiet")
|
||||
ap2.add_argument("-lo", metavar="PATH", type=str, help="logfile, example: cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz")
|
||||
ap2.add_argument("-lo", metavar="PATH", type=u, help="logfile, example: cpp-%%Y-%%m%%d-%%H%%M%%S.txt.xz")
|
||||
ap2.add_argument("--log-conn", action="store_true", help="print tcp-server msgs")
|
||||
ap2.add_argument("--ihead", metavar="HEADER", action='append', help="dump incoming header")
|
||||
ap2.add_argument("--lf-url", metavar="RE", type=str, default=r"^/\.cpr/|\?th=[wj]$", help="dont log URLs matching")
|
||||
ap2.add_argument("--log-htp", action="store_true", help="print http-server threadpool scaling")
|
||||
ap2.add_argument("--ihead", metavar="HEADER", type=u, action='append', help="dump incoming header")
|
||||
ap2.add_argument("--lf-url", metavar="RE", type=u, default=r"^/\.cpr/|\?th=[wj]$", help="dont log URLs matching")
|
||||
|
||||
ap2 = ap.add_argument_group('admin panel options')
|
||||
ap2.add_argument("--no-rescan", action="store_true", help="disable ?scan (volume reindexing)")
|
||||
@@ -314,9 +306,9 @@ def run_argparse(argv, formatter):
|
||||
ap2.add_argument("--th-no-webp", action="store_true", help="disable webp output")
|
||||
ap2.add_argument("--th-ff-jpg", action="store_true", help="force jpg for video thumbs")
|
||||
ap2.add_argument("--th-poke", metavar="SEC", type=int, default=300, help="activity labeling cooldown")
|
||||
ap2.add_argument("--th-clean", metavar="SEC", type=int, default=43200, help="cleanup interval")
|
||||
ap2.add_argument("--th-clean", metavar="SEC", type=int, default=43200, help="cleanup interval; 0=disabled")
|
||||
ap2.add_argument("--th-maxage", metavar="SEC", type=int, default=604800, help="max folder age")
|
||||
ap2.add_argument("--th-covers", metavar="N,N", type=str, default="folder.png,folder.jpg,cover.png,cover.jpg", help="folder thumbnails to stat for")
|
||||
ap2.add_argument("--th-covers", metavar="N,N", type=u, default="folder.png,folder.jpg,cover.png,cover.jpg", help="folder thumbnails to stat for")
|
||||
|
||||
ap2 = ap.add_argument_group('database options')
|
||||
ap2.add_argument("-e2d", action="store_true", help="enable up2k database")
|
||||
@@ -325,24 +317,26 @@ def run_argparse(argv, formatter):
|
||||
ap2.add_argument("-e2t", action="store_true", help="enable metadata indexing")
|
||||
ap2.add_argument("-e2ts", action="store_true", help="enable metadata scanner, sets -e2t")
|
||||
ap2.add_argument("-e2tsr", action="store_true", help="rescan all metadata, sets -e2ts")
|
||||
ap2.add_argument("--hist", metavar="PATH", type=str, help="where to store volume state")
|
||||
ap2.add_argument("--hist", metavar="PATH", type=u, help="where to store volume state")
|
||||
ap2.add_argument("--no-hash", action="store_true", help="disable hashing during e2ds folder scans")
|
||||
ap2.add_argument("--no-mutagen", action="store_true", help="use ffprobe for tags instead")
|
||||
ap2.add_argument("--no-mtag-mt", action="store_true", help="disable tag-read parallelism")
|
||||
ap2.add_argument("-mtm", metavar="M=t,t,t", action="append", type=str, help="add/replace metadata mapping")
|
||||
ap2.add_argument("-mte", metavar="M,M,M", type=str, help="tags to index/display (comma-sep.)",
|
||||
ap2.add_argument("-mtm", metavar="M=t,t,t", type=u, action="append", help="add/replace metadata mapping")
|
||||
ap2.add_argument("-mte", metavar="M,M,M", type=u, help="tags to index/display (comma-sep.)",
|
||||
default="circle,album,.tn,artist,title,.bpm,key,.dur,.q,.vq,.aq,ac,vc,res,.fps")
|
||||
ap2.add_argument("-mtp", metavar="M=[f,]bin", action="append", type=str, help="read tag M using bin")
|
||||
ap2.add_argument("-mtp", metavar="M=[f,]bin", type=u, action="append", help="read tag M using bin")
|
||||
ap2.add_argument("--srch-time", metavar="SEC", type=int, default=30, help="search deadline")
|
||||
|
||||
ap2 = ap.add_argument_group('appearance options')
|
||||
ap2.add_argument("--css-browser", metavar="L", help="URL to additional CSS to include")
|
||||
ap2.add_argument("--css-browser", metavar="L", type=u, help="URL to additional CSS to include")
|
||||
|
||||
ap2 = ap.add_argument_group('debug options')
|
||||
ap2.add_argument("--no-sendfile", action="store_true", help="disable sendfile")
|
||||
ap2.add_argument("--no-scandir", action="store_true", help="disable scandir")
|
||||
ap2.add_argument("--no-fastboot", action="store_true", help="wait for up2k indexing")
|
||||
ap2.add_argument("--stackmon", metavar="P,S", help="write stacktrace to Path every S second")
|
||||
ap2.add_argument("--no-htp", action="store_true", help="disable httpserver threadpool, create threads as-needed instead")
|
||||
ap2.add_argument("--stackmon", metavar="P,S", type=u, help="write stacktrace to Path every S second")
|
||||
ap2.add_argument("--log-thrs", metavar="SEC", type=float, help="list active threads every SEC")
|
||||
|
||||
return ap.parse_args(args=argv[1:])
|
||||
# fmt: on
|
||||
@@ -382,16 +376,6 @@ def main(argv=None):
|
||||
except AssertionError:
|
||||
al = run_argparse(argv, Dodge11874)
|
||||
|
||||
if al.stackmon:
|
||||
fp, f = al.stackmon.rsplit(",", 1)
|
||||
f = int(f)
|
||||
t = threading.Thread(
|
||||
target=stackmon,
|
||||
args=(fp, f),
|
||||
)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
# propagate implications
|
||||
for k1, k2 in IMPLICATIONS:
|
||||
if getattr(al, k1):
|
||||
|
||||
@@ -1,8 +1,8 @@
# coding: utf-8

VERSION = (0, 11, 33)
VERSION = (0, 11, 38)
CODENAME = "the grid"
BUILD_DT = (2021, 7, 7)
BUILD_DT = (2021, 7, 13)

S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)
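For reference, the derived strings below the bumped version tuples render like this (plain Python, nothing assumed):

```python
VERSION = (0, 11, 38)
BUILD_DT = (2021, 7, 13)

print(".".join(map(str, VERSION)))                  # 0.11.38
print("{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT))  # 2021-07-13
```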
@@ -16,7 +16,7 @@ from .util import IMPLICATIONS, uncyg, undot, Pebkac, fsdec, fsenc, statdir
class VFS(object):
    """single level in the virtual fs"""

    def __init__(self, log, realpath, vpath, uread=[], uwrite=[], uadm=[], flags={}):
    def __init__(self, log, realpath, vpath, uread, uwrite, uadm, flags):
        self.log = log
        self.realpath = realpath  # absolute path on host filesystem
        self.vpath = vpath  # absolute path in the virtual filesystem

@@ -81,7 +81,7 @@ class VFS(object):

        # leaf does not exist; create and keep permissions blank
        vp = "{}/{}".format(self.vpath, dst).lstrip("/")
        vn = VFS(self.log, src, vp)
        vn = VFS(self.log, src, vp, [], [], [], {})
        vn.dbv = self.dbv or self
        self.nodes[dst] = vn
        return vn

@@ -497,10 +497,10 @@ class AuthSrv(object):

        if not mount:
            # -h says our defaults are CWD at root and read/write for everyone
            vfs = VFS(self.log_func, os.path.abspath("."), "", ["*"], ["*"])
            vfs = VFS(self.log_func, os.path.abspath("."), "", ["*"], ["*"], ["*"], {})
        elif "" not in mount:
            # there's volumes but no root; make root inaccessible
            vfs = VFS(self.log_func, None, "")
            vfs = VFS(self.log_func, None, "", [], [], [], {})
            vfs.flags["d2d"] = True

        maxdepth = 0
@@ -4,17 +4,11 @@ from __future__ import print_function, unicode_literals
|
||||
import time
|
||||
import threading
|
||||
|
||||
from .__init__ import PY2, WINDOWS, VT100
|
||||
from .broker_util import try_exec
|
||||
from .broker_mpw import MpWorker
|
||||
from .util import mp
|
||||
|
||||
|
||||
if PY2 and not WINDOWS:
|
||||
from multiprocessing.reduction import ForkingPickler
|
||||
from StringIO import StringIO as MemesIO # pylint: disable=import-error
|
||||
|
||||
|
||||
class BrokerMp(object):
|
||||
"""external api; manages MpWorkers"""
|
||||
|
||||
@@ -33,19 +27,17 @@ class BrokerMp(object):
|
||||
cores = mp.cpu_count()
|
||||
|
||||
self.log("broker", "booting {} subprocesses".format(cores))
|
||||
for n in range(cores):
|
||||
for n in range(1, cores + 1):
|
||||
q_pend = mp.Queue(1)
|
||||
q_yield = mp.Queue(64)
|
||||
|
||||
proc = mp.Process(target=MpWorker, args=(q_pend, q_yield, self.args, n))
|
||||
proc.q_pend = q_pend
|
||||
proc.q_yield = q_yield
|
||||
proc.nid = n
|
||||
proc.clients = {}
|
||||
proc.workload = 0
|
||||
|
||||
thr = threading.Thread(
|
||||
target=self.collector, args=(proc,), name="mp-collector"
|
||||
target=self.collector, args=(proc,), name="mp-sink-{}".format(n)
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
@@ -53,13 +45,6 @@ class BrokerMp(object):
|
||||
self.procs.append(proc)
|
||||
proc.start()
|
||||
|
||||
if not self.args.q:
|
||||
thr = threading.Thread(
|
||||
target=self.debug_load_balancer, name="mp-dbg-loadbalancer"
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
|
||||
def shutdown(self):
|
||||
self.log("broker", "shutting down")
|
||||
for n, proc in enumerate(self.procs):
|
||||
@@ -89,20 +74,6 @@ class BrokerMp(object):
|
||||
if dest == "log":
|
||||
self.log(*args)
|
||||
|
||||
elif dest == "workload":
|
||||
with self.mutex:
|
||||
proc.workload = args[0]
|
||||
|
||||
elif dest == "httpdrop":
|
||||
addr = args[0]
|
||||
|
||||
with self.mutex:
|
||||
del proc.clients[addr]
|
||||
if not proc.clients:
|
||||
proc.workload = 0
|
||||
|
||||
self.hub.tcpsrv.num_clients.add(-1)
|
||||
|
||||
elif dest == "retq":
|
||||
# response from previous ipc call
|
||||
with self.retpend_mutex:
|
||||
@@ -128,38 +99,9 @@ class BrokerMp(object):
|
||||
returns a Queue object which eventually contains the response if want_retval
|
||||
(not-impl here since nothing uses it yet)
|
||||
"""
|
||||
if dest == "httpconn":
|
||||
sck, addr = args
|
||||
sck2 = sck
|
||||
if PY2:
|
||||
buf = MemesIO()
|
||||
ForkingPickler(buf).dump(sck)
|
||||
sck2 = buf.getvalue()
|
||||
|
||||
proc = sorted(self.procs, key=lambda x: x.workload)[0]
|
||||
proc.q_pend.put([0, dest, [sck2, addr]])
|
||||
|
||||
with self.mutex:
|
||||
proc.clients[addr] = 50
|
||||
proc.workload += 50
|
||||
if dest == "listen":
|
||||
for p in self.procs:
|
||||
p.q_pend.put([0, dest, [args[0], len(self.procs)]])
|
||||
|
||||
else:
|
||||
raise Exception("what is " + str(dest))
|
||||
|
||||
def debug_load_balancer(self):
|
||||
fmt = "\033[1m{}\033[0;36m{:4}\033[0m "
|
||||
if not VT100:
|
||||
fmt = "({}{:4})"
|
||||
|
||||
last = ""
|
||||
while self.procs:
|
||||
msg = ""
|
||||
for proc in self.procs:
|
||||
msg += fmt.format(len(proc.clients), proc.workload)
|
||||
|
||||
if msg != last:
|
||||
last = msg
|
||||
with self.hub.log_mutex:
|
||||
print(msg)
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
@@ -3,18 +3,13 @@ from __future__ import print_function, unicode_literals
|
||||
from copyparty.authsrv import AuthSrv
|
||||
|
||||
import sys
|
||||
import time
|
||||
import signal
|
||||
import threading
|
||||
|
||||
from .__init__ import PY2, WINDOWS
|
||||
from .broker_util import ExceptionalQueue
|
||||
from .httpsrv import HttpSrv
|
||||
from .util import FAKE_MP
|
||||
|
||||
if PY2 and not WINDOWS:
|
||||
import pickle # nosec
|
||||
|
||||
|
||||
class MpWorker(object):
|
||||
"""one single mp instance"""
|
||||
@@ -25,10 +20,11 @@ class MpWorker(object):
|
||||
self.args = args
|
||||
self.n = n
|
||||
|
||||
self.log = self._log_disabled if args.q and not args.lo else self._log_enabled
|
||||
|
||||
self.retpend = {}
|
||||
self.retpend_mutex = threading.Lock()
|
||||
self.mutex = threading.Lock()
|
||||
self.workload_thr_alive = False
|
||||
|
||||
# we inherited signal_handler from parent,
|
||||
# replace it with something harmless
|
||||
@@ -39,8 +35,7 @@ class MpWorker(object):
|
||||
self.asrv = AuthSrv(args, None, False)
|
||||
|
||||
# instantiate all services here (TODO: inheritance?)
|
||||
self.httpsrv = HttpSrv(self, True)
|
||||
self.httpsrv.disconnect_func = self.httpdrop
|
||||
self.httpsrv = HttpSrv(self, n)
|
||||
|
||||
# on winxp and some other platforms,
|
||||
# use thr.join() to block all signals
|
||||
@@ -53,15 +48,15 @@ class MpWorker(object):
|
||||
# print('k')
|
||||
pass
|
||||
|
||||
def log(self, src, msg, c=0):
|
||||
def _log_enabled(self, src, msg, c=0):
|
||||
self.q_yield.put([0, "log", [src, msg, c]])
|
||||
|
||||
def _log_disabled(self, src, msg, c=0):
|
||||
pass
|
||||
|
||||
def logw(self, msg, c=0):
|
||||
self.log("mp{}".format(self.n), msg, c)
|
||||
|
||||
def httpdrop(self, addr):
|
||||
self.q_yield.put([0, "httpdrop", [addr]])
|
||||
|
||||
def main(self):
|
||||
while True:
|
||||
retq_id, dest, args = self.q_pend.get()
|
||||
@@ -73,24 +68,8 @@ class MpWorker(object):
|
||||
sys.exit(0)
|
||||
return
|
||||
|
||||
elif dest == "httpconn":
|
||||
sck, addr = args
|
||||
if PY2:
|
||||
sck = pickle.loads(sck) # nosec
|
||||
|
||||
if self.args.log_conn:
|
||||
self.log("%s %s" % addr, "|%sC-qpop" % ("-" * 4,), c="1;30")
|
||||
|
||||
self.httpsrv.accept(sck, addr)
|
||||
|
||||
with self.mutex:
|
||||
if not self.workload_thr_alive:
|
||||
self.workload_thr_alive = True
|
||||
thr = threading.Thread(
|
||||
target=self.thr_workload, name="mpw-workload"
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
elif dest == "listen":
|
||||
self.httpsrv.listen(args[0], args[1])
|
||||
|
||||
elif dest == "retq":
|
||||
# response from previous ipc call
|
||||
@@ -114,16 +93,3 @@ class MpWorker(object):
|
||||
|
||||
self.q_yield.put([retq_id, dest, args])
|
||||
return retq
|
||||
|
||||
def thr_workload(self):
|
||||
"""announce workloads to MpSrv (the mp controller / loadbalancer)"""
|
||||
# avoid locking in extract_filedata by tracking difference here
|
||||
while True:
|
||||
time.sleep(0.2)
|
||||
with self.mutex:
|
||||
if self.httpsrv.num_clients() == 0:
|
||||
# no clients rn, termiante thread
|
||||
self.workload_thr_alive = False
|
||||
return
|
||||
|
||||
self.q_yield.put([0, "workload", [self.httpsrv.workload]])
|
||||
|
||||
@@ -3,7 +3,6 @@ from __future__ import print_function, unicode_literals
|
||||
|
||||
import threading
|
||||
|
||||
from .authsrv import AuthSrv
|
||||
from .httpsrv import HttpSrv
|
||||
from .broker_util import ExceptionalQueue, try_exec
|
||||
|
||||
@@ -20,8 +19,7 @@ class BrokerThr(object):
|
||||
self.mutex = threading.Lock()
|
||||
|
||||
# instantiate all services here (TODO: inheritance?)
|
||||
self.httpsrv = HttpSrv(self)
|
||||
self.httpsrv.disconnect_func = self.httpdrop
|
||||
self.httpsrv = HttpSrv(self, None)
|
||||
|
||||
def shutdown(self):
|
||||
# self.log("broker", "shutting down")
|
||||
@@ -29,12 +27,8 @@ class BrokerThr(object):
|
||||
pass
|
||||
|
||||
def put(self, want_retval, dest, *args):
|
||||
if dest == "httpconn":
|
||||
sck, addr = args
|
||||
if self.args.log_conn:
|
||||
self.log("%s %s" % addr, "|%sC-qpop" % ("-" * 4,), c="1;30")
|
||||
|
||||
self.httpsrv.accept(sck, addr)
|
||||
if dest == "listen":
|
||||
self.httpsrv.listen(args[0], 1)
|
||||
|
||||
else:
|
||||
# new ipc invoking managed service in hub
|
||||
@@ -51,6 +45,3 @@ class BrokerThr(object):
|
||||
retq = ExceptionalQueue(1)
|
||||
retq.put(rv)
|
||||
return retq
|
||||
|
||||
def httpdrop(self, addr):
|
||||
self.hub.tcpsrv.num_clients.add(-1)
|
||||
|
||||
@@ -37,7 +37,6 @@ class HttpCli(object):
|
||||
self.ip = conn.addr[0]
|
||||
self.addr = conn.addr # type: tuple[str, int]
|
||||
self.args = conn.args
|
||||
self.is_mp = conn.is_mp
|
||||
self.asrv = conn.asrv # type: AuthSrv
|
||||
self.ico = conn.ico
|
||||
self.thumbcli = conn.thumbcli
|
||||
@@ -227,7 +226,7 @@ class HttpCli(object):
|
||||
except Pebkac:
|
||||
return False
|
||||
|
||||
def send_headers(self, length, status=200, mime=None, headers={}):
|
||||
def send_headers(self, length, status=200, mime=None, headers=None):
|
||||
response = ["{} {} {}".format(self.http_ver, status, HTTPCODE[status])]
|
||||
|
||||
if length is not None:
|
||||
@@ -237,7 +236,8 @@ class HttpCli(object):
|
||||
response.append("Connection: " + ("Keep-Alive" if self.keepalive else "Close"))
|
||||
|
||||
# headers{} overrides anything set previously
|
||||
self.out_headers.update(headers)
|
||||
if headers:
|
||||
self.out_headers.update(headers)
|
||||
|
||||
# default to utf8 html if no content-type is set
|
||||
if not mime:
|
||||
@@ -254,7 +254,7 @@ class HttpCli(object):
|
||||
except:
|
||||
raise Pebkac(400, "client d/c while replying headers")
|
||||
|
||||
def reply(self, body, status=200, mime=None, headers={}):
|
||||
def reply(self, body, status=200, mime=None, headers=None):
|
||||
# TODO something to reply with user-supplied values safely
|
||||
self.send_headers(len(body), status, mime, headers)
|
||||
|
||||
@@ -270,7 +270,7 @@ class HttpCli(object):
|
||||
self.log(body.rstrip())
|
||||
self.reply(b"<pre>" + body.encode("utf-8") + b"\r\n", *list(args), **kwargs)
|
||||
|
||||
def urlq(self, add={}, rm=[]):
|
||||
def urlq(self, add, rm):
|
||||
"""
|
||||
generates url query based on uparam (b, pw, all others)
|
||||
removing anything in rm, adding pairs in add
|
||||
@@ -342,6 +342,9 @@ class HttpCli(object):
|
||||
if "tree" in self.uparam:
|
||||
return self.tx_tree()
|
||||
|
||||
if "stack" in self.uparam:
|
||||
return self.tx_stack()
|
||||
|
||||
# conditional redirect to single volumes
|
||||
if self.vpath == "" and not self.ouparam:
|
||||
nread = len(self.rvol)
|
||||
@@ -371,9 +374,6 @@ class HttpCli(object):
|
||||
if "scan" in self.uparam:
|
||||
return self.scanvol()
|
||||
|
||||
if "stack" in self.uparam:
|
||||
return self.tx_stack()
|
||||
|
||||
return self.tx_browser()
|
||||
|
||||
def handle_options(self):
|
||||
@@ -483,7 +483,7 @@ class HttpCli(object):
|
||||
path = os.devnull
|
||||
|
||||
with open(fsenc(path), "wb", 512 * 1024) as f:
|
||||
post_sz, _, sha_b64 = hashcopy(self.conn, reader, f)
|
||||
post_sz, _, sha_b64 = hashcopy(reader, f)
|
||||
|
||||
if not self.args.nw:
|
||||
vfs, vrem = vfs.get_dbv(rem)
|
||||
@@ -715,7 +715,7 @@ class HttpCli(object):
|
||||
|
||||
with open(fsenc(path), "rb+", 512 * 1024) as f:
|
||||
f.seek(cstart[0])
|
||||
post_sz, _, sha_b64 = hashcopy(self.conn, reader, f)
|
||||
post_sz, _, sha_b64 = hashcopy(reader, f)
|
||||
|
||||
if sha_b64 != chash:
|
||||
raise Pebkac(
|
||||
@@ -795,7 +795,7 @@ class HttpCli(object):
|
||||
vfs, rem = self.asrv.vfs.get(self.vpath, self.uname, False, True)
|
||||
self._assert_safe_rem(rem)
|
||||
|
||||
sanitized = sanitize_fn(new_dir)
|
||||
sanitized = sanitize_fn(new_dir, "", [])
|
||||
|
||||
if not nullwrite:
|
||||
fdir = os.path.join(vfs.realpath, rem)
|
||||
@@ -832,7 +832,7 @@ class HttpCli(object):
|
||||
if not new_file.endswith(".md"):
|
||||
new_file += ".md"
|
||||
|
||||
sanitized = sanitize_fn(new_file)
|
||||
sanitized = sanitize_fn(new_file, "", [])
|
||||
|
||||
if not nullwrite:
|
||||
fdir = os.path.join(vfs.realpath, rem)
|
||||
@@ -865,7 +865,7 @@ class HttpCli(object):
|
||||
if p_file and not nullwrite:
|
||||
fdir = os.path.join(vfs.realpath, rem)
|
||||
fname = sanitize_fn(
|
||||
p_file, bad=[".prologue.html", ".epilogue.html"]
|
||||
p_file, "", [".prologue.html", ".epilogue.html"]
|
||||
)
|
||||
|
||||
if not os.path.isdir(fsenc(fdir)):
|
||||
@@ -882,7 +882,7 @@ class HttpCli(object):
|
||||
with ren_open(fname, "wb", 512 * 1024, **open_args) as f:
|
||||
f, fname = f["orz"]
|
||||
self.log("writing to {}/{}".format(fdir, fname))
|
||||
sz, sha512_hex, _ = hashcopy(self.conn, p_data, f)
|
||||
sz, sha512_hex, _ = hashcopy(p_data, f)
|
||||
if sz == 0:
|
||||
raise Pebkac(400, "empty files in post")
|
||||
|
||||
@@ -1065,7 +1065,7 @@ class HttpCli(object):
|
||||
raise Pebkac(400, "expected body, got {}".format(p_field))
|
||||
|
||||
with open(fsenc(fp), "wb", 512 * 1024) as f:
|
||||
sz, sha512, _ = hashcopy(self.conn, p_data, f)
|
||||
sz, sha512, _ = hashcopy(p_data, f)
|
||||
|
||||
new_lastmod = os.stat(fsenc(fp)).st_mtime
|
||||
new_lastmod3 = int(new_lastmod * 1000)
|
||||
@@ -1255,8 +1255,7 @@ class HttpCli(object):
|
||||
if use_sendfile:
|
||||
remains = sendfile_kern(lower, upper, f, self.s)
|
||||
else:
|
||||
actor = self.conn if self.is_mp else None
|
||||
remains = sendfile_py(lower, upper, f, self.s, actor)
|
||||
remains = sendfile_py(lower, upper, f, self.s)
|
||||
|
||||
if remains > 0:
|
||||
logmsg += " \033[31m" + unicode(upper - remains) + "\033[0m"
|
||||
@@ -1313,7 +1312,7 @@ class HttpCli(object):
|
||||
|
||||
fgen = vn.zipgen(rem, items, self.uname, dots, not self.args.no_scandir)
|
||||
# for f in fgen: print(repr({k: f[k] for k in ["vp", "ap"]}))
|
||||
bgen = packer(fgen, utf8="utf" in uarg, pre_crc="crc" in uarg)
|
||||
bgen = packer(self.log, fgen, utf8="utf" in uarg, pre_crc="crc" in uarg)
|
||||
bsent = 0
|
||||
for buf in bgen.gen():
|
||||
if not buf:
|
||||
@@ -1424,7 +1423,7 @@ class HttpCli(object):
|
||||
return True
|
||||
|
||||
def tx_mounts(self):
|
||||
suf = self.urlq(rm=["h"])
|
||||
suf = self.urlq({}, ["h"])
|
||||
rvol, wvol, avol = [
|
||||
[("/" + x).rstrip("/") + "/" for x in y]
|
||||
for y in [self.rvol, self.wvol, self.avol]
|
||||
@@ -1474,7 +1473,7 @@ class HttpCli(object):
|
||||
raise Pebkac(500, x)
|
||||
|
||||
def tx_stack(self):
|
||||
if not self.readable or not self.writable:
|
||||
if not self.avol:
|
||||
raise Pebkac(403, "not admin")
|
||||
|
||||
if self.args.no_stack:
|
||||
@@ -1635,7 +1634,7 @@ class HttpCli(object):
|
||||
if self.writable:
|
||||
perms.append("write")
|
||||
|
||||
url_suf = self.urlq()
|
||||
url_suf = self.urlq({}, [])
|
||||
is_ls = "ls" in self.uparam
|
||||
|
||||
tpl = "browser"
|
||||
|
||||
@@ -34,7 +34,6 @@ class HttpConn(object):
|
||||
|
||||
self.args = hsrv.args
|
||||
self.asrv = hsrv.asrv
|
||||
self.is_mp = hsrv.is_mp
|
||||
self.cert_path = hsrv.cert_path
|
||||
|
||||
enth = HAVE_PIL and not self.args.no_thumb
|
||||
@@ -45,7 +44,6 @@ class HttpConn(object):
|
||||
self.stopping = False
|
||||
self.nreq = 0
|
||||
self.nbyte = 0
|
||||
self.workload = 0
|
||||
self.u2idx = None
|
||||
self.log_func = hsrv.log
|
||||
self.lf_url = re.compile(self.args.lf_url) if self.args.lf_url else None
|
||||
@@ -184,11 +182,6 @@ class HttpConn(object):
|
||||
self.sr = Unrecv(self.s)
|
||||
|
||||
while not self.stopping:
|
||||
if self.is_mp:
|
||||
self.workload += 50
|
||||
if self.workload >= 2 ** 31:
|
||||
self.workload = 100
|
||||
|
||||
self.nreq += 1
|
||||
cli = HttpCli(self)
|
||||
if not cli.run():
|
||||
|
||||
@@ -4,8 +4,8 @@ from __future__ import print_function, unicode_literals
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import math
|
||||
import base64
|
||||
import struct
|
||||
import socket
|
||||
import threading
|
||||
|
||||
@@ -26,9 +26,15 @@ except ImportError:
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
from .__init__ import E, MACOS
|
||||
from .__init__ import E, PY2, MACOS
|
||||
from .util import spack, min_ex, start_stackmon, start_log_thrs
|
||||
from .httpconn import HttpConn
|
||||
|
||||
if PY2:
|
||||
import Queue as queue
|
||||
else:
|
||||
import queue
|
||||
|
||||
|
||||
class HttpSrv(object):
|
||||
"""
|
||||
@@ -36,19 +42,26 @@ class HttpSrv(object):
|
||||
relying on MpSrv for performance (HttpSrv is just plain threads)
|
||||
"""
|
||||
|
||||
def __init__(self, broker, is_mp=False):
|
||||
def __init__(self, broker, nid):
|
||||
self.broker = broker
|
||||
self.is_mp = is_mp
|
||||
self.nid = nid
|
||||
self.args = broker.args
|
||||
self.log = broker.log
|
||||
self.asrv = broker.asrv
|
||||
|
||||
self.disconnect_func = None
|
||||
self.name = "httpsrv" + ("-n{}-i{:x}".format(nid, os.getpid()) if nid else "")
|
||||
self.mutex = threading.Lock()
|
||||
self.stopping = False
|
||||
|
||||
self.clients = {}
|
||||
self.workload = 0
|
||||
self.workload_thr_alive = False
|
||||
self.tp_nthr = 0 # actual
|
||||
self.tp_ncli = 0 # fading
|
||||
self.tp_time = None # latest worker collect
|
||||
self.tp_q = None if self.args.no_htp else queue.LifoQueue()
|
||||
|
||||
self.srvs = []
|
||||
self.ncli = 0 # exact
|
||||
self.clients = {} # laggy
|
||||
self.nclimax = 0
|
||||
self.cb_ts = 0
|
||||
self.cb_v = 0
|
||||
|
||||
@@ -65,24 +78,155 @@ class HttpSrv(object):
|
||||
else:
|
||||
self.cert_path = None
|
||||
|
||||
if self.tp_q:
|
||||
self.start_threads(4)
|
||||
|
||||
name = "httpsrv-scaler" + ("-{}".format(nid) if nid else "")
|
||||
t = threading.Thread(target=self.thr_scaler, name=name)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
if nid:
|
||||
if self.args.stackmon:
|
||||
start_stackmon(self.args.stackmon, nid)
|
||||
|
||||
if self.args.log_thrs:
|
||||
start_log_thrs(self.log, self.args.log_thrs, nid)
|
||||
|
||||
def start_threads(self, n):
|
||||
self.tp_nthr += n
|
||||
if self.args.log_htp:
|
||||
self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)
|
||||
|
||||
for _ in range(n):
|
||||
thr = threading.Thread(
|
||||
target=self.thr_poolw,
|
||||
name=self.name + "-poolw",
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
|
||||
def stop_threads(self, n):
|
||||
self.tp_nthr -= n
|
||||
if self.args.log_htp:
|
||||
self.log(self.name, "workers -= {} = {}".format(n, self.tp_nthr), 6)
|
||||
|
||||
for _ in range(n):
|
||||
self.tp_q.put(None)
|
||||
|
||||
def thr_scaler(self):
|
||||
while True:
|
||||
time.sleep(2 if self.tp_ncli else 30)
|
||||
with self.mutex:
|
||||
self.tp_ncli = max(self.ncli, self.tp_ncli - 2)
|
||||
if self.tp_nthr > self.tp_ncli + 8:
|
||||
self.stop_threads(4)
|
||||
|
||||
def listen(self, sck, nlisteners):
|
||||
ip, port = sck.getsockname()
|
||||
self.srvs.append(sck)
|
||||
self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
|
||||
t = threading.Thread(
|
||||
target=self.thr_listen,
|
||||
args=(sck,),
|
||||
name="httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
|
||||
)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
def thr_listen(self, srv_sck):
|
||||
"""listens on a shared tcp server"""
|
||||
ip, port = srv_sck.getsockname()
|
||||
fno = srv_sck.fileno()
|
||||
msg = "subscribed @ {}:{} f{}".format(ip, port, fno)
|
||||
self.log(self.name, msg)
|
||||
while not self.stopping:
|
||||
if self.args.log_conn:
|
||||
self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="1;30")
|
||||
|
||||
if self.ncli >= self.nclimax:
|
||||
self.log(self.name, "at connection limit; waiting", 3)
|
||||
while self.ncli >= self.nclimax:
|
||||
time.sleep(0.1)
|
||||
|
||||
if self.args.log_conn:
|
||||
self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="1;30")
|
||||
|
||||
try:
|
||||
sck, addr = srv_sck.accept()
|
||||
except (OSError, socket.error) as ex:
|
||||
self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
|
||||
time.sleep(0.02)
|
||||
continue
|
||||
|
||||
if self.args.log_conn:
|
||||
m = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
|
||||
"-" * 3, ip, port % 8, port
|
||||
)
|
||||
self.log("%s %s" % addr, m, c="1;30")
|
||||
|
||||
self.accept(sck, addr)
|
||||
|
||||
def accept(self, sck, addr):
|
||||
"""takes an incoming tcp connection and creates a thread to handle it"""
|
||||
if self.args.log_conn:
|
||||
self.log("%s %s" % addr, "|%sC-cthr" % ("-" * 5,), c="1;30")
|
||||
now = time.time()
|
||||
|
||||
if self.tp_time and now - self.tp_time > 300:
|
||||
self.tp_q = None
|
||||
|
||||
if self.tp_q:
|
||||
self.tp_q.put((sck, addr))
|
||||
with self.mutex:
|
||||
self.ncli += 1
|
||||
self.tp_time = self.tp_time or now
|
||||
self.tp_ncli = max(self.tp_ncli, self.ncli + 1)
|
||||
if self.tp_nthr < self.ncli + 4:
|
||||
self.start_threads(8)
|
||||
return
|
||||
|
||||
if not self.args.no_htp:
|
||||
m = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
|
||||
self.log(self.name, m, 1)
|
||||
|
||||
with self.mutex:
|
||||
self.ncli += 1
|
||||
|
||||
thr = threading.Thread(
|
||||
target=self.thr_client,
|
||||
args=(sck, addr),
|
||||
name="httpsrv-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
|
||||
name="httpconn-{}-{}".format(addr[0].split(".", 2)[-1][-6:], addr[1]),
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
|
||||
def num_clients(self):
|
||||
with self.mutex:
|
||||
return len(self.clients)
|
||||
def thr_poolw(self):
|
||||
while True:
|
||||
task = self.tp_q.get()
|
||||
if not task:
|
||||
break
|
||||
|
||||
with self.mutex:
|
||||
self.tp_time = None
|
||||
|
||||
try:
|
||||
sck, addr = task
|
||||
me = threading.current_thread()
|
||||
me.name = "httpconn-{}-{}".format(
|
||||
addr[0].split(".", 2)[-1][-6:], addr[1]
|
||||
)
|
||||
self.thr_client(sck, addr)
|
||||
me.name = self.name + "-poolw"
|
||||
except:
|
||||
self.log(self.name, "thr_client: " + min_ex(), 3)
|
||||
|
||||
def shutdown(self):
|
||||
self.stopping = True
|
||||
for srv in self.srvs:
|
||||
try:
|
||||
srv.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
clients = list(self.clients.keys())
|
||||
for cli in clients:
|
||||
try:
|
||||
@@ -90,7 +234,14 @@ class HttpSrv(object):
|
||||
except:
|
||||
pass
|
||||
|
||||
self.log("httpsrv-n", "ok bye")
|
||||
if self.tp_q:
|
||||
self.stop_threads(self.tp_nthr)
|
||||
for _ in range(10):
|
||||
time.sleep(0.05)
|
||||
if self.tp_q.empty():
|
||||
break
|
||||
|
||||
self.log(self.name, "ok bye")
|
||||
|
||||
def thr_client(self, sck, addr):
|
||||
"""thread managing one tcp client"""
|
||||
@@ -100,25 +251,15 @@ class HttpSrv(object):
|
||||
with self.mutex:
|
||||
self.clients[cli] = 0
|
||||
|
||||
if self.is_mp:
|
||||
self.workload += 50
|
||||
if not self.workload_thr_alive:
|
||||
self.workload_thr_alive = True
|
||||
thr = threading.Thread(
|
||||
target=self.thr_workload, name="httpsrv-workload"
|
||||
)
|
||||
thr.daemon = True
|
||||
thr.start()
|
||||
|
||||
fno = sck.fileno()
|
||||
try:
|
||||
if self.args.log_conn:
|
||||
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 6,), c="1;30")
|
||||
self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="1;30")
|
||||
|
||||
cli.run()
|
||||
|
||||
except (OSError, socket.error) as ex:
|
||||
if ex.errno not in [10038, 10054, 107, 57, 9]:
|
||||
if ex.errno not in [10038, 10054, 107, 57, 49, 9]:
|
||||
self.log(
|
||||
"%s %s" % addr,
|
||||
"run({}): {}".format(fno, ex),
|
||||
@@ -128,7 +269,7 @@ class HttpSrv(object):
|
||||
finally:
|
||||
sck = cli.s
|
||||
if self.args.log_conn:
|
||||
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 7,), c="1;30")
|
||||
self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="1;30")
|
||||
|
||||
try:
|
||||
fno = sck.fileno()
|
||||
@@ -152,35 +293,7 @@ class HttpSrv(object):
|
||||
finally:
|
||||
with self.mutex:
|
||||
del self.clients[cli]
|
||||
|
||||
if self.disconnect_func:
|
||||
self.disconnect_func(addr) # pylint: disable=not-callable
|
||||
|
||||
def thr_workload(self):
|
||||
"""indicates the python interpreter workload caused by this HttpSrv"""
|
||||
# avoid locking in extract_filedata by tracking difference here
|
||||
while True:
|
||||
time.sleep(0.2)
|
||||
with self.mutex:
|
||||
if not self.clients:
|
||||
# no clients rn, termiante thread
|
||||
self.workload_thr_alive = False
|
||||
self.workload = 0
|
||||
return
|
||||
|
||||
total = 0
|
||||
with self.mutex:
|
||||
for cli in self.clients.keys():
|
||||
now = cli.workload
|
||||
delta = now - self.clients[cli]
|
||||
if delta < 0:
|
||||
# was reset in HttpCli to prevent overflow
|
||||
delta = now
|
||||
|
||||
total += delta
|
||||
self.clients[cli] = now
|
||||
|
||||
self.workload = total
|
||||
self.ncli -= 1
|
||||
|
||||
def cachebuster(self):
|
||||
if time.time() - self.cb_ts < 1:
|
||||
@@ -199,7 +312,7 @@ class HttpSrv(object):
|
||||
except:
|
||||
pass
|
||||
|
||||
v = base64.urlsafe_b64encode(struct.pack(">xxL", int(v)))
|
||||
v = base64.urlsafe_b64encode(spack(b">xxL", int(v)))
|
||||
self.cb_v = v.decode("ascii")[-4:]
|
||||
self.cb_ts = time.time()
|
||||
return self.cb_v
|
||||
|
||||
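The HttpSrv rewrite above drops the per-connection workload bookkeeping in favor of a LIFO thread pool: `accept()` queues the socket and grows the pool when it runs low, while `thr_scaler` trims it by pushing `None` sentinels that make idle workers exit. A condensed sketch of that pattern (simplified names and numbers, not the actual copyparty code):

```python
import queue
import threading
import time

class Pool:
    def __init__(self):
        self.q = queue.LifoQueue()   # LIFO keeps recently-active (warm) workers busy
        self.nthr = 0

    def grow(self, n):
        self.nthr += n
        for _ in range(n):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.start()

    def shrink(self, n):
        self.nthr -= n
        for _ in range(n):
            self.q.put(None)         # sentinel: one idle worker exits

    def worker(self):
        while True:
            task = self.q.get()
            if task is None:
                return               # told to shrink
            task()                   # handle one client connection

pool = Pool()
pool.grow(4)
pool.q.put(lambda: print("handled one connection"))
time.sleep(0.2)                      # give a worker time to pick it up
pool.shrink(4)
```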
@@ -33,10 +33,11 @@ class QFile(object):
|
||||
class StreamTar(object):
|
||||
"""construct in-memory tar file from the given path"""
|
||||
|
||||
def __init__(self, fgen, **kwargs):
|
||||
def __init__(self, log, fgen, **kwargs):
|
||||
self.ci = 0
|
||||
self.co = 0
|
||||
self.qfile = QFile()
|
||||
self.log = log
|
||||
self.fgen = fgen
|
||||
self.errf = None
|
||||
|
||||
@@ -91,7 +92,8 @@ class StreamTar(object):
|
||||
errors.append([f["vp"], repr(ex)])
|
||||
|
||||
if errors:
|
||||
self.errf = errdesc(errors)
|
||||
self.errf, txt = errdesc(errors)
|
||||
self.log("\n".join(([repr(self.errf)] + txt[1:])))
|
||||
self.ser(self.errf)
|
||||
|
||||
self.tar.close()
|
||||
|
||||
@@ -25,4 +25,4 @@ def errdesc(errors):
        "vp": "archive-errors-{}.txt".format(dt),
        "ap": tf_path,
        "st": os.stat(tf_path),
    }
    }, report
@@ -11,7 +11,7 @@ from datetime import datetime, timedelta
|
||||
import calendar
|
||||
|
||||
from .__init__ import E, PY2, WINDOWS, MACOS, VT100
|
||||
from .util import mp
|
||||
from .util import mp, start_log_thrs, start_stackmon
|
||||
from .authsrv import AuthSrv
|
||||
from .tcpsrv import TcpSrv
|
||||
from .up2k import Up2k
|
||||
@@ -42,6 +42,12 @@ class SvcHub(object):
|
||||
if args.lo:
|
||||
self._setup_logfile(printed)
|
||||
|
||||
if args.stackmon:
|
||||
start_stackmon(args.stackmon, 0)
|
||||
|
||||
if args.log_thrs:
|
||||
start_log_thrs(self.log, args.log_thrs, 0)
|
||||
|
||||
# initiate all services to manage
|
||||
self.asrv = AuthSrv(self.args, self.log, False)
|
||||
if args.ls:
|
||||
@@ -222,16 +228,13 @@ class SvcHub(object):
|
||||
vmin = sys.version_info[1]
|
||||
if WINDOWS:
|
||||
msg = "need python 3.3 or newer for multiprocessing;"
|
||||
if PY2:
|
||||
# py2 pickler doesn't support winsock
|
||||
return msg
|
||||
elif vmin < 3:
|
||||
if PY2 or vmin < 3:
|
||||
return msg
|
||||
elif MACOS:
|
||||
return "multiprocessing is wonky on mac osx;"
|
||||
else:
|
||||
msg = "need python 2.7 or 3.3+ for multiprocessing;"
|
||||
if not PY2 and vmin < 3:
|
||||
msg = "need python 3.3+ for multiprocessing;"
|
||||
if PY2 or vmin < 3:
|
||||
return msg
|
||||
|
||||
try:
|
||||
|
||||
@@ -4,15 +4,14 @@ from __future__ import print_function, unicode_literals
|
||||
import os
|
||||
import time
|
||||
import zlib
|
||||
import struct
|
||||
from datetime import datetime
|
||||
|
||||
from .sutil import errdesc
|
||||
from .util import yieldfile, sanitize_fn
|
||||
from .util import yieldfile, sanitize_fn, spack, sunpack
|
||||
|
||||
|
||||
def dostime2unix(buf):
|
||||
t, d = struct.unpack("<HH", buf)
|
||||
t, d = sunpack(b"<HH", buf)
|
||||
|
||||
ts = (t & 0x1F) * 2
|
||||
tm = (t >> 5) & 0x3F
|
||||
@@ -36,13 +35,13 @@ def unixtime2dos(ts):
|
||||
|
||||
bd = ((dy - 1980) << 9) + (dm << 5) + dd
|
||||
bt = (th << 11) + (tm << 5) + ts // 2
|
||||
return struct.pack("<HH", bt, bd)
|
||||
return spack(b"<HH", bt, bd)
|
||||
|
||||
|
||||
def gen_fdesc(sz, crc32, z64):
|
||||
ret = b"\x50\x4b\x07\x08"
|
||||
fmt = "<LQQ" if z64 else "<LLL"
|
||||
ret += struct.pack(fmt, crc32, sz, sz)
|
||||
fmt = b"<LQQ" if z64 else b"<LLL"
|
||||
ret += spack(fmt, crc32, sz, sz)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -66,7 +65,7 @@ def gen_hdr(h_pos, fn, sz, lastmod, utf8, crc32, pre_crc):
|
||||
req_ver = b"\x2d\x00" if z64 else b"\x0a\x00"
|
||||
|
||||
if crc32:
|
||||
crc32 = struct.pack("<L", crc32)
|
||||
crc32 = spack(b"<L", crc32)
|
||||
else:
|
||||
crc32 = b"\x00" * 4
|
||||
|
||||
@@ -87,14 +86,14 @@ def gen_hdr(h_pos, fn, sz, lastmod, utf8, crc32, pre_crc):
|
||||
# however infozip does actual sz and it even works on winxp
|
||||
# (same reasning for z64 extradata later)
|
||||
vsz = 0xFFFFFFFF if z64 else sz
|
||||
ret += struct.pack("<LL", vsz, vsz)
|
||||
ret += spack(b"<LL", vsz, vsz)
|
||||
|
||||
# windows support (the "?" replace below too)
|
||||
fn = sanitize_fn(fn, ok="/")
|
||||
fn = sanitize_fn(fn, "/", [])
|
||||
bfn = fn.encode("utf-8" if utf8 else "cp437", "replace").replace(b"?", b"_")
|
||||
|
||||
z64_len = len(z64v) * 8 + 4 if z64v else 0
|
||||
ret += struct.pack("<HH", len(bfn), z64_len)
|
||||
ret += spack(b"<HH", len(bfn), z64_len)
|
||||
|
||||
if h_pos is not None:
|
||||
# 2b comment, 2b diskno
|
||||
@@ -106,12 +105,12 @@ def gen_hdr(h_pos, fn, sz, lastmod, utf8, crc32, pre_crc):
|
||||
ret += b"\x01\x00\x00\x00\xa4\x81"
|
||||
|
||||
# 4b local-header-ofs
|
||||
ret += struct.pack("<L", min(h_pos, 0xFFFFFFFF))
|
||||
ret += spack(b"<L", min(h_pos, 0xFFFFFFFF))
|
||||
|
||||
ret += bfn
|
||||
|
||||
if z64v:
|
||||
ret += struct.pack("<HH" + "Q" * len(z64v), 1, len(z64v) * 8, *z64v)
|
||||
ret += spack(b"<HH" + b"Q" * len(z64v), 1, len(z64v) * 8, *z64v)
|
||||
|
||||
return ret
|
||||
|
||||
@@ -136,7 +135,7 @@ def gen_ecdr(items, cdir_pos, cdir_end):
|
||||
need_64 = nitems == 0xFFFF or 0xFFFFFFFF in [csz, cpos]
|
||||
|
||||
# 2b tnfiles, 2b dnfiles, 4b dir sz, 4b dir pos
|
||||
ret += struct.pack("<HHLL", nitems, nitems, csz, cpos)
|
||||
ret += spack(b"<HHLL", nitems, nitems, csz, cpos)
|
||||
|
||||
# 2b comment length
|
||||
ret += b"\x00\x00"
|
||||
@@ -163,7 +162,7 @@ def gen_ecdr64(items, cdir_pos, cdir_end):
|
||||
|
||||
# 8b tnfiles, 8b dnfiles, 8b dir sz, 8b dir pos
|
||||
cdir_sz = cdir_end - cdir_pos
|
||||
ret += struct.pack("<QQQQ", len(items), len(items), cdir_sz, cdir_pos)
|
||||
ret += spack(b"<QQQQ", len(items), len(items), cdir_sz, cdir_pos)
|
||||
|
||||
return ret
|
||||
|
||||
@@ -178,13 +177,14 @@ def gen_ecdr64_loc(ecdr64_pos):
|
||||
ret = b"\x50\x4b\x06\x07"
|
||||
|
||||
# 4b cdisk, 8b start of ecdr64, 4b ndisks
|
||||
ret += struct.pack("<LQL", 0, ecdr64_pos, 1)
|
||||
ret += spack(b"<LQL", 0, ecdr64_pos, 1)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
class StreamZip(object):
|
||||
def __init__(self, fgen, utf8=False, pre_crc=False):
|
||||
def __init__(self, log, fgen, utf8=False, pre_crc=False):
|
||||
self.log = log
|
||||
self.fgen = fgen
|
||||
self.utf8 = utf8
|
||||
self.pre_crc = pre_crc
|
||||
@@ -247,8 +247,8 @@ class StreamZip(object):
|
||||
errors.append([f["vp"], repr(ex)])
|
||||
|
||||
if errors:
|
||||
errf = errdesc(errors)
|
||||
print(repr(errf))
|
||||
errf, txt = errdesc(errors)
|
||||
self.log("\n".join(([repr(errf)] + txt[1:])))
|
||||
for x in self.ser(errf):
|
||||
yield x
|
||||
|
||||
|
||||
@@ -2,11 +2,9 @@
|
||||
from __future__ import print_function, unicode_literals
|
||||
|
||||
import re
|
||||
import time
|
||||
import socket
|
||||
import select
|
||||
|
||||
from .util import chkcmd, Counter
|
||||
from .util import chkcmd
|
||||
|
||||
|
||||
class TcpSrv(object):
|
||||
@@ -20,7 +18,6 @@ class TcpSrv(object):
|
||||
self.args = hub.args
|
||||
self.log = hub.log
|
||||
|
||||
self.num_clients = Counter()
|
||||
self.stopping = False
|
||||
|
||||
ip = "127.0.0.1"
|
||||
@@ -66,47 +63,13 @@ class TcpSrv(object):
|
||||
for srv in self.srv:
|
||||
srv.listen(self.args.nc)
|
||||
ip, port = srv.getsockname()
|
||||
msg = "listening @ {0}:{1}".format(ip, port)
|
||||
fno = srv.fileno()
|
||||
msg = "listening @ {}:{} f{}".format(ip, port, fno)
|
||||
self.log("tcpsrv", msg)
|
||||
if self.args.q:
|
||||
print(msg)
|
||||
|
||||
while not self.stopping:
|
||||
if self.args.log_conn:
|
||||
self.log("tcpsrv", "|%sC-ncli" % ("-" * 1,), c="1;30")
|
||||
|
||||
if self.num_clients.v >= self.args.nc:
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
|
||||
if self.args.log_conn:
|
||||
self.log("tcpsrv", "|%sC-acc1" % ("-" * 2,), c="1;30")
|
||||
|
||||
try:
|
||||
# macos throws bad-fd
|
||||
ready, _, _ = select.select(self.srv, [], [])
|
||||
except:
|
||||
ready = []
|
||||
if not self.stopping:
|
||||
raise
|
||||
|
||||
for srv in ready:
|
||||
if self.stopping:
|
||||
break
|
||||
|
||||
sck, addr = srv.accept()
|
||||
sip, sport = srv.getsockname()
|
||||
if self.args.log_conn:
|
||||
self.log(
|
||||
"%s %s" % addr,
|
||||
"|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
|
||||
"-" * 3, sip, sport % 8, sport
|
||||
),
|
||||
c="1;30",
|
||||
)
|
||||
|
||||
self.num_clients.add()
|
||||
self.hub.broker.put(False, "httpconn", sck, addr)
|
||||
self.hub.broker.put(False, "listen", srv)
|
||||
|
||||
def shutdown(self):
|
||||
self.stopping = True
|
||||
|
||||
@@ -49,7 +49,7 @@ except:
|
||||
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
|
||||
# ffmpeg -formats
|
||||
FMT_PIL = "bmp dib gif icns ico jpg jpeg jp2 jpx pcx png pbm pgm ppm pnm sgi tga tif tiff webp xbm dds xpm"
|
||||
FMT_FF = "av1 asf avi flv m4v mkv mjpeg mjpg mpg mpeg mpg2 mpeg2 h264 avc h265 hevc mov 3gp mp4 ts mpegts nut ogv ogm rm vob webm wmv"
|
||||
FMT_FF = "av1 asf avi flv m4v mkv mjpeg mjpg mpg mpeg mpg2 mpeg2 h264 avc mts h265 hevc mov 3gp mp4 ts mpegts nut ogv ogm rm vob webm wmv"
|
||||
|
||||
if HAVE_HEIF:
|
||||
FMT_PIL += " heif heifs heic heics"
|
||||
@@ -130,9 +130,10 @@ class ThumbSrv(object):
|
||||
msg += ", ".join(missing)
|
||||
self.log(msg, c=3)
|
||||
|
||||
t = threading.Thread(target=self.cleaner, name="thumb-cleaner")
|
||||
t.daemon = True
|
||||
t.start()
|
||||
if self.args.th_clean:
|
||||
t = threading.Thread(target=self.cleaner, name="thumb-cleaner")
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
def log(self, msg, c=0):
|
||||
self.log_func("thumb", msg, c)
|
||||
@@ -259,7 +260,7 @@ class ThumbSrv(object):
|
||||
pass # default q = 75
|
||||
|
||||
if im.mode not in fmts:
|
||||
print("conv {}".format(im.mode))
|
||||
# print("conv {}".format(im.mode))
|
||||
im = im.convert("RGB")
|
||||
|
||||
im.save(tpath, quality=40, method=6)
|
||||
|
||||
@@ -103,13 +103,15 @@ class Up2k(object):
|
||||
self.deferred_init()
|
||||
else:
|
||||
t = threading.Thread(
|
||||
target=self.deferred_init,
|
||||
name="up2k-deferred-init",
|
||||
target=self.deferred_init, name="up2k-deferred-init", args=(0.5,)
|
||||
)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
def deferred_init(self):
|
||||
def deferred_init(self, wait=0):
|
||||
if wait:
|
||||
time.sleep(wait)
|
||||
|
||||
all_vols = self.asrv.vfs.all_vols
|
||||
have_e2d = self.init_indexes(all_vols)
|
||||
|
||||
@@ -193,7 +195,7 @@ class Up2k(object):

return True, ret

def init_indexes(self, all_vols, scan_vols=[]):
def init_indexes(self, all_vols, scan_vols=None):
self.pp = ProgressPrinter()
vols = all_vols.values()
t0 = time.time()
@@ -989,7 +991,7 @@ class Up2k(object):
if cj["ptop"] not in self.registry:
raise Pebkac(410, "location unavailable")

cj["name"] = sanitize_fn(cj["name"], bad=[".prologue.html", ".epilogue.html"])
cj["name"] = sanitize_fn(cj["name"], "", [".prologue.html", ".epilogue.html"])
cj["poke"] = time.time()
wark = self._get_wark(cj)
now = time.time()

@@ -16,6 +16,7 @@ import mimetypes
import contextlib
import subprocess as sp # nosec
from datetime import datetime
from collections import Counter

from .__init__ import PY2, WINDOWS, ANYWIN
from .stolen import surrogateescape
@@ -42,6 +43,20 @@ else:
from Queue import Queue # pylint: disable=import-error,no-name-in-module
from StringIO import StringIO as BytesIO


try:
struct.unpack(b">i", b"idgi")
spack = struct.pack
sunpack = struct.unpack
except:

def spack(f, *a, **ka):
return struct.pack(f.decode("ascii"), *a, **ka)

def sunpack(f, *a, **ka):
return struct.unpack(f.decode("ascii"), *a, **ka)


surrogateescape.register_surrogateescape()
FS_ENCODING = sys.getfilesystemencoding()
if WINDOWS and PY2:
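the `spack`/`sunpack` shim above probes whether the interpreter's `struct` module accepts `bytes` format strings and, if not, falls back to decoding the format to `str`, so callers can pass `bytes` formats unconditionally; condensed, with an illustrative usage line added:

```py
import struct

try:
    struct.unpack(b">i", b"idgi")  # probe: are bytes format strings accepted?
    spack, sunpack = struct.pack, struct.unpack
except Exception:
    def spack(f, *a, **ka):
        return struct.pack(f.decode("ascii"), *a, **ka)

    def sunpack(f, *a, **ka):
        return struct.unpack(f.decode("ascii"), *a, **ka)

# either way, bytes formats now work:
assert sunpack(b">H", spack(b">H", 258)) == (258,)
```
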
@@ -123,20 +138,6 @@ REKOBO_KEY = {
REKOBO_LKEY = {k.lower(): v for k, v in REKOBO_KEY.items()}


class Counter(object):
def __init__(self, v=0):
self.v = v
self.mutex = threading.Lock()

def add(self, delta=1):
with self.mutex:
self.v += delta

def set(self, absval):
with self.mutex:
self.v = absval


class Cooldown(object):
def __init__(self, maxage):
self.maxage = maxage
@@ -231,7 +232,7 @@ def nuprint(msg):

def rice_tid():
tid = threading.current_thread().ident
c = struct.unpack(b"B" * 5, struct.pack(b">Q", tid)[-5:])
c = sunpack(b"B" * 5, spack(b">Q", tid)[-5:])
return "".join("\033[1;37;48;5;{}m{:02x}".format(x, x) for x in c) + "\033[0m"


@@ -282,15 +283,69 @@ def alltrace():
return "\n".join(rret + bret)


def start_stackmon(arg_str, nid):
suffix = "-{}".format(nid) if nid else ""
fp, f = arg_str.rsplit(",", 1)
f = int(f)
t = threading.Thread(
target=stackmon,
args=(fp, f, suffix),
name="stackmon" + suffix,
)
t.daemon = True
t.start()


def stackmon(fp, ival, suffix):
ctr = 0
while True:
ctr += 1
time.sleep(ival)
st = "{}, {}\n{}".format(ctr, time.time(), alltrace())
with open(fp + suffix, "wb") as f:
f.write(st.encode("utf-8", "replace"))


def start_log_thrs(logger, ival, nid):
ival = int(ival)
tname = lname = "log-thrs"
if nid:
tname = "logthr-n{}-i{:x}".format(nid, os.getpid())
lname = tname[3:]

t = threading.Thread(
target=log_thrs,
args=(logger, ival, lname),
name=tname,
)
t.daemon = True
t.start()


def log_thrs(log, ival, name):
while True:
time.sleep(ival)
tv = [x.name for x in threading.enumerate()]
tv = [
x.split("-")[0]
if x.startswith("httpconn-") or x.startswith("thumb-")
else "listen"
if "-listen-" in x
else x
for x in tv
if not x.startswith("pydevd.")
]
tv = ["{}\033[36m{}".format(v, k) for k, v in sorted(Counter(tv).items())]
log(name, "\033[0m \033[33m".join(tv), 3)


def min_ex():
et, ev, tb = sys.exc_info()
tb = traceback.extract_tb(tb, 2)
ex = [
"{} @ {} <{}>: {}".format(fp.split(os.sep)[-1], ln, fun, txt)
for fp, ln, fun, txt in tb
]
ex.append("{}: {}".format(et.__name__, ev))
return "\n".join(ex)
tb = traceback.extract_tb(tb)
fmt = "{} @ {} <{}>: {}"
ex = [fmt.format(fp.split(os.sep)[-1], ln, fun, txt) for fp, ln, fun, txt in tb]
ex.append("[{}] {}".format(et.__name__, ev))
return "\n".join(ex[-8:])


@contextlib.contextmanager
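`log_thrs` above collapses thread names into a handful of prefixes and tallies them with `collections.Counter` before logging; a small sketch of that grouping step with made-up thread names (the name patterns come from the hunk, the data does not):

```py
from collections import Counter

names = ["httpconn-1", "httpconn-2", "thumb-7", "main", "pydevd.writer"]

grouped = [
    n.split("-")[0] if n.startswith(("httpconn-", "thumb-")) else n
    for n in names
    if not n.startswith("pydevd.")  # debugger threads are ignored
]
print(sorted(Counter(grouped).items()))
# [('httpconn', 2), ('main', 1), ('thumb', 1)]
```
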
@@ -674,7 +729,7 @@ def undot(path):
return "/".join(ret)


def sanitize_fn(fn, ok="", bad=[]):
def sanitize_fn(fn, ok, bad):
if "/" not in ok:
fn = fn.replace("\\", "/").split("/")[-1]

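`sanitize_fn` above loses its `ok=""` / `bad=[]` defaults, and the same change shows up in `init_indexes` and the test `Cfg` helpers further down; a mutable default like `[]` is evaluated once at function definition, so every call shares (and can pollute) the same list. A minimal illustration of the pitfall and the usual `None` idiom:

```py
def risky(x, acc=[]):       # one shared list for every call
    acc.append(x)
    return acc

def safe(x, acc=None):      # fresh list unless the caller passes one
    acc = acc if acc is not None else []
    acc.append(x)
    return acc

print(risky(1), risky(2))   # [1, 2] [1, 2]  <- state leaked between calls
print(safe(1), safe(2))     # [1] [2]
```
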
@@ -904,16 +959,10 @@ def yieldfile(fn):
yield buf


def hashcopy(actor, fin, fout):
is_mp = actor.is_mp
def hashcopy(fin, fout):
hashobj = hashlib.sha512()
tlen = 0
for buf in fin:
if is_mp:
actor.workload += 1
if actor.workload > 2 ** 31:
actor.workload = 100

tlen += len(buf)
hashobj.update(buf)
fout.write(buf)
@@ -924,15 +973,10 @@ def hashcopy(actor, fin, fout):
return tlen, hashobj.hexdigest(), digest_b64


def sendfile_py(lower, upper, f, s, actor=None):
def sendfile_py(lower, upper, f, s):
remains = upper - lower
f.seek(lower)
while remains > 0:
if actor:
actor.workload += 1
if actor.workload > 2 ** 31:
actor.workload = 100

# time.sleep(0.01)
buf = f.read(min(1024 * 32, remains))
if not buf:
@@ -1068,10 +1112,7 @@ def gzip_orig_sz(fn):
with open(fsenc(fn), "rb") as f:
f.seek(-4, 2)
rv = f.read(4)
try:
return struct.unpack(b"I", rv)[0]
except:
return struct.unpack("I", rv)[0]
return sunpack(b"I", rv)[0]


def py_desc():

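`gzip_orig_sz` above relies on the gzip trailer: per RFC 1952, the last 4 bytes of a gzip member hold ISIZE, the uncompressed length modulo 2^32, little-endian. A self-contained sketch against a throwaway file (the filename is arbitrary; for multi-member archives or data >= 4 GiB the trailer only reflects the last member, mod 2^32):

```py
import gzip
import struct

with gzip.open("demo.txt.gz", "wb") as f:
    f.write(b"x" * 1234)

with open("demo.txt.gz", "rb") as f:
    f.seek(-4, 2)                          # jump to the 4-byte ISIZE trailer
    (orig_sz,) = struct.unpack("<I", f.read(4))

print(orig_sz)  # 1234
```
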
@@ -28,10 +28,16 @@ window.baguetteBox = (function () {
isOverlayVisible = false,
touch = {}, // start-pos
touchFlag = false, // busy
regex = /.+\.(gif|jpe?g|png|webp)/i,
re_i = /.+\.(gif|jpe?g|png|webp)(\?|$)/i,
re_v = /.+\.(webm|mp4)(\?|$)/i,
data = {}, // all galleries
imagesElements = [],
documentLastFocus = null;
documentLastFocus = null,
isFullscreen = false;

var onFSC = function (e) {
isFullscreen = !!document.fullscreenElement;
};

var overlayClickHandler = function (event) {
if (event.target.id.indexOf('baguette-img') !== -1) {
@@ -96,10 +102,6 @@ window.baguetteBox = (function () {
data[selector] = selectorData;

[].forEach.call(galleryNodeList, function (galleryElement) {
if (userOptions && userOptions.filter) {
regex = userOptions.filter;
}

var tagsNodeList = [];
if (galleryElement.tagName === 'A') {
tagsNodeList = [galleryElement];
@@ -109,7 +111,7 @@ window.baguetteBox = (function () {

tagsNodeList = [].filter.call(tagsNodeList, function (element) {
if (element.className.indexOf(userOptions && userOptions.ignoreClass) === -1) {
return regex.test(element.href);
return re_i.test(element.href) || re_v.test(element.href);
}
});
if (tagsNodeList.length === 0) {
@@ -209,24 +211,46 @@ window.baguetteBox = (function () {
bindEvents();
}

function keyDownHandler(event) {
switch (event.keyCode) {
case 37: // Left
showPreviousImage();
break;
case 39: // Right
showNextImage();
break;
case 27: // Esc
hideOverlay();
break;
case 36: // Home
showFirstImage(event);
break;
case 35: // End
showLastImage(event);
break;
}
function keyDownHandler(e) {
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing)
return;

var k = e.code + '';

if (k == "ArrowLeft" || k == "KeyJ")
showPreviousImage();
else if (k == "ArrowRight" || k == "KeyL")
showNextImage();
else if (k == "Escape")
hideOverlay();
else if (k == "Home")
showFirstImage(e);
else if (k == "End")
showLastImage(e);
else if (k == "Space" || k == "KeyP" || k == "KeyK")
playpause();
else if (k == "KeyU" || k == "KeyO")
relseek(k == "KeyU" ? -10 : 10);
else if (k == "KeyM" && vid())
vid().muted = !vid().muted;
else if (k == "KeyF")
try {
if (isFullscreen)
document.exitFullscreen();
else
vid().requestFullscreen();
}
catch (ex) { }
}

function keyUpHandler(e) {
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing)
return;

var k = e.code + '';

if (k == "Space")
ev(e);
}

var passiveSupp = false;
@@ -325,6 +349,8 @@ window.baguetteBox = (function () {
}

bind(document, 'keydown', keyDownHandler);
bind(document, 'keyup', keyUpHandler);
bind(document, 'fullscreenchange', onFSC);
currentIndex = chosenImageIndex;
touch = {
count: 0,
@@ -366,6 +392,7 @@ window.baguetteBox = (function () {

function hideOverlay(e) {
ev(e);
playvid(false);
if (options.noScrollbars) {
document.documentElement.style.overflowY = 'auto';
document.body.style.overflowY = 'auto';
@@ -375,6 +402,8 @@ window.baguetteBox = (function () {
}

unbind(document, 'keydown', keyDownHandler);
unbind(document, 'keyup', keyUpHandler);
unbind(document, 'fullscreenchange', onFSC);
// Fade out and hide the overlay
overlay.className = '';
setTimeout(function () {
@@ -398,8 +427,8 @@ window.baguetteBox = (function () {
return; // out-of-bounds or gallery dirty
}

if (imageContainer.getElementsByTagName('img')[0]) {
// image is loaded, cb and bail
if (imageContainer.querySelector('img, video')) {
// was loaded, cb and bail
if (callback) {
callback();
}
@@ -408,7 +437,7 @@ window.baguetteBox = (function () {

var imageElement = galleryItem.imageElement,
imageSrc = imageElement.href,
thumbnailElement = imageElement.getElementsByTagName('img')[0],
thumbnailElement = imageElement.querySelector('img, video'),
imageCaption = typeof options.captions === 'function' ?
options.captions.call(currentGallery, imageElement) :
imageElement.getAttribute('data-caption') || imageElement.title;
@@ -428,16 +457,20 @@ window.baguetteBox = (function () {
}
imageContainer.appendChild(figure);

var image = mknod('img');
image.onload = function () {
var is_vid = re_v.test(imageSrc),
image = mknod(is_vid ? 'video' : 'img');

clmod(imageContainer, 'vid', is_vid);

image.addEventListener(is_vid ? 'loadedmetadata' : 'load', function () {
// Remove loader element
var spinner = document.querySelector('#baguette-img-' + index + ' .baguetteBox-spinner');
figure.removeChild(spinner);
if (!options.async && callback) {
if (!options.async && callback)
callback();
}
};
});
image.setAttribute('src', imageSrc);
image.setAttribute('controls', 'controls');
image.alt = thumbnailElement ? thumbnailElement.alt || '' : '';
if (options.titleTag && imageCaption) {
image.title = imageCaption;
@@ -498,6 +531,7 @@ window.baguetteBox = (function () {
return false;
}

playvid(false);
currentIndex = index;
loadImage(currentIndex, function () {
preloadNext(currentIndex);
@@ -512,6 +546,26 @@ window.baguetteBox = (function () {
return true;
}

function vid() {
return imagesElements[currentIndex].querySelector('video');
}

function playvid(play) {
if (vid())
vid()[play ? 'play' : 'pause']();
}

function playpause() {
var v = vid();
if (v)
v[v.paused ? "play" : "pause"]();
}

function relseek(sec) {
if (vid())
vid().currentTime += sec;
}

/**
* Triggers the bounce animation
* @param {('left'|'right')} direction - Direction of the movement
@@ -534,6 +588,8 @@ window.baguetteBox = (function () {
} else {
slider.style.transform = 'translate3d(' + offset + ',0,0)';
}
playvid(false);
playvid(true);
}

function preloadNext(index) {
@@ -566,6 +622,7 @@ window.baguetteBox = (function () {
unbindEvents();
clearCachedData();
unbind(document, 'keydown', keyDownHandler);
unbind(document, 'keyup', keyUpHandler);
document.getElementsByTagName('body')[0].removeChild(ebi('baguetteBox-overlay'));
data = {};
currentGallery = [];
@@ -577,6 +634,8 @@ window.baguetteBox = (function () {
show: show,
showNext: showNextImage,
showPrevious: showPreviousImage,
relseek: relseek,
playpause: playpause,
hide: hideOverlay,
destroy: destroyPlugin
};

@@ -1174,22 +1174,27 @@ html.light #tree::-webkit-scrollbar {
margin: 0;
height: 100%;
}
#baguetteBox-overlay .full-image img {
#baguetteBox-overlay .full-image img,
#baguetteBox-overlay .full-image video {
display: inline-block;
width: auto;
height: auto;
max-height: 100%;
max-width: 100%;
max-height: 100%;
max-height: calc(100% - 1.4em);
margin-bottom: 1.4em;
vertical-align: middle;
box-shadow: 0 0 8px rgba(0, 0, 0, 0.6);
}
#baguetteBox-overlay .full-image video {
background: #333;
}
#baguetteBox-overlay .full-image figcaption {
display: block;
position: absolute;
bottom: 0;
position: fixed;
bottom: .1em;
width: 100%;
text-align: center;
line-height: 1.8;
white-space: normal;
color: #ccc;
}

@@ -1756,7 +1756,7 @@ document.onkeydown = function (e) {
if (e.ctrlKey || e.altKey || e.metaKey || e.isComposing)
return;

var k = (e.code + ''), pos = -1, n;
var k = e.code + '', pos = -1, n;

if (e.shiftKey && k != 'KeyA' && k != 'KeyD')
return;

@@ -35,7 +35,7 @@
</table>
</td></tr></table>
<div class="btns">
<a href="{{ avol[0] }}?stack">dump stack</a>
<a href="/?stack">dump stack</a>
</div>
{%- endif %}

@@ -37,6 +37,7 @@ function vis_exh(msg, url, lineNo, columnNo, error) {
html.push('<h2>' + find[a] + '</h2>' +
esc(String(error[find[a]])).replace(/\n/g, '<br />\n'));
}
html.push('<p style="border-top:1px solid #999;margin-top:1.5em;font-size:1.4em">if you are stuck here, try to <a href="#" onclick="localStorage.clear();location.reload();">reset copyparty settings</a></p>');
document.body.innerHTML = html.join('\n');

var s = mknod('style');

@@ -23,10 +23,10 @@ def hdr(query):


class Cfg(Namespace):
def __init__(self, a=[], v=[], c=None):
def __init__(self, a=None, v=None, c=None):
super(Cfg, self).__init__(
a=a,
v=v,
a=a or [],
v=v or [],
c=c,
rproxy=0,
ed=False,

@@ -16,7 +16,7 @@ from copyparty import util


class Cfg(Namespace):
def __init__(self, a=[], v=[], c=None):
def __init__(self, a=None, v=None, c=None):
ex = {k: False for k in "nw e2d e2ds e2dsa e2t e2ts e2tsr".split()}
ex2 = {
"mtp": [],
@@ -27,7 +27,7 @@ class Cfg(Namespace):
"rproxy": 0,
}
ex.update(ex2)
super(Cfg, self).__init__(a=a, v=v, c=c, **ex)
super(Cfg, self).__init__(a=a or [], v=v or [], c=c, **ex)


class TestVFS(unittest.TestCase):

@@ -126,7 +126,6 @@ class VHttpConn(object):
self.hsrv = VHttpSrv()
self.nreq = 0
self.nbyte = 0
self.workload = 0
self.ico = None
self.thumbcli = None
self.t0 = time.time()