Compare commits
13 Commits

| SHA1 |
|---|
| 9f000beeaf |
| ff0a71f212 |
| 22dfc6ec24 |
| 48147c079e |
| d715479ef6 |
| fc8298c468 |
| e94ca5dc91 |
| 114b71b751 |
| b2770a2087 |
| cba1878bb2 |
| a2e037d6af |
| 65a2b6a223 |
| 9ed799e803 |
@@ -219,7 +219,7 @@ also see [comparison to similar software](./docs/versus.md)
* upload
* ☑ basic: plain multipart, ie6 support
* ☑ [up2k](#uploading): js, resumable, multithreaded
* **no filesize limit!** ...unless you use Cloudflare, then it's 383.9 GiB
* **no filesize limit!** even on Cloudflare
* ☑ stash: simple PUT filedropper
* ☑ filename randomizer
* ☑ write-only folders

@@ -654,7 +654,7 @@ up2k has several advantages:
* uploads resume if you reboot your browser or pc, just upload the same files again
* server detects any corruption; the client reuploads affected chunks
* the client doesn't upload anything that already exists on the server
* no filesize limit unless imposed by a proxy, for example Cloudflare, which blocks uploads over 383.9 GiB
* no filesize limit, even when a proxy limits the request size (for example Cloudflare)
* much higher speeds than ftp/scp/tarpipe on some internet connections (mainly american ones) thanks to parallel connections
* the last-modified timestamp of the file is preserved

@@ -690,6 +690,8 @@ note that since up2k has to read each file twice, `[🎈] bup` can *theoretically*

if you are resuming a massive upload and want to skip hashing the files which already finished, you can enable `turbo` in the `[⚙️] config` tab, but please read the tooltip on that button

if the server is behind a proxy which imposes a request-size limit, you can configure up2k to sneak below the limit with server-option `--u2sz` (the default is 96 MiB to support Cloudflare)
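a single up2k chunk that is bigger than the proxy limit is then sent as several consecutive POSTs, each carrying its byte-offset within the chunk in the `X-Up2k-Subc` header (see the `u2c.py` and `up2k.js` hunks below); a rough sketch of that split, with illustrative names rather than the actual client internals:

```python
# rough sketch of the sub-request split; names are illustrative,
# not the actual u2c/up2k internals
def split_post(chunk_len, limit=96 * 1024 * 1024):
    """yield (offset, size) for each sub-request of one oversized chunk"""
    ofs = 0
    while ofs < chunk_len:
        sz = min(limit, chunk_len - ofs)
        yield ofs, sz  # ofs is what ends up in the X-Up2k-Subc header
        ofs += sz

# a 383 MiB chunk behind Cloudflare (96 MiB limit) becomes 4 POSTs
print(list(split_post(383 * 1024 * 1024)))
```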
### file-search
130 bin/u2c.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals

S_VERSION = "2.2"
S_BUILD_DT = "2024-10-13"
S_VERSION = "2.4"
S_BUILD_DT = "2024-10-16"

"""
u2c.py: upload to copyparty

@@ -62,6 +62,9 @@ else:
unicode = str

WTF8 = "replace" if PY2 else "surrogateescape"

VT100 = platform.system() != "Windows"

@@ -228,7 +231,7 @@ class File(object):
self.lmod = lmod # type: float

self.abs = os.path.join(top, rel) # type: bytes
self.name = self.rel.split(b"/")[-1].decode("utf-8", "replace") # type: str
self.name = self.rel.split(b"/")[-1].decode("utf-8", WTF8) # type: str

# set by get_hashlist
self.cids = [] # type: list[tuple[str, int, int]] # [ hash, ofs, sz ]
@@ -267,10 +270,41 @@ class FileSlice(object):
raise Exception(9)
tlen += clen

self.len = tlen
self.len = self.tlen = tlen
self.cdr = self.car + self.len
self.ofs = 0 # type: int
self.f = open(file.abs, "rb", 512 * 1024)

self.f = None
self.seek = self._seek0
self.read = self._read0

def subchunk(self, maxsz, nth):
if self.tlen <= maxsz:
return -1

if not nth:
self.car0 = self.car
self.cdr0 = self.cdr

self.car = self.car0 + maxsz * nth
if self.car >= self.cdr0:
return -2

self.cdr = self.car + min(self.cdr0 - self.car, maxsz)
self.len = self.cdr - self.car
self.seek(0)
return nth

def unsub(self):
self.car = self.car0
self.cdr = self.cdr0
self.len = self.tlen

def _open(self):
self.seek = self._seek
self.read = self._read

self.f = open(self.file.abs, "rb", 512 * 1024)
self.f.seek(self.car)

# https://stackoverflow.com/questions/4359495/what-is-exactly-a-file-like-object-in-python
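the new `subchunk`/`unsub` pair re-windows a slice into `maxsz`-sized pieces and later restores the original byte-range; the `upload()` hunk further down drives it roughly like this (simplified sketch; `post()` stands in for the actual HTTP request):

```python
# simplified driver for FileSlice.subchunk()/unsub(); post() is a stand-in
def send_slice(fsl, maxsz):
    nsub = 0
    while nsub != -1:
        nsub = fsl.subchunk(maxsz, nsub)  # -1: fits in one POST, -2: past the end
        if nsub == -2:
            break
        if nsub >= 0:
            ofs = maxsz * nsub  # sent as the X-Up2k-Subc header
            nsub += 1
        post(fsl)  # uploads whatever byte-range the slice currently exposes
    if nsub != -1:
        fsl.unsub()  # restore the original window
```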
@@ -282,10 +316,15 @@ class FileSlice(object):
except:
pass # py27 probably

def close(self, *a, **ka):
return # until _open

def tell(self):
return self.ofs

def seek(self, ofs, wh=0):
def _seek(self, ofs, wh=0):
assert self.f # !rm

if wh == 1:
ofs = self.ofs + ofs
elif wh == 2:

@@ -299,12 +338,22 @@ class FileSlice(object):
self.ofs = ofs
self.f.seek(self.car + ofs)

def read(self, sz):
def _read(self, sz):
assert self.f # !rm

sz = min(sz, self.len - self.ofs)
ret = self.f.read(sz)
self.ofs += len(ret)
return ret

def _seek0(self, ofs, wh=0):
self._open()
return self.seek(ofs, wh)

def _read0(self, sz):
self._open()
return self.read(sz)


class MTHash(object):
def __init__(self, cores):
@@ -557,13 +606,17 @@ def walkdir(err, top, excl, seen):
for ap, inf in sorted(statdir(err, top)):
if excl.match(ap):
continue
yield ap, inf
if stat.S_ISDIR(inf.st_mode):
yield ap, inf
try:
for x in walkdir(err, ap, excl, seen):
yield x
except Exception as ex:
err.append((ap, str(ex)))
elif stat.S_ISREG(inf.st_mode):
yield ap, inf
else:
err.append((ap, "irregular filetype 0%o" % (inf.st_mode,)))


def walkdirs(err, tops, excl):
@@ -609,11 +662,12 @@ def walkdirs(err, tops, excl):

# mostly from copyparty/util.py
def quotep(btxt):
# type: (bytes) -> bytes
quot1 = quote(btxt, safe=b"/")
if not PY2:
quot1 = quot1.encode("ascii")

return quot1.replace(b" ", b"+") # type: ignore
return quot1.replace(b" ", b"%20") # type: ignore
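the `quotep` change drops the legacy `+` space-encoding in favor of `%20`; `+` only decodes to a space in query strings, so in a path component it must stay literal, and percent-encoding is the form that round-trips. a quick standard-library illustration:

```python
# "+" means space only in query strings; paths need %20 to round-trip
from urllib.parse import quote, unquote

p = quote("my file.txt", safe="/")
print(p)                        # my%20file.txt
print(unquote(p))               # my file.txt
print(unquote("my+file.txt"))   # my+file.txt  (the + stays literal)
```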
# from copyparty/util.py
@@ -641,7 +695,7 @@ def up2k_chunksize(filesize):
while True:
for mul in [1, 2]:
nchunks = math.ceil(filesize * 1.0 / chunksize)
if nchunks <= 256 or (chunksize >= 32 * 1024 * 1024 and nchunks < 4096):
if nchunks <= 256 or (chunksize >= 32 * 1024 * 1024 and nchunks <= 4096):
return chunksize

chunksize += stepsize
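the boundary fix (`<` becoming `<=`) lets a file occupy exactly 4096 chunks before the chunksize is bumped again. for context, a hedged sketch of the whole function; the 1 MiB / 512 KiB starting values and the `stepsize *= mul` growth are assumed from the copyparty source and are not part of this hunk:

```python
import math

def up2k_chunksize(filesize):
    # sketch; start values and stepsize growth assumed, not shown in this hunk
    chunksize = 1024 * 1024
    stepsize = 512 * 1024
    while True:
        for mul in [1, 2]:
            nchunks = math.ceil(filesize * 1.0 / chunksize)
            if nchunks <= 256 or (chunksize >= 32 * 1024 * 1024 and nchunks <= 4096):
                return chunksize

            chunksize += stepsize
            stepsize *= mul

# with the assumed start values, a 1 TiB file now stays at 256 MiB chunks
# (exactly 4096 of them); the old "<" would have bumped it to 384 MiB
print(up2k_chunksize(1024 ** 4) // (1024 * 1024))
```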
@@ -720,7 +774,7 @@ def handshake(ar, file, search):
url = file.url
else:
if b"/" in file.rel:
url = quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8", "replace")
url = quotep(file.rel.rsplit(b"/", 1)[0]).decode("utf-8")
else:
url = ""
url = ar.vtop + url

@@ -766,15 +820,15 @@ def handshake(ar, file, search):
if search:
return r["hits"], False

file.url = r["purl"]
file.url = quotep(r["purl"].encode("utf-8", WTF8)).decode("utf-8")
file.name = r["name"]
file.wark = r["wark"]

return r["hash"], r["sprs"]


def upload(fsl, stats):
# type: (FileSlice, str) -> None
def upload(fsl, stats, maxsz):
# type: (FileSlice, str, int) -> None
"""upload a range of file data, defined by one or more `cid` (chunk-hash)"""

ctxt = fsl.cids[0]

@@ -792,21 +846,33 @@ def upload(fsl, stats):
if stats:
headers["X-Up2k-Stat"] = stats

nsub = 0
try:
sc, txt = web.req("POST", fsl.file.url, headers, fsl, MO)
while nsub != -1:
nsub = fsl.subchunk(maxsz, nsub)
if nsub == -2:
return
if nsub >= 0:
headers["X-Up2k-Subc"] = str(maxsz * nsub)
headers.pop(CLEN, None)
nsub += 1

if sc == 400:
if (
"already being written" in txt
or "already got that" in txt
or "only sibling chunks" in txt
):
fsl.file.nojoin = 1
sc, txt = web.req("POST", fsl.file.url, headers, fsl, MO)

if sc >= 400:
raise Exception("http %s: %s" % (sc, txt))
if sc == 400:
if (
"already being written" in txt
or "already got that" in txt
or "only sibling chunks" in txt
):
fsl.file.nojoin = 1

if sc >= 400:
raise Exception("http %s: %s" % (sc, txt))
finally:
fsl.f.close()
if nsub != -1:
fsl.unsub()
class Ctl(object):

@@ -938,7 +1004,7 @@ class Ctl(object):
print(" %d up %s" % (ncs - nc, cid))
stats = "%d/0/0/%d" % (nf, self.nfiles - nf)
fslice = FileSlice(file, [cid])
upload(fslice, stats)
upload(fslice, stats, self.ar.szm)

print(" ok!")
if file.recheck:

@@ -1057,7 +1123,7 @@ class Ctl(object):
print(" ls ~{0}".format(srd))
zt = (
self.ar.vtop,
quotep(rd.replace(b"\\", b"/")).decode("utf-8", "replace"),
quotep(rd.replace(b"\\", b"/")).decode("utf-8"),
)
sc, txt = web.req("GET", "%s%s?ls<&dots" % zt, {})
if sc >= 400:

@@ -1066,7 +1132,7 @@ class Ctl(object):
j = json.loads(txt)
for f in j["dirs"] + j["files"]:
rfn = f["href"].split("?")[0].rstrip("/")
ls[unquote(rfn.encode("utf-8", "replace"))] = f
ls[unquote(rfn.encode("utf-8", WTF8))] = f
except Exception as ex:
print(" mkdir ~{0} ({1})".format(srd, ex))

@@ -1080,7 +1146,7 @@ class Ctl(object):
lnodes = [x.split(b"/")[-1] for x in zls]
bnames = [x for x in ls if x not in lnodes and x != b".hist"]
vpath = self.ar.url.split("://")[-1].split("/", 1)[-1]
names = [x.decode("utf-8", "replace") for x in bnames]
names = [x.decode("utf-8", WTF8) for x in bnames]
locs = [vpath + srd + "/" + x for x in names]
while locs:
req = locs

@@ -1286,7 +1352,7 @@ class Ctl(object):
self._check_if_done()
continue

njoin = (self.ar.sz * 1024 * 1024) // chunksz
njoin = self.ar.sz // chunksz
cs = hs[:]
while cs:
fsl = FileSlice(file, cs[:1])

@@ -1338,7 +1404,7 @@ class Ctl(object):
)

try:
upload(fsl, stats)
upload(fsl, stats, self.ar.szm)
except Exception as ex:
t = "upload failed, retrying: %s #%s+%d (%s)\n"
eprint(t % (file.name, cids[0][:8], len(cids) - 1, ex))

@@ -1427,6 +1493,7 @@ source file/folder selection uses rsync syntax, meaning that:
ap.add_argument("-j", type=int, metavar="CONNS", default=2, help="parallel connections")
ap.add_argument("-J", type=int, metavar="CORES", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
ap.add_argument("--sz", type=int, metavar="MiB", default=64, help="try to make each POST this big")
ap.add_argument("--szm", type=int, metavar="MiB", default=96, help="max size of each POST (default is cloudflare max)")
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
ap.add_argument("-ns", action="store_true", help="no status panel (for slow consoles and macos)")
ap.add_argument("--cd", type=float, metavar="SEC", default=5, help="delay before reattempting a failed handshake/upload")

@@ -1454,6 +1521,9 @@ source file/folder selection uses rsync syntax, meaning that:
if ar.dr:
ar.ow = True

ar.sz *= 1024 * 1024
ar.szm *= 1024 * 1024

ar.x = "|".join(ar.x or [])

setattr(ar, "wlist", ar.url == "-")
@@ -1,6 +1,6 @@
# Maintainer: icxes <dev.null@need.moe>
pkgname=copyparty
pkgver="1.15.6"
pkgver="1.15.7"
pkgrel=1
pkgdesc="File server with accelerated resumable uploads, dedup, WebDAV, FTP, TFTP, zeroconf, media indexer, thumbnails++"
arch=("any")

@@ -21,7 +21,7 @@ optdepends=("ffmpeg: thumbnails for videos, images (slower) and audio, music tag
)
source=("https://github.com/9001/${pkgname}/releases/download/v${pkgver}/${pkgname}-${pkgver}.tar.gz")
backup=("etc/${pkgname}.d/init" )
sha256sums=("abb5c1705cd80ea553d647d4a7b35b5e1dac5a517200551bcca79aa199f30875")
sha256sums=("b492f91e3e157d30b17c5cddad43e9a64f8e18e1ce0a05b50c09ca5eba843a56")

build() {
cd "${srcdir}/${pkgname}-${pkgver}"
@@ -1,5 +1,5 @@
{
"url": "https://github.com/9001/copyparty/releases/download/v1.15.6/copyparty-sfx.py",
"version": "1.15.6",
"hash": "sha256-0ikt3jv9/XT/w/ew+R4rZxF6s7LwNhUvUYYIZtkQqbk="
"url": "https://github.com/9001/copyparty/releases/download/v1.15.7/copyparty-sfx.py",
"version": "1.15.7",
"hash": "sha256-j5zkMkrN/lPxXGBe2cUfIh6fFXM4lgkFLIt3TCJb3Mg="
}
@@ -1017,7 +1017,7 @@ def add_upload(ap):
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
ap2.add_argument("--turbo", metavar="LVL", type=int, default=0, help="configure turbo-mode in up2k client; [\033[32m-1\033[0m] = forbidden/always-off, [\033[32m0\033[0m] = default-off and warn if enabled, [\033[32m1\033[0m] = default-off, [\033[32m2\033[0m] = on, [\033[32m3\033[0m] = on and disable datecheck")
ap2.add_argument("--u2j", metavar="JOBS", type=int, default=2, help="web-client: number of file chunks to upload in parallel; 1 or 2 is good for low-latency (same-country) connections, 4-8 for android clients, 16 for cross-atlantic (max=64)")
ap2.add_argument("--u2sz", metavar="N,N,N", type=u, default="1,64,96", help="web-client: default upload chunksize (MiB); sets \033[33mmin,default,max\033[0m in the settings gui. Each HTTP POST will aim for this size. Cloudflare max is 96. Big values are good for cross-atlantic but may increase HDD fragmentation on some FS. Disable this optimization with [\033[32m1,1,1\033[0m]")
ap2.add_argument("--u2sz", metavar="N,N,N", type=u, default="1,64,96", help="web-client: default upload chunksize (MiB); sets \033[33mmin,default,max\033[0m in the settings gui. Each HTTP POST will aim for \033[33mdefault\033[0m, and never exceed \033[33mmax\033[0m. Cloudflare max is 96. Big values are good for cross-atlantic but may increase HDD fragmentation on some FS. Disable this optimization with [\033[32m1,1,1\033[0m]")
ap2.add_argument("--u2sort", metavar="TXT", type=u, default="s", help="upload order; [\033[32ms\033[0m]=smallest-first, [\033[32mn\033[0m]=alphabetical, [\033[32mfs\033[0m]=force-s, [\033[32mfn\033[0m]=force-n -- alphabetical is a bit slower on fiber/LAN but makes it easier to eyeball if everything went fine")
ap2.add_argument("--write-uplog", action="store_true", help="write POST reports to textfiles in working-directory")
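the three-part `--u2sz` value is min,default,max in MiB; the web client splits it and clamps whatever the user types in the settings gui (see the `read_u2sz` hunk in `up2k.js` further down). a small sketch of that interpretation, not the server's actual parser:

```python
# sketch of how a "min,default,max" --u2sz value is applied client-side
def parse_u2sz(spec="1,64,96"):
    lo, tgt, hi = (int(x) for x in spec.split(","))
    return lo, tgt, hi  # MiB

def clamp_u2sz(user_value, spec="1,64,96"):
    lo, tgt, hi = parse_u2sz(spec)
    if user_value is None:  # nothing entered: use the default
        return tgt
    return max(lo, min(hi, user_value))

print(clamp_u2sz(None), clamp_u2sz(512), clamp_u2sz(0))  # 64 96 1
```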
@@ -1,8 +1,8 @@
# coding: utf-8

VERSION = (1, 15, 7)
VERSION = (1, 15, 8)
CODENAME = "fill the drives"
BUILD_DT = (2024, 10, 14)
BUILD_DT = (2024, 10, 16)

S_VERSION = ".".join(map(str, VERSION))
S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)

@@ -66,6 +66,7 @@ if PY2:
LEELOO_DALLAS = "leeloo_dallas"

SEE_LOG = "see log for details"
SEESLOG = " (see serverlog for details)"
SSEELOG = " ({})".format(SEE_LOG)
BAD_CFG = "invalid config; {}".format(SEE_LOG)
SBADCFG = " ({})".format(BAD_CFG)
@@ -1884,7 +1884,7 @@ class HttpCli(object):
f, fn = ren_open(fn, *open_a, **params)
try:
path = os.path.join(fdir, fn)
post_sz, sha_hex, sha_b64 = hashcopy(reader, f, self.args.s_wr_slp)
post_sz, sha_hex, sha_b64 = hashcopy(reader, f, None, 0, self.args.s_wr_slp)
finally:
f.close()

@@ -2348,7 +2348,7 @@ class HttpCli(object):
broker = self.conn.hsrv.broker
x = broker.ask("up2k.handle_chunks", ptop, wark, chashes)
response = x.get()
chashes, chunksize, cstarts, path, lastmod, sprs = response
chashes, chunksize, cstarts, path, lastmod, fsize, sprs = response
maxsize = chunksize * len(chashes)
cstart0 = cstarts[0]
locked = chashes # remaining chunks to be received in this request

@@ -2356,6 +2356,50 @@ class HttpCli(object):
num_left = -1 # num chunks left according to most recent up2k release
treport = time.time() # ratelimit up2k reporting to reduce overhead

if "x-up2k-subc" in self.headers:
sc_ofs = int(self.headers["x-up2k-subc"])
chash = chashes[0]

u2sc = self.conn.hsrv.u2sc
try:
sc_pofs, hasher = u2sc[chash]
if not sc_ofs:
t = "client restarted the chunk; forgetting subchunk offset %d"
self.log(t % (sc_pofs,))
raise Exception()
except:
sc_pofs = 0
hasher = hashlib.sha512()

et = "subchunk protocol error; resetting chunk "
if sc_pofs != sc_ofs:
u2sc.pop(chash, None)
t = "%s[%s]: the expected resume-point was %d, not %d"
raise Pebkac(400, t % (et, chash, sc_pofs, sc_ofs))
if len(cstarts) > 1:
u2sc.pop(chash, None)
t = "%s[%s]: only a single subchunk can be uploaded in one request; you are sending %d chunks"
raise Pebkac(400, t % (et, chash, len(cstarts)))
csize = min(chunksize, fsize - cstart0[0])
cstart0[0] += sc_ofs # also sets cstarts[0][0]
sc_next_ofs = sc_ofs + postsize
if sc_next_ofs > csize:
u2sc.pop(chash, None)
t = "%s[%s]: subchunk offset (%d) plus postsize (%d) exceeds chunksize (%d)"
raise Pebkac(400, t % (et, chash, sc_ofs, postsize, csize))
else:
final_subchunk = sc_next_ofs == csize
t = "subchunk %s %d:%d/%d %s"
zs = "END" if final_subchunk else ""
self.log(t % (chash[:15], sc_ofs, sc_next_ofs, csize, zs), 6)
if final_subchunk:
u2sc.pop(chash, None)
else:
u2sc[chash] = (sc_next_ofs, hasher)
else:
hasher = None
final_subchunk = True

try:
if self.args.nw:
path = os.devnull
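the `(offset, hasher)` tuple cached in `u2sc` is what lets the server verify a chunk that arrives as several sub-POSTs: every sub-POST is fed through the same sha512 object, and the digest is only compared against the chunk-hash on the final subchunk (see the `hashcopy` / `final_subchunk` hunks below). the principle in miniature:

```python
# feeding the parts through one hasher equals hashing the whole chunk
import hashlib

chunk = b"x" * 1000
parts = [chunk[:400], chunk[400:800], chunk[800:]]  # three sub-POSTs

h = hashlib.sha512()
for p in parts:
    h.update(p)  # one update per received sub-POST

assert h.digest() == hashlib.sha512(chunk).digest()
```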
@@ -2386,9 +2430,11 @@ class HttpCli(object):
reader = read_socket(
self.sr, self.args.s_rd_sz, min(remains, chunksize)
)
post_sz, _, sha_b64 = hashcopy(reader, f, self.args.s_wr_slp)
post_sz, _, sha_b64 = hashcopy(
reader, f, hasher, 0, self.args.s_wr_slp
)

if sha_b64 != chash:
if sha_b64 != chash and final_subchunk:
try:
self.bakflip(
f, path, cstart[0], post_sz, chash, sha_b64, vfs.flags

@@ -2420,7 +2466,8 @@ class HttpCli(object):

# be quick to keep the tcp winsize scale;
# if we can't confirm rn then that's fine
written.append(chash)
if final_subchunk:
written.append(chash)
now = time.time()
if now - treport < 1:
continue

@@ -2813,7 +2860,7 @@ class HttpCli(object):
tabspath = os.path.join(fdir, tnam)
self.log("writing to {}".format(tabspath))
sz, sha_hex, sha_b64 = hashcopy(
p_data, f, self.args.s_wr_slp, max_sz
p_data, f, None, max_sz, self.args.s_wr_slp
)
if sz == 0:
raise Pebkac(400, "empty files in post")

@@ -3145,7 +3192,7 @@ class HttpCli(object):
wunlink(self.log, fp, vfs.flags)

with open(fsenc(fp), "wb", self.args.iobuf) as f:
sz, sha512, _ = hashcopy(p_data, f, self.args.s_wr_slp)
sz, sha512, _ = hashcopy(p_data, f, None, 0, self.args.s_wr_slp)

if lim:
lim.nup(self.ip)
@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import print_function, unicode_literals

import hashlib
import math
import os
import re

@@ -144,6 +145,7 @@ class HttpSrv(object):
self.t_periodic: Optional[threading.Thread] = None

self.u2fh = FHC()
self.u2sc: dict[str, tuple[int, "hashlib._Hash"]] = {}
self.pipes = CachedDict(0.2)
self.metrics = Metrics(self)
self.nreq = 0
@@ -20,7 +20,7 @@ from copy import deepcopy
from queue import Queue

from .__init__ import ANYWIN, PY2, TYPE_CHECKING, WINDOWS, E
from .authsrv import LEELOO_DALLAS, SSEELOG, VFS, AuthSrv
from .authsrv import LEELOO_DALLAS, SEESLOG, VFS, AuthSrv
from .bos import bos
from .cfg import vf_bmap, vf_cmap, vf_vmap
from .fsutil import Fstab

@@ -2891,9 +2891,6 @@ class Up2k(object):
"user": cj["user"],
"addr": ip,
"at": at,
"hash": [],
"need": [],
"busy": {},
}
for k in ["life"]:
if k in cj:

@@ -2927,17 +2924,20 @@ class Up2k(object):
hashes2, st = self._hashlist_from_file(orig_ap)
wark2 = up2k_wark_from_hashlist(self.salt, st.st_size, hashes2)
if dwark != wark2:
t = "will not dedup (fs index desync): fs=%s, db=%s, file: %s"
self.log(t % (wark2, dwark, orig_ap))
t = "will not dedup (fs index desync): fs=%s, db=%s, file: %s\n%s"
self.log(t % (wark2, dwark, orig_ap, rj))
lost.append(dupe[3:])
continue
data_ok = True
job = rj
break

if job and wark in reg:
# self.log("pop " + wark + " " + job["name"] + " handle_json db", 4)
del reg[wark]
if job:
if wark in reg:
del reg[wark]
job["hash"] = job["need"] = []
job["done"] = True
job["busy"] = {}

if lost:
c2 = None

@@ -2966,7 +2966,7 @@ class Up2k(object):
path = djoin(rj["ptop"], rj["prel"], fn)
try:
st = bos.stat(path)
if st.st_size > 0 or not rj["need"]:
if st.st_size > 0 or "done" in rj:
# upload completed or both present
break
except:

@@ -2980,13 +2980,13 @@ class Up2k(object):
inc_ap = djoin(cj["ptop"], cj["prel"], cj["name"])
orig_ap = djoin(rj["ptop"], rj["prel"], rj["name"])

if self.args.nw or n4g or not st:
if self.args.nw or n4g or not st or "done" not in rj:
pass

elif st.st_size != rj["size"]:
t = "will not dedup (fs index desync): {}, size fs={} db={}, mtime fs={} db={}, file: {}"
t = "will not dedup (fs index desync): {}, size fs={} db={}, mtime fs={} db={}, file: {}\n{}"
t = t.format(
wark, st.st_size, rj["size"], st.st_mtime, rj["lmod"], path
wark, st.st_size, rj["size"], st.st_mtime, rj["lmod"], path, rj
)
self.log(t)
del reg[wark]

@@ -2996,8 +2996,8 @@ class Up2k(object):
hashes2, _ = self._hashlist_from_file(orig_ap)
wark2 = up2k_wark_from_hashlist(self.salt, st.st_size, hashes2)
if wark != wark2:
t = "will not dedup (fs index desync): fs=%s, idx=%s, file: %s"
self.log(t % (wark2, wark, orig_ap))
t = "will not dedup (fs index desync): fs=%s, idx=%s, file: %s\n%s"
self.log(t % (wark2, wark, orig_ap, rj))
del reg[wark]

if job or wark in reg:

@@ -3012,7 +3012,7 @@ class Up2k(object):
dst = djoin(cj["ptop"], cj["prel"], cj["name"])
vsrc = djoin(job["vtop"], job["prel"], job["name"])
vsrc = vsrc.replace("\\", "/") # just for prints anyways
if job["need"]:
if "done" not in job:
self.log("unfinished:\n {0}\n {1}".format(src, dst))
err = "partial upload exists at a different location; please resume uploading here instead:\n"
err += "/" + quotep(vsrc) + " "
@@ -3373,14 +3373,14 @@ class Up2k(object):

def handle_chunks(
self, ptop: str, wark: str, chashes: list[str]
) -> tuple[list[str], int, list[list[int]], str, float, bool]:
) -> tuple[list[str], int, list[list[int]], str, float, int, bool]:
with self.mutex, self.reg_mutex:
self.db_act = self.vol_act[ptop] = time.time()
job = self.registry[ptop].get(wark)
if not job:
known = " ".join([x for x in self.registry[ptop].keys()])
self.log("unknown wark [{}], known: {}".format(wark, known))
raise Pebkac(400, "unknown wark" + SSEELOG)
raise Pebkac(400, "unknown wark" + SEESLOG)

if "t0c" not in job:
job["t0c"] = time.time()

@@ -3396,7 +3396,7 @@ class Up2k(object):
try:
nchunk = uniq.index(chashes[0])
except:
raise Pebkac(400, "unknown chunk0 [%s]" % (chashes[0]))
raise Pebkac(400, "unknown chunk0 [%s]" % (chashes[0],))
expanded = [chashes[0]]
for prefix in chashes[1:]:
nchunk += 1

@@ -3431,7 +3431,7 @@ class Up2k(object):
for chash in chashes:
nchunk = [n for n, v in enumerate(job["hash"]) if v == chash]
if not nchunk:
raise Pebkac(400, "unknown chunk %s" % (chash))
raise Pebkac(400, "unknown chunk %s" % (chash,))

ofs = [chunksize * x for x in nchunk]
coffsets.append(ofs)

@@ -3456,7 +3456,7 @@ class Up2k(object):

job["poke"] = time.time()

return chashes, chunksize, coffsets, path, job["lmod"], job["sprs"]
return chashes, chunksize, coffsets, path, job["lmod"], job["size"], job["sprs"]

def fast_confirm_chunks(
self, ptop: str, wark: str, chashes: list[str]

@@ -3525,11 +3525,13 @@ class Up2k(object):
src = djoin(pdir, job["tnam"])
dst = djoin(pdir, job["name"])
except Exception as ex:
raise Pebkac(500, "finish_upload, wark, " + repr(ex))
self.log(min_ex(), 1)
raise Pebkac(500, "finish_upload, wark, %r%s" % (ex, SEESLOG))

if job["need"]:
t = "finish_upload {} with remaining chunks {}"
raise Pebkac(500, t.format(wark, job["need"]))
self.log(min_ex(), 1)
t = "finish_upload %s with remaining chunks %s%s"
raise Pebkac(500, t % (wark, job["need"], SEESLOG))

upt = job.get("at") or time.time()
vflags = self.flags[ptop]

@@ -4054,7 +4056,9 @@ class Up2k(object):
self.db_act = self.vol_act[dbv.realpath] = time.time()
svpf = "/".join(x for x in [dbv.vpath, vrem, fn[0]] if x)
if not svpf.startswith(svp + "/"): # assert
raise Pebkac(500, "mv: bug at {}, top {}".format(svpf, svp))
self.log(min_ex(), 1)
t = "mv: bug at %s, top %s%s"
raise Pebkac(500, t % (svpf, svp, SEESLOG))

dvpf = dvp + svpf[len(svp) :]
self._mv_file(uname, ip, svpf, dvpf, curs)

@@ -4069,7 +4073,9 @@ class Up2k(object):
for zsl in (rm_ok, rm_ng):
for ap in reversed(zsl):
if not ap.startswith(sabs):
raise Pebkac(500, "mv_d: bug at {}, top {}".format(ap, sabs))
self.log(min_ex(), 1)
t = "mv_d: bug at %s, top %s%s"
raise Pebkac(500, t % (ap, sabs, SEESLOG))

rem = ap[len(sabs) :].replace(os.sep, "/").lstrip("/")
vp = vjoin(dvp, rem)
@@ -2723,10 +2723,12 @@ def yieldfile(fn: str, bufsz: int) -> Generator[bytes, None, None]:
def hashcopy(
fin: Generator[bytes, None, None],
fout: Union[typing.BinaryIO, typing.IO[Any]],
slp: float = 0,
max_sz: int = 0,
hashobj: Optional["hashlib._Hash"],
max_sz: int,
slp: float,
) -> tuple[int, str, str]:
hashobj = hashlib.sha512()
if not hashobj:
hashobj = hashlib.sha512()
tlen = 0
for buf in fin:
tlen += len(buf)
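`hashcopy` now takes the hash object as a parameter so the up2k chunk handler can pass in the sha512 that is carried across sub-POSTs, while every other call site passes `None` and gets a fresh digest as before. a minimal sketch of the pattern (not the full copyparty implementation):

```python
# minimal sketch of an optionally externally-owned hasher
import hashlib

def hashcopy_sketch(fin, fout, hashobj=None):
    if not hashobj:
        hashobj = hashlib.sha512()  # standalone call: fresh digest
    tlen = 0
    for buf in fin:
        tlen += len(buf)
        hashobj.update(buf)  # accumulates across calls if the caller reuses it
        fout.write(buf)
    return tlen, hashobj
```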
@@ -32,7 +32,7 @@ window.baguetteBox = (function () {
scrollCSS = ['', ''],
scrollTimer = 0,
re_i = /^[^?]+\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp)(\?|$)/i,
re_v = /^[^?]+\.(webm|mkv|mp4)(\?|$)/i,
re_v = /^[^?]+\.(webm|mkv|mp4|m4v)(\?|$)/i,
anims = ['slideIn', 'fadeIn', 'none'],
data = {}, // all galleries
imagesElements = [],

@@ -1,7 +1,7 @@
"use strict";

var XHR = XMLHttpRequest,
img_re = /\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp|webm|mkv|mp4)(\?|$)/i;
img_re = /\.(a?png|avif|bmp|gif|heif|jpe?g|jfif|svg|webp|webm|mkv|mp4|m4v)(\?|$)/i;

var Ls = {
"eng": {
@@ -73,8 +73,8 @@ html {
position: absolute;
height: 1px;
top: 1px;
right: 1%;
width: 99%;
right: 1px;
left: 1px;
animation: toastt var(--tmtime) steps(var(--tmstep)) forwards;
transform-origin: right;
}
@@ -21,6 +21,8 @@ var up2k = null,
m = 'will use ' + sha_js + ' instead of native sha512 due to';

try {
if (sread('nosubtle') || window.nosubtle)
throw 'chickenbit';
var cf = crypto.subtle || crypto.webkitSubtle;
cf.digest('SHA-512', new Uint8Array(1)).then(
function (x) { console.log('sha-ok'); up2k = up2k_init(cf); },

@@ -853,8 +855,13 @@ function up2k_init(subtle) {

setmsg(suggest_up2k, 'msg');

var u2szs = u2sz.split(','),
u2sz_min = parseInt(u2szs[0]),
u2sz_tgt = parseInt(u2szs[1]),
u2sz_max = parseInt(u2szs[2]);

var parallel_uploads = ebi('nthread').value = icfg_get('nthread', u2j),
stitch_tgt = ebi('u2szg').value = icfg_get('u2sz', u2sz.split(',')[1]),
stitch_tgt = ebi('u2szg').value = icfg_get('u2sz', u2sz_tgt),
uc = {},
fdom_ctr = 0,
biggest_file = 0;

@@ -1353,6 +1360,10 @@ function up2k_init(subtle) {
for (var a = 0; a < Math.min(navigator.hardwareConcurrency || 4, 16); a++)
hws.push(new Worker(SR + '/.cpr/w.hash.js?_=' + TS));

if (!subtle)
for (var a = 0; a < hws.length; a++)
hws[a].postMessage('nosubtle');

console.log(hws.length + " hashers");
}

@@ -2574,8 +2585,7 @@ function up2k_init(subtle) {
nparts = upt.nparts,
pcar = nparts[0],
pcdr = nparts[nparts.length - 1],
snpart = pcar == pcdr ? pcar : ('' + pcar + '~' + pcdr),
tries = 0;
maxsz = (u2sz_max > 1 ? u2sz_max : 2040) * 1024 * 1024;

if (t.done)
return console.log('done; skip chunk', t.name, t);
@@ -2595,6 +2605,30 @@ function up2k_init(subtle) {
if (cdr >= t.size)
cdr = t.size;

if (cdr - car <= maxsz)
return upload_sub(t, upt, pcar, pcdr, car, cdr, chunksize, car, []);

var car0 = car, subs = [];
while (car < cdr) {
subs.push([car, Math.min(cdr, car + maxsz)]);
car += maxsz;
}
upload_sub(t, upt, pcar, pcdr, 0, 0, chunksize, car0, subs);
}

function upload_sub(t, upt, pcar, pcdr, car, cdr, chunksize, car0, subs) {
var nparts = upt.nparts,
is_sub = subs.length;

if (is_sub) {
var x = subs.shift();
car = x[0];
cdr = x[1];
}

var snpart = is_sub ? ('' + pcar + '(' + (car-car0) +'+'+ (cdr-car)) :
pcar == pcdr ? pcar : ('' + pcar + '~' + pcdr);

var orz = function (xhr) {
st.bytes.inflight -= xhr.bsent;
var txt = unpre((xhr.response && xhr.response.err) || xhr.responseText);

@@ -2608,6 +2642,10 @@ function up2k_init(subtle) {
return;
}
if (xhr.status == 200) {
car = car0;
if (subs.length)
return upload_sub(t, upt, pcar, pcdr, 0, 0, chunksize, car0, subs);

var bdone = cdr - car;
for (var a = pcar; a <= pcdr; a++) {
pvis.prog(t, a, Math.min(bdone, chunksize));

@@ -2674,7 +2712,7 @@ function up2k_init(subtle) {
toast.warn(9.98, L.u_cuerr.format(snpart, Math.ceil(t.size / chunksize), t.name), t);

t.nojoin = t.nojoin || t.postlist.length; // maybe rproxy postsize limit
console.log('chunkpit onerror,', ++tries, t.name, t);
console.log('chunkpit onerror,', t.name, t);
orz2(xhr);
};

@@ -2692,6 +2730,9 @@ function up2k_init(subtle) {
xhr.open('POST', t.purl, true);
xhr.setRequestHeader("X-Up2k-Hash", ctxt);
xhr.setRequestHeader("X-Up2k-Wark", t.wark);
if (is_sub)
xhr.setRequestHeader("X-Up2k-Subc", car - car0);

xhr.setRequestHeader("X-Up2k-Stat", "{0}/{1}/{2}/{3} {4}/{5} {6}".format(
pvis.ctr.ok, pvis.ctr.ng, pvis.ctr.bz, pvis.ctr.q, btot, btot - bfin,
st.eta.t.split(' ').pop()));
@@ -2812,13 +2853,13 @@ function up2k_init(subtle) {
}

var read_u2sz = function () {
var el = ebi('u2szg'), n = parseInt(el.value), dv = u2sz.split(',');
var el = ebi('u2szg'), n = parseInt(el.value);
stitch_tgt = n = (
isNaN(n) ? dv[1] :
n < dv[0] ? dv[0] :
n > dv[2] ? dv[2] : n
isNaN(n) ? u2sz_tgt :
n < u2sz_min ? u2sz_min :
n > u2sz_max ? u2sz_max : n
);
if (n == dv[1]) sdrop('u2sz'); else swrite('u2sz', n);
if (n == u2sz_tgt) sdrop('u2sz'); else swrite('u2sz', n);
if (el.value != n) el.value = n;
};
ebi('u2szg').addEventListener('blur', read_u2sz);
@@ -1527,21 +1527,26 @@ var toast = (function () {
if (sec)
te = setTimeout(r.hide, sec * 1000);

var tb = ebi('toastt');
if (same && delta < 1000 && tb) {
tb.style.animation = 'none';
tb.offsetHeight;
tb.style.animation = null;
if (same && delta < 1000) {
var tb = ebi('toastt');
if (tb) {
tb.style.animation = 'none';
tb.offsetHeight;
tb.style.animation = null;
}
return;
}

if (txt.indexOf('<body>') + 1)
txt = txt.slice(0, txt.indexOf('<')) + ' [...]';

setcvar('--tmtime', sec + 's');
setcvar('--tmstep', sec * 15);

obj.innerHTML = '<div id="toastt"></div><a href="#" id="toastc">x</a><div id="toastb">' + lf2br(txt) + '</div>';
var html = '';
if (sec) {
setcvar('--tmtime', sec + 's');
setcvar('--tmstep', sec * 15);
html += '<div id="toastt"></div>';
}
obj.innerHTML = html + '<a href="#" id="toastc">x</a><div id="toastb">' + lf2br(txt) + '</div>';
obj.className = cl;
sec += obj.offsetWidth;
obj.className += ' vis';
@@ -20,6 +20,7 @@ catch (ex) {
function load_fb() {
subtle = null;
importScripts('deps/sha512.hw.js');
console.log('using fallback hasher');
}

@@ -29,6 +30,9 @@ var reader = null,

onmessage = (d) => {
if (d.data == 'nosubtle')
return load_fb();

if (busy)
return postMessage(["panic", 'worker got another task while busy']);
@@ -1,3 +1,31 @@
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2024-1013-2244 `v1.15.7` the 'a' in "ip address" stands for authentication

## 🧪 new features

* [cidr-based autologin](https://github.com/9001/copyparty#ip-auth) b7f9bf5a
  * map a cidr ip-range to a username; anyone connecting from that ip-range will autologin as that user
  * thx to @byteturtle for the idea!
* [u2c](https://github.com/9001/copyparty/blob/hovudstraum/bin/README.md#u2cpy) / commandline uploader:
  * option `--chs` to list individual chunk hashes cf1b7562
  * fix progress indicator when resuming an upload 53ffd245
* up2k: verbose logging of detected/corrected bitflips ee628363
  * *foreshadowing intensifies* (story still developing)

## 🩹 bugfixes

* up2k with database disabled / running without `-e2d` 705f598b
  * respect `noforget` when loading snaps
  * ...but actually forget deleted files otherwise
  * snap-loader adds empty need/hash entries as necessary

## 🔧 other changes

* authed users can now unpost recent uploads of unauthed users from the same IP 22b58e31
  * would have become problematic now that cidr-based autologin is a thing


▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
# 2024-1011-2256 `v1.15.6` preadme