Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | bccc44dc21 |  |
|  | 2f20d29edd |  |
|  | c6acd3a904 |  |
|  | 2b24c50eb7 |  |
|  | d30ae8453d |  |
|  | 8e5c436bef |  |
|  | f500e55e68 |  |
@@ -1291,6 +1291,8 @@ you may experience poor upload performance this way, but that can sometimes be f

 someone has also tested geesefs in combination with [gocryptfs](https://nuetzlich.net/gocryptfs/) with surprisingly good results, getting 60 MiB/s upload speeds on a gbit line, but JuiceFS won with 80 MiB/s using its built-in encryption

+you may improve performance by specifying larger values for `--iobuf` / `--s-rd-sz` / `--s-wr-sz`
+
 ## hiding from google
@@ -1740,6 +1742,7 @@ below are some tweaks roughly ordered by usefulness:

 * `--hist` pointing to a fast location (ssd) will make directory listings and searches faster when `-e2d` or `-e2t` is set
   * and also makes thumbnails load faster, regardless of e2d/e2t
 * `--no-hash .` when indexing a network-disk if you don't care about the actual filehashes and only want the names/tags searchable
+* if your volumes are on a network-disk such as NFS / SMB / s3, specifying larger values for `--iobuf` and/or `--s-rd-sz` and/or `--s-wr-sz` may help; try setting all of them to `524288` or `1048576` or `4194304` (see the example after this list)
 * `--no-htp --hash-mt=0 --mtag-mt=1 --th-mt=1` minimizes the number of threads; can help in some eccentric environments (like the vscode debugger)
 * `-j0` enables multiprocessing (actual multithreading), can reduce latency to `20+80/numCores` percent and generally improve performance in cpu-intensive workloads, for example:
   * lots of connections (many users or heavy clients)
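for the network-disk bullet above, a concrete invocation could look like this; just a sketch, the mountpoint and the 512 KiB values are assumptions to tune (not recommendations), matching the style of the benchmark commands in docs/bufsize.txt further down:

```sh
# serve an NFS/SMB mount with all three buffer sizes raised to 512 KiB
python3 copyparty-sfx.py -i 127.0.0.1 -v /mnt/nas::A \
  --iobuf $((1024*512)) --s-rd-sz $((1024*512)) --s-wr-sz $((1024*512))
```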
@@ -1,6 +1,6 @@
 # Maintainer: icxes <dev.null@need.moe>
 pkgname=copyparty
-pkgver="1.11.0"
+pkgver="1.11.1"
 pkgrel=1
 pkgdesc="File server with accelerated resumable uploads, dedup, WebDAV, FTP, TFTP, zeroconf, media indexer, thumbnails++"
 arch=("any")
@@ -21,7 +21,7 @@ optdepends=("ffmpeg: thumbnails for videos, images (slower) and audio, music tag
 )
 source=("https://github.com/9001/${pkgname}/releases/download/v${pkgver}/${pkgname}-${pkgver}.tar.gz")
 backup=("etc/${pkgname}.d/init" )
-sha256sums=("95f39a239dc38844fc27c5a1473635d07d8907bc98679dc79eb1de475e36fe42")
+sha256sums=("13e4a65d1854f4f95308fa91c00bd8a5f5977b3ea4fa844ed08c7e1cb1c4bf29")

 build() {
     cd "${srcdir}/${pkgname}-${pkgver}"
@@ -1,5 +1,5 @@
 {
-  "url": "https://github.com/9001/copyparty/releases/download/v1.11.0/copyparty-sfx.py",
-  "version": "1.11.0",
-  "hash": "sha256-MkNp+tI/Pl5QB4FMdJNOePbSUPO1MHWJLLC7gNh9K+c="
+  "url": "https://github.com/9001/copyparty/releases/download/v1.11.1/copyparty-sfx.py",
+  "version": "1.11.1",
+  "hash": "sha256-q7RiaB5yo1EDTwdPeMCNFnmcNj0TsKzBsbsddMSqTH4="
 }
@@ -869,6 +869,7 @@ def add_fs(ap):
     ap2 = ap.add_argument_group("filesystem options")
     rm_re_def = "5/0.1" if ANYWIN else "0/0"
     ap2.add_argument("--rm-retry", metavar="T/R", type=u, default=rm_re_def, help="if a file cannot be deleted because it is busy, continue trying for \033[33mT\033[0m seconds, retry every \033[33mR\033[0m seconds; disable with 0/0 (volflag=rm_retry)")
+    ap2.add_argument("--iobuf", metavar="BYTES", type=int, default=256*1024, help="file I/O buffer-size; if your volumes are on a network drive, try increasing to \033[32m524288\033[0m or even \033[32m4194304\033[0m (and let me know if that improves your performance)")


 def add_upload(ap):
@@ -916,6 +917,7 @@ def add_network(ap):
     ap2.add_argument("--freebind", action="store_true", help="allow listening on IPs which do not yet exist, for example if the network interfaces haven't finished going up. Only makes sense for IPs other than '0.0.0.0', '127.0.0.1', '::', and '::1'. May require running as root (unless net.ipv6.ip_nonlocal_bind)")
     ap2.add_argument("--s-thead", metavar="SEC", type=int, default=120, help="socket timeout (read request header)")
     ap2.add_argument("--s-tbody", metavar="SEC", type=float, default=186, help="socket timeout (read/write request/response bodies). Use 60 on fast servers (default is extremely safe). Disable with 0 if reverse-proxied for a 2%% speed boost")
+    ap2.add_argument("--s-rd-sz", metavar="B", type=int, default=256*1024, help="socket read size in bytes (indirectly affects filesystem writes; recommendation: keep equal-to or lower-than \033[33m--iobuf\033[0m)")
     ap2.add_argument("--s-wr-sz", metavar="B", type=int, default=256*1024, help="socket write size in bytes")
     ap2.add_argument("--s-wr-slp", metavar="SEC", type=float, default=0, help="debug: socket write delay in seconds")
     ap2.add_argument("--rsp-slp", metavar="SEC", type=float, default=0, help="debug: response delay in seconds")
@@ -1,8 +1,8 @@
 # coding: utf-8

-VERSION = (1, 11, 1)
+VERSION = (1, 11, 2)
 CODENAME = "You Can (Not) Proceed"
-BUILD_DT = (2024, 3, 18)
+BUILD_DT = (2024, 3, 23)

 S_VERSION = ".".join(map(str, VERSION))
 S_BUILD_DT = "{0:04d}-{1:02d}-{2:02d}".format(*BUILD_DT)
@@ -1224,7 +1224,9 @@ class AuthSrv(object):
             if un.startswith("@"):
                 grp = un[1:]
                 uns = [x[0] for x in un_gns.items() if grp in x[1]]
-                if not uns and grp != "${g}" and not self.args.idp_h_grp:
+                if grp == "${g}":
+                    unames.append(un)
+                elif not uns and not self.args.idp_h_grp:
                     t = "group [%s] must be defined with --grp argument (or in a [groups] config section)"
                     raise CfgEx(t % (grp,))
@@ -1234,31 +1236,28 @@ class AuthSrv(object):

         # unames may still contain ${u} and ${g} so now expand those;
         un_gn = [(un, gn) for un, gns in un_gns.items() for gn in gns]
         if "*" not in un_gns:
             # need ("*","") to match "*" in unames
             un_gn.append(("*", ""))

-        for _, dst, vu, vg in vols:
-            unames2 = set()
-            for un, gn in un_gn:
-                # if vu/vg (volume user/group) is non-null,
-                # then each non-null value corresponds to
-                # ${u}/${g}; consider this a filter to
-                # apply to unames, as well as un_gn
-                if (vu and vu != un) or (vg and vg != gn):
-                    continue
+        for src, dst, vu, vg in vols:
+            unames2 = set(unames)

-                for uname in unames + ([un] if vu or vg else []):
-                    if uname == "${u}":
-                        uname = vu or un
-                    elif uname in ("${g}", "@${g}"):
-                        uname = vg or gn
+            if "${u}" in unames:
+                if not vu:
+                    t = "cannot use ${u} in accs of volume [%s] because the volume url does not contain ${u}"
+                    raise CfgEx(t % (src,))
+                unames2.add(vu)

-                    if vu and vu != uname:
-                        continue
+            if "@${g}" in unames:
+                if not vg:
+                    t = "cannot use @${g} in accs of volume [%s] because the volume url does not contain @${g}"
+                    raise CfgEx(t % (src,))
+                unames2.update([un for un, gn in un_gn if gn == vg])

-                    if uname:
-                        unames2.add(uname)
+            if "${g}" in unames:
+                t = 'the accs of volume [%s] contains "${g}" but the only supported way of specifying that is "@${g}"'
+                raise CfgEx(t % (src,))
+
+            unames2.discard("${u}")
+            unames2.discard("@${g}")

             self._read_vol_str(lvl, list(unames2), axs[dst])
@@ -218,7 +218,7 @@ class FtpFs(AbstractedFS):
             raise FSE("Cannot open existing file for writing")

         self.validpath(ap)
-        return open(fsenc(ap), mode)
+        return open(fsenc(ap), mode, self.args.iobuf)

     def chdir(self, path: str) -> None:
         nwd = join(self.cwd, path)
@@ -36,6 +36,7 @@ from .bos import bos
 from .star import StreamTar
 from .sutil import StreamArc, gfilter
 from .szip import StreamZip
+from .util import unquote  # type: ignore
 from .util import (
     APPLESAN_RE,
     BITNESS,
@@ -84,7 +85,6 @@ from .util import (
     sendfile_py,
     undot,
     unescape_cookie,
-    unquote,  # type: ignore
     unquotep,
     vjoin,
     vol_san,
@@ -174,7 +174,6 @@ class HttpCli(object):
         self.parser: Optional[MultipartParser] = None
         # end placeholders

-        self.bufsz = 1024 * 32
         self.html_head = ""

     def log(self, msg: str, c: Union[int, str] = 0) -> None:
@@ -1611,15 +1610,16 @@ class HttpCli(object):
         return enc or "utf-8"

     def get_body_reader(self) -> tuple[Generator[bytes, None, None], int]:
+        bufsz = self.args.s_rd_sz
         if "chunked" in self.headers.get("transfer-encoding", "").lower():
-            return read_socket_chunked(self.sr), -1
+            return read_socket_chunked(self.sr, bufsz), -1

         remains = int(self.headers.get("content-length", -1))
         if remains == -1:
             self.keepalive = False
-            return read_socket_unbounded(self.sr), remains
+            return read_socket_unbounded(self.sr, bufsz), remains
         else:
-            return read_socket(self.sr, remains), remains
+            return read_socket(self.sr, bufsz, remains), remains

     def dump_to_file(self, is_put: bool) -> tuple[int, str, str, int, str, str]:
         # post_sz, sha_hex, sha_b64, remains, path, url
@@ -1641,7 +1641,7 @@ class HttpCli(object):
             bos.makedirs(fdir)

         open_ka: dict[str, Any] = {"fun": open}
-        open_a = ["wb", 512 * 1024]
+        open_a = ["wb", self.args.iobuf]

         # user-request || config-force
         if ("gz" in vfs.flags or "xz" in vfs.flags) and (
@@ -1900,7 +1900,7 @@ class HttpCli(object):
             f.seek(ofs)
             with open(fp, "wb") as fo:
                 while nrem:
-                    buf = f.read(min(nrem, 512 * 1024))
+                    buf = f.read(min(nrem, self.args.iobuf))
                     if not buf:
                         break
@@ -1922,7 +1922,7 @@ class HttpCli(object):
         return "%s %s n%s" % (spd1, spd2, self.conn.nreq)

     def handle_post_multipart(self) -> bool:
-        self.parser = MultipartParser(self.log, self.sr, self.headers)
+        self.parser = MultipartParser(self.log, self.args, self.sr, self.headers)
         self.parser.parse()

         file0: list[tuple[str, Optional[str], Generator[bytes, None, None]]] = []
@@ -2151,7 +2151,7 @@ class HttpCli(object):

         self.log("writing {} #{} @{} len {}".format(path, chash, cstart, remains))

-        reader = read_socket(self.sr, remains)
+        reader = read_socket(self.sr, self.args.s_rd_sz, remains)

         f = None
         fpool = not self.args.no_fpool and sprs
@@ -2162,7 +2162,7 @@ class HttpCli(object):
             except:
                 pass

-        f = f or open(fsenc(path), "rb+", 512 * 1024)
+        f = f or open(fsenc(path), "rb+", self.args.iobuf)

         try:
             f.seek(cstart[0])
@@ -2185,7 +2185,8 @@ class HttpCli(object):
             )
             ofs = 0
             while ofs < chunksize:
-                bufsz = min(chunksize - ofs, 4 * 1024 * 1024)
+                bufsz = max(4 * 1024 * 1024, self.args.iobuf)
+                bufsz = min(chunksize - ofs, bufsz)
                 f.seek(cstart[0] + ofs)
                 buf = f.read(bufsz)
                 for wofs in cstart[1:]:
@@ -2438,6 +2439,18 @@ class HttpCli(object):
         suffix = "-{:.6f}-{}".format(time.time(), dip)
         open_args = {"fdir": fdir, "suffix": suffix}

+        if "replace" in self.uparam:
+            abspath = os.path.join(fdir, fname)
+            if not self.can_delete:
+                self.log("user not allowed to overwrite with ?replace")
+            elif bos.path.exists(abspath):
+                try:
+                    bos.unlink(abspath)
+                    t = "overwriting file with new upload: %s"
+                except:
+                    t = "toctou while deleting for ?replace: %s"
+                self.log(t % (abspath,))
+
         # reserve destination filename
         with ren_open(fname, "wb", fdir=fdir, suffix=suffix) as zfw:
             fname = zfw["orz"][1]
@@ -2482,7 +2495,7 @@ class HttpCli(object):
             v2 = lim.dfv - lim.dfl
             max_sz = min(v1, v2) if v1 and v2 else v1 or v2

-        with ren_open(tnam, "wb", 512 * 1024, **open_args) as zfw:
+        with ren_open(tnam, "wb", self.args.iobuf, **open_args) as zfw:
             f, tnam = zfw["orz"]
             tabspath = os.path.join(fdir, tnam)
             self.log("writing to {}".format(tabspath))
@@ -2778,7 +2791,7 @@ class HttpCli(object):
         if bos.path.exists(fp):
             wunlink(self.log, fp, vfs.flags)

-        with open(fsenc(fp), "wb", 512 * 1024) as f:
+        with open(fsenc(fp), "wb", self.args.iobuf) as f:
             sz, sha512, _ = hashcopy(p_data, f, self.args.s_wr_slp)

         if lim:
@@ -3010,8 +3023,7 @@ class HttpCli(object):
             upper = gzip_orig_sz(fs_path)
         else:
             open_func = open
-            # 512 kB is optimal for huge files, use 64k
-            open_args = [fsenc(fs_path), "rb", 64 * 1024]
+            open_args = [fsenc(fs_path), "rb", self.args.iobuf]
             use_sendfile = (
                 # fmt: off
                 not self.tls
@@ -3146,6 +3158,7 @@ class HttpCli(object):

         bgen = packer(
             self.log,
+            self.args,
             fgen,
             utf8="utf" in uarg,
             pre_crc="crc" in uarg,
@@ -3223,7 +3236,7 @@ class HttpCli(object):
         sz_md = 0
         lead = b""
         fullfile = b""
-        for buf in yieldfile(fs_path):
+        for buf in yieldfile(fs_path, self.args.iobuf):
             if sz_md < max_sz:
                 fullfile += buf
             else:
@@ -3296,7 +3309,7 @@ class HttpCli(object):
         if fullfile:
             self.s.sendall(fullfile)
         else:
-            for buf in yieldfile(fs_path):
+            for buf in yieldfile(fs_path, self.args.iobuf):
                 self.s.sendall(html_bescape(buf))

         self.s.sendall(html[1])
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals

+import argparse
 import re
 import stat
 import tarfile
@@ -44,11 +45,12 @@ class StreamTar(StreamArc):
     def __init__(
         self,
         log: "NamedLogger",
+        args: argparse.Namespace,
         fgen: Generator[dict[str, Any], None, None],
         cmp: str = "",
         **kwargs: Any
     ):
-        super(StreamTar, self).__init__(log, fgen)
+        super(StreamTar, self).__init__(log, args, fgen)

         self.ci = 0
         self.co = 0
@@ -126,7 +128,7 @@ class StreamTar(StreamArc):
             inf.gid = 0

         self.ci += inf.size
-        with open(fsenc(src), "rb", 512 * 1024) as fo:
+        with open(fsenc(src), "rb", self.args.iobuf) as fo:
             self.tar.addfile(inf, fo)

     def _gen(self) -> None:
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals

+import argparse
 import os
 import tempfile
 from datetime import datetime
@@ -20,10 +21,12 @@ class StreamArc(object):
     def __init__(
         self,
         log: "NamedLogger",
+        args: argparse.Namespace,
         fgen: Generator[dict[str, Any], None, None],
         **kwargs: Any
     ):
         self.log = log
+        self.args = args
         self.fgen = fgen
         self.stopped = False
@@ -28,7 +28,7 @@ if True:  # pylint: disable=using-constant-test
     import typing
     from typing import Any, Optional, Union

-from .__init__ import ANYWIN, E, EXE, MACOS, TYPE_CHECKING, EnvParams, unicode
+from .__init__ import ANYWIN, EXE, MACOS, TYPE_CHECKING, E, EnvParams, unicode
 from .authsrv import BAD_CFG, AuthSrv
 from .cert import ensure_cert
 from .mtag import HAVE_FFMPEG, HAVE_FFPROBE
@@ -173,6 +173,26 @@ class SvcHub(object):
             self.log("root", t.format(args.j), c=3)
             args.no_fpool = True

+        for name, arg in (
+            ("iobuf", "iobuf"),
+            ("s-rd-sz", "s_rd_sz"),
+            ("s-wr-sz", "s_wr_sz"),
+        ):
+            zi = getattr(args, arg)
+            if zi < 32768:
+                t = "WARNING: expect very poor performance because you specified a very low value (%d) for --%s"
+                self.log("root", t % (zi, name), 3)
+            zi2 = 2 ** (zi - 1).bit_length()
+            if zi != zi2:
+                zi3 = 2 ** ((zi - 1).bit_length() - 1)
+                t = "WARNING: expect poor performance because --%s is not a power-of-two; consider using %d or %d instead of %d"
+                self.log("root", t % (name, zi2, zi3, zi), 3)
+
+        if args.s_rd_sz > args.iobuf:
+            t = "WARNING: --s-rd-sz (%d) is larger than --iobuf (%d); this may lead to reduced performance"
+            self.log("root", t % (args.s_rd_sz, args.iobuf), 3)
+
         bri = "zy"[args.theme % 2 :][:1]
         ch = "abcdefghijklmnopqrstuvwx"[int(args.theme / 2)]
         args.theme = "{0}{1} {0} {1}".format(ch, bri)
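the power-of-two warning above leans on `int.bit_length()`; a standalone sketch of the same arithmetic (not copyparty code, just the rounding logic, assuming n >= 2) for reference:

```python
# next power of two >= n, and the previous one below it;
# mirrors the zi2/zi3 computation in the warning above
def pow2_neighbors(n: int) -> tuple[int, int]:
    up = 2 ** (n - 1).bit_length()
    down = 2 ** ((n - 1).bit_length() - 1)
    return up, down

print(pow2_neighbors(262144))  # (262144, 131072): no warning, already a power of two
print(pow2_neighbors(300000))  # (524288, 262144): would trigger the warning
```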
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals

+import argparse
 import calendar
 import stat
 import time
@@ -218,12 +219,13 @@ class StreamZip(StreamArc):
     def __init__(
         self,
         log: "NamedLogger",
+        args: argparse.Namespace,
         fgen: Generator[dict[str, Any], None, None],
         utf8: bool = False,
         pre_crc: bool = False,
         **kwargs: Any
     ) -> None:
-        super(StreamZip, self).__init__(log, fgen)
+        super(StreamZip, self).__init__(log, args, fgen)

         self.utf8 = utf8
         self.pre_crc = pre_crc
@@ -248,7 +250,7 @@ class StreamZip(StreamArc):

         crc = 0
         if self.pre_crc:
-            for buf in yieldfile(src):
+            for buf in yieldfile(src, self.args.iobuf):
                 crc = zlib.crc32(buf, crc)

             crc &= 0xFFFFFFFF
@@ -257,7 +259,7 @@ class StreamZip(StreamArc):
         buf = gen_hdr(None, name, sz, ts, self.utf8, crc, self.pre_crc)
         yield self._ct(buf)

-        for buf in yieldfile(src):
+        for buf in yieldfile(src, self.args.iobuf):
             if not self.pre_crc:
                 crc = zlib.crc32(buf, crc)
@@ -340,6 +340,9 @@ class Tftpd(object):
         if not self.args.tftp_nols and bos.path.isdir(ap):
             return self._ls(vpath, "", 0, True)

+        if not a:
+            a = [self.args.iobuf]
+
         return open(ap, mode, *a, **ka)

     def _mkdir(self, vpath: str, *a) -> None:
@@ -16,9 +16,9 @@ from .__init__ import ANYWIN, TYPE_CHECKING
 from .authsrv import VFS
 from .bos import bos
 from .mtag import HAVE_FFMPEG, HAVE_FFPROBE, ffprobe
-from .util import BytesIO  # type: ignore
 from .util import (
     FFMPEG_URL,
+    BytesIO,  # type: ignore
     Cooldown,
     Daemon,
     Pebkac,
@@ -3920,7 +3920,7 @@ class Up2k(object):
         csz = up2k_chunksize(fsz)
         ret = []
         suffix = " MB, {}".format(path)
-        with open(fsenc(path), "rb", 512 * 1024) as f:
+        with open(fsenc(path), "rb", self.args.iobuf) as f:
             if self.mth and fsz >= 1024 * 512:
                 tlt = self.mth.hash(f, fsz, csz, self.pp, prefix, suffix)
                 ret = [x[0] for x in tlt]
@@ -1400,10 +1400,15 @@ def ren_open(

 class MultipartParser(object):
     def __init__(
-        self, log_func: "NamedLogger", sr: Unrecv, http_headers: dict[str, str]
+        self,
+        log_func: "NamedLogger",
+        args: argparse.Namespace,
+        sr: Unrecv,
+        http_headers: dict[str, str],
     ):
         self.sr = sr
         self.log = log_func
+        self.args = args
         self.headers = http_headers

         self.re_ctype = re.compile(r"^content-type: *([^; ]+)", re.IGNORECASE)
@@ -1502,7 +1507,7 @@ class MultipartParser(object):

     def _read_data(self) -> Generator[bytes, None, None]:
         blen = len(self.boundary)
-        bufsz = 32 * 1024
+        bufsz = self.args.s_rd_sz
         while True:
             try:
                 buf = self.sr.recv(bufsz)
@@ -2243,10 +2248,11 @@ def shut_socket(log: "NamedLogger", sck: socket.socket, timeout: int = 3) -> None:
     sck.close()


-def read_socket(sr: Unrecv, total_size: int) -> Generator[bytes, None, None]:
+def read_socket(
+    sr: Unrecv, bufsz: int, total_size: int
+) -> Generator[bytes, None, None]:
     remains = total_size
     while remains > 0:
-        bufsz = 32 * 1024
         if bufsz > remains:
             bufsz = remains
@@ -2260,16 +2266,16 @@ def read_socket(sr: Unrecv, total_size: int) -> Generator[bytes, None, None]:
         yield buf


-def read_socket_unbounded(sr: Unrecv) -> Generator[bytes, None, None]:
+def read_socket_unbounded(sr: Unrecv, bufsz: int) -> Generator[bytes, None, None]:
     try:
         while True:
-            yield sr.recv(32 * 1024)
+            yield sr.recv(bufsz)
     except:
         return


 def read_socket_chunked(
-    sr: Unrecv, log: Optional["NamedLogger"] = None
+    sr: Unrecv, bufsz: int, log: Optional["NamedLogger"] = None
 ) -> Generator[bytes, None, None]:
     err = "upload aborted: expected chunk length, got [{}] |{}| instead"
     while True:
@@ -2303,7 +2309,7 @@ def read_socket_chunked(
         if log:
             log("receiving %d byte chunk" % (chunklen,))

-        for chunk in read_socket(sr, chunklen):
+        for chunk in read_socket(sr, bufsz, chunklen):
             yield chunk

         x = sr.recv_ex(2, False)
@@ -2361,10 +2367,11 @@ def build_netmap(csv: str):
     return NetMap(ips, cidrs, True)


-def yieldfile(fn: str) -> Generator[bytes, None, None]:
-    with open(fsenc(fn), "rb", 512 * 1024) as f:
+def yieldfile(fn: str, bufsz: int) -> Generator[bytes, None, None]:
+    readsz = min(bufsz, 128 * 1024)
+    with open(fsenc(fn), "rb", bufsz) as f:
         while True:
-            buf = f.read(128 * 1024)
+            buf = f.read(readsz)
             if not buf:
                 break
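to illustrate the new `yieldfile` signature (a usage sketch only; the filename is a placeholder), callers now pass the buffer size explicitly, same as the httpcli.py changes above do with `self.args.iobuf`:

```python
# sketch: stream a file through the new two-arg yieldfile;
# reads are capped at 128 KiB while the OS-level buffer is bufsz
from copyparty.util import yieldfile

total = 0
for buf in yieldfile("/tmp/a.bin", 256 * 1024):
    total += len(buf)
print(total, "bytes")
```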
docs/bufsize.txt (new file, 96 lines)
@@ -0,0 +1,96 @@
notes from testing various buffer sizes of files and sockets

summary:

download-folder-as-tar: would be 7% faster with --iobuf 65536 (but got 20% faster in v1.11.2)

download-folder-as-zip: optimal with default --iobuf 262144

download-file-over-https: optimal with default --iobuf 262144

put-large-file: optimal with default --iobuf 262144, --s-rd-sz 262144 (and got 14% faster in v1.11.2)

post-large-file: optimal with default --iobuf 262144, --s-rd-sz 262144 (and got 18% faster in v1.11.2)

----

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/bigs/?tar
3.3            req/s  1.11.1
4.3  4.0  3.3  req/s  1.12.2
 64  256  512  --iobuf 256 (prefer smaller)
 32   32   32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/bigs/?zip
2.9            req/s  1.11.1
2.5  2.9  2.9  req/s  1.12.2
 64  256  512  --iobuf 256 (prefer bigger)
 32   32   32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/pairdupes/?tar
8.3            req/s  1.11.1
8.4  8.4  8.5  req/s  1.12.2
 64  256  512  --iobuf 256 (prefer bigger)
 32   32   32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/pairdupes/?zip
13.9              req/s  1.11.1
14.1  14.0  13.8  req/s  1.12.2
  64   256   512  --iobuf 256 (prefer smaller)
  32    32    32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/pairdupes/987a
5260                    req/s  1.11.1
5246  5246  5280  5268  req/s  1.12.2
  64   256   512   256  --iobuf dontcare
  32    32    32   512  --s-rd-sz dontcare

oha -z10s -c1 --ipv4 --insecure https://127.0.0.1:3923/pairdupes/987a
4445              req/s  1.11.1
4462  4494  4444  req/s  1.12.2
  64   256   512  --iobuf dontcare
  32    32    32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure http://127.0.0.1:3923/bigs/gssc-02-cannonball-skydrift/track10.cdda.flac
95       req/s  1.11.1
95   97  req/s  1.12.2
64  512  --iobuf dontcare
32   32  --s-rd-sz

oha -z10s -c1 --ipv4 --insecure https://127.0.0.1:3923/bigs/gssc-02-cannonball-skydrift/track10.cdda.flac
15.4                    req/s  1.11.1
15.4  15.3  14.9  15.4  req/s  1.12.2
  64   256   512   512  --iobuf 256 (prefer smaller, and smaller than s-wr-sz)
  32    32    32    32  --s-rd-sz
 256   256   256   512  --s-wr-sz

----

python3 ~/dev/old/copyparty\ v1.11.1\ dont\ ban\ the\ pipes.py -q -i 127.0.0.1 -v .::A --daw
python3 ~/dev/copyparty/dist/copyparty-sfx.py -q -i 127.0.0.1 -v .::A --daw --iobuf $((1024*512))

oha -z10s -c1 --ipv4 --insecure -mPUT -r0 -D ~/Music/gssc-02-cannonball-skydrift/track10.cdda.flac http://127.0.0.1:3923/a.bin
10.8                                req/s  1.11.1
10.8  11.5  11.8  12.1  12.2  12.3  req/s  new
 512   512   512   512   512   256  --iobuf 256
  32    64   128   256   512   256  --s-rd-sz 256 (prefer bigger)

----

buildpost() {
  b=--jeg-er-grensestaven;
  printf -- "$b\r\nContent-Disposition: form-data; name=\"act\"\r\n\r\nbput\r\n$b\r\nContent-Disposition: form-data; name=\"f\"; filename=\"a.bin\"\r\nContent-Type: audio/mpeg\r\n\r\n"
  cat "$1"
  printf -- "\r\n${b}--\r\n"
}
buildpost ~/Music/gssc-02-cannonball-skydrift/track10.cdda.flac >big.post
buildpost ~/Music/bottomtext.txt >smol.post

oha -z10s -c1 --ipv4 --insecure -mPOST -r0 -T 'multipart/form-data; boundary=jeg-er-grensestaven' -D big.post http://127.0.0.1:3923/?replace
 9.6  11.2  11.3  11.1  10.9  req/s  v1.11.2
 512   512   256   128   256  --iobuf 256
  32   512   256   128   128  --s-rd-sz 256

oha -z10s -c1 --ipv4 --insecure -mPOST -r0 -T 'multipart/form-data; boundary=jeg-er-grensestaven' -D smol.post http://127.0.0.1:3923/?replace
2445  2414  2401  2437
 256   128   256   256  --iobuf 256
 128   128   256    64  --s-rd-sz 128 (but use 256 since big posts are more important)
@@ -1,3 +1,24 @@
+▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
+# 2024-0318-1709 `v1.11.1` dont ban the pipes
+
+the [previous release](https://github.com/9001/copyparty/releases/tag/v1.11.0) had all the fun new features... this one's just bugfixes
+
+## bugfixes
+
+* less aggressive rejection of requests from banned IPs 51d31588
+  * clients would get kicked before the header was parsed (which contains the xff header), meaning the server could become inaccessible to everyone if the reverse-proxy itself were to "somehow" get banned
+    * ...which can happen if a server behind cloudflare also accepts non-cloudflare connections, meaning the client IP would not be resolved, and it'll ban the LAN IP instead heh
+    * that part still happens, but now it won't affect legit clients through the intended route
+  * the old behavior can be restored with `--early-ban` to save some cycles, and/or avoid slowloris somewhat
+* the unpost feature could appear to be disabled on servers where no volume was mapped to `/` 0287c7ba
+* python 3.12 support for [compiling the dependencies](https://github.com/9001/copyparty/tree/hovudstraum/bin/mtag#dependencies) necessary to detect bpm/key in audio files 32553e45
+
+## other changes
+
+* mention [real-ip configuration](https://github.com/9001/copyparty?tab=readme-ov-file#real-ip) in the readme ee80cdb9
+
+
+
 ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
 # 2024-0315-2047 `v1.11.0` You Can (Not) Proceed
@@ -164,6 +164,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`

 | PUT   | `?xz`      | (binary data)           | compress with xz and write into file at URL |
 | mPOST |            | `f=FILE`                | upload `FILE` into the folder at URL |
 | mPOST | `?j`       | `f=FILE`                | ...and reply with json |
+| mPOST | `?replace` | `f=FILE`                | ...and overwrite existing files |
 | mPOST |            | `act=mkdir`, `name=foo` | create directory `foo` at URL |
 | POST  | `?delete`  |                         | delete URL recursively |
 | jPOST | `?delete`  | `["/foo","/bar"]`       | delete `/foo` and `/bar` recursively |
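a usage sketch for the new `?replace` row (host, filename and password are placeholders); note that overwriting requires delete-permission on the volume, since the existing file is unlinked before the upload is written:

```sh
# mPOST with ?replace: sends FILE as multipart field "f" and overwrites the old copy
curl -F f=@a.bin "http://127.0.0.1:3923/some/folder/?replace&pw=hunter2"
```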
docs/idp.md
@@ -5,3 +5,18 @@ to configure IdP from scratch, you must place copyparty behind a reverse-proxy w

 in the copyparty `[global]` config, specify which headers to read client info from; username is required (`idp-h-usr: X-Authooley-User`), group(s) are optional (`idp-h-grp: X-Authooley-Groups`)

 * it is also required to specify the subnet that legit requests will be coming from, for example `--xff-src=10.88.0.0/24` to allow 10.88.x.x (or `--xff-src=lan` for all private IPs), and it is recommended to configure the reverseproxy to include a secret header as proof that the other headers are also legit (and not smuggled in by a malicious client), telling copyparty the headername to expect with `idp-h-key: shangala-bangala`
+
+
+# important notes
+
+## IdP volumes are forgotten on shutdown
+
+IdP volumes, meaning dynamically-created volumes, meaning volumes that contain `${u}` or `${g}` in their URL, will be forgotten during a server restart and then "revived" when the volume's owner sends their first request after the restart
+
+until each IdP volume is revived, it will inherit the permissions of its parent volume (if any)
+
+this means that, if an IdP volume is located inside a folder that is readable by anyone, then each of those IdP volumes will **also become readable by anyone** until the volume is revived
+
+and likewise -- if the IdP volume is inside a folder that is only accessible by certain users, but the IdP volume is configured to allow access from unauthenticated users, then the contents of the volume will NOT be accessible until it is revived
+
+until this limitation is fixed (if ever), it is recommended to place IdP volumes inside an appropriate parent volume, so they can inherit acceptable permissions until their revival; see the "strategic volumes" at the bottom of [./examples/docker/idp/copyparty.conf](./examples/docker/idp/copyparty.conf)
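tying the notes above together, a minimal config sketch in copyparty's conf format; the header names match this changeset's test config (tests/res/idp/6.conf below), while the subnet and volume layout are illustrative:

```yaml
[global]
  idp-h-usr: x-idp-user   # trusted header carrying the username
  idp-h-grp: x-idp-group  # optional header carrying the group(s)
  xff-src: 10.88.0.0/24   # only trust IdP headers from the proxy subnet

# strategic parent volume so /priv/* inherits safe permissions
# until each IdP volume is revived after a restart
[/priv]
  /priv
  accs:
    r: @su

[/priv/${u}]
  /priv/${u}
  accs:
    r: ${u}, @su  # the owner and the su group can read
    m: @su
```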
@@ -234,8 +234,9 @@ def u8(gen):


 def yieldfile(fn):
-    with open(fn, "rb") as f:
-        for block in iter(lambda: f.read(64 * 1024), b""):
+    s = 64 * 1024
+    with open(fn, "rb", s * 4) as f:
+        for block in iter(lambda: f.read(s), b""):
             yield block
tests/res/idp/6.conf (new file, 24 lines)
@@ -0,0 +1,24 @@
# -*- mode: yaml -*-
# vim: ft=yaml:

[global]
  idp-h-usr: x-idp-user
  idp-h-grp: x-idp-group

[/get/${u}]
  /get/${u}
  accs:
    g: *
    r: ${u}, @su
    m: @su

[/priv/${u}]
  /priv/${u}
  accs:
    r: ${u}, @su
    m: @su

[/team/${g}/${u}]
  /team/${g}/${u}
  accs:
    r: @${g}
@@ -49,11 +49,7 @@ class TestHttpCli(unittest.TestCase):
             with open(filepath, "wb") as f:
                 f.write(filepath.encode("utf-8"))

-        vcfg = [
-            ".::r,u1:r.,u2",
-            "a:a:r,u1:r,u2",
-            ".b:.b:r.,u1:r,u2"
-        ]
+        vcfg = [".::r,u1:r.,u2", "a:a:r,u1:r,u2", ".b:.b:r.,u1:r,u2"]
         self.args = Cfg(v=vcfg, a=["u1:u1", "u2:u2"], e2dsa=True)
         self.asrv = AuthSrv(self.args, self.log)

@@ -96,7 +92,7 @@ class TestHttpCli(unittest.TestCase):
         tar = tarfile.open(fileobj=io.BytesIO(b), mode="r|").getnames()
         top = ("top" if not url else url.lstrip(".").split("/")[0]) + "/"
         assert len(tar) == len([x for x in tar if x.startswith(top)])
-        return " ".join([x[len(top):] for x in tar])
+        return " ".join([x[len(top) :] for x in tar])

     def curl(self, url, uname, binary=False):
         conn = tu.VHttpConn(self.args, self.asrv, self.log, hdr(url, uname))
@@ -15,6 +15,16 @@ class TestVFS(unittest.TestCase):
         print(json.dumps(vfs, indent=4, sort_keys=True, default=lambda o: o.__dict__))

     def log(self, src, msg, c=0):
+        m = "%s" % (msg,)
+        if (
+            "warning: filesystem-path does not exist:" in m
+            or "you are sharing a system directory:" in m
+            or "reinitializing due to new user from IdP:" in m
+            or m.startswith("hint: argument")
+            or (m.startswith("loaded ") and " config files:" in m)
+        ):
+            return
+
         print(("[%s] %s" % (src, msg)).encode("ascii", "replace").decode("ascii"))

     def nav(self, au, vp):
@@ -30,21 +40,23 @@ class TestVFS(unittest.TestCase):
         self.assertEqual(unpacked, expected + [[]] * pad)

     def assertAxsAt(self, au, vp, expected):
-        self.assertAxs(self.nav(au, vp).axs, expected)
+        vn = self.nav(au, vp)
+        self.assertAxs(vn.axs, expected)

     def assertNodes(self, vfs, expected):
         got = list(sorted(vfs.nodes.keys()))
         self.assertEqual(got, expected)

     def assertNodesAt(self, au, vp, expected):
-        self.assertNodes(self.nav(au, vp), expected)
+        vn = self.nav(au, vp)
+        self.assertNodes(vn, expected)

     def prep(self):
         here = os.path.abspath(os.path.dirname(__file__))
         cfgdir = os.path.join(here, "res", "idp")

         # globals are applied by main so need to cheat a little
-        xcfg = { "idp_h_usr": "x-idp-user", "idp_h_grp": "x-idp-group" }
+        xcfg = {"idp_h_usr": "x-idp-user", "idp_h_grp": "x-idp-group"}

         return here, cfgdir, xcfg
@@ -140,6 +152,11 @@ class TestVFS(unittest.TestCase):
         self.assertEqual(self.nav(au, "vg/iga1").realpath, "/g1-iga")
         self.assertEqual(self.nav(au, "vg/iga2").realpath, "/g2-iga")

+        au.idp_checkin(None, "iub", "iga")
+        self.assertAxsAt(au, "vu/iua", [["iua"]])
+        self.assertAxsAt(au, "vg/iga1", [["iua", "iub"]])
+        self.assertAxsAt(au, "vg/iga2", [["iua", "iub", "ua"]])
+
     def test_5(self):
         """
         one IdP user in multiple groups
@@ -169,3 +186,44 @@ class TestVFS(unittest.TestCase):
         self.assertAxsAt(au, "g", [["iua"]])
         self.assertAxsAt(au, "ga", [["iua"]])
         self.assertAxsAt(au, "gb", [["iua"]])
+
+    def test_6(self):
+        """
+        IdP volumes with anon-get and other users/groups (github#79)
+        """
+        _, cfgdir, xcfg = self.prep()
+        au = AuthSrv(Cfg(c=[cfgdir + "/6.conf"], **xcfg), self.log)
+
+        self.assertAxs(au.vfs.axs, [])
+        self.assertEqual(au.vfs.vpath, "")
+        self.assertEqual(au.vfs.realpath, "")
+        self.assertNodes(au.vfs, [])
+
+        au.idp_checkin(None, "iua", "")
+        star = ["*", "iua"]
+        self.assertNodes(au.vfs, ["get", "priv"])
+        self.assertAxsAt(au, "get/iua", [["iua"], [], [], [], star])
+        self.assertAxsAt(au, "priv/iua", [["iua"], [], []])
+
+        au.idp_checkin(None, "iub", "")
+        star = ["*", "iua", "iub"]
+        self.assertNodes(au.vfs, ["get", "priv"])
+        self.assertAxsAt(au, "get/iua", [["iua"], [], [], [], star])
+        self.assertAxsAt(au, "get/iub", [["iub"], [], [], [], star])
+        self.assertAxsAt(au, "priv/iua", [["iua"], [], []])
+        self.assertAxsAt(au, "priv/iub", [["iub"], [], []])
+
+        au.idp_checkin(None, "iuc", "su")
+        star = ["*", "iua", "iub", "iuc"]
+        self.assertNodes(au.vfs, ["get", "priv", "team"])
+        self.assertAxsAt(au, "get/iua", [["iua", "iuc"], [], ["iuc"], [], star])
+        self.assertAxsAt(au, "get/iub", [["iub", "iuc"], [], ["iuc"], [], star])
+        self.assertAxsAt(au, "get/iuc", [["iuc"], [], ["iuc"], [], star])
+        self.assertAxsAt(au, "priv/iua", [["iua", "iuc"], [], ["iuc"]])
+        self.assertAxsAt(au, "priv/iub", [["iub", "iuc"], [], ["iuc"]])
+        self.assertAxsAt(au, "priv/iuc", [["iuc"], [], ["iuc"]])
+        self.assertAxsAt(au, "team/su/iuc", [["iuc"]])
+
+        au.idp_checkin(None, "iud", "su")
+        self.assertAxsAt(au, "team/su/iuc", [["iuc", "iud"]])
+        self.assertAxsAt(au, "team/su/iud", [["iuc", "iud"]])
@@ -147,6 +147,7 @@ class Cfg(Namespace):
         dbd="wal",
         fk_salt="a" * 16,
         idp_gsep=re.compile("[|:;+,]"),
+        iobuf=256 * 1024,
         lang="eng",
         log_badpwd=1,
         logout=573,
@@ -154,7 +155,8 @@ class Cfg(Namespace):
         mth={},
         mtp=[],
         rm_retry="0/0",
-        s_wr_sz=512 * 1024,
+        s_rd_sz=256 * 1024,
+        s_wr_sz=256 * 1024,
         sort="href",
         srch_hits=99999,
         th_crop="y",
@@ -254,4 +256,4 @@ class VHttpConn(object):
         self.thumbcli = None
         self.u2fh = FHC()

-        self.get_u2idx = self.hsrv.get_u2idx
+        self.get_u2idx = self.hsrv.get_u2idx