fix zipseq store not storing
@@ -2,7 +2,7 @@
     "dailyFormat": "daily_YYMMDD",
     "structDir": "A:\\1 Amazon_Active_Projects\\3 ProjectStructure",
     "zipper": "7z",
-    "compression": 5,
+    "compression": 0,
     "Max7zInst": 0
 }

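For reference, this is the settings file implied by the hunk after the change, assuming the only line outside the hunk is the opening brace on line 1:

{
    "dailyFormat": "daily_YYMMDD",
    "structDir": "A:\\1 Amazon_Active_Projects\\3 ProjectStructure",
    "zipper": "7z",
    "compression": 0,
    "Max7zInst": 0
}

Dropping compression from 5 to 0 is what selects the new store-only branch in zip_sequences.py below (presumably surfaced there as COMPRESSION_LEVEL).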
zip_sequences.py (118 changed lines)
@@ -476,15 +476,42 @@ def is_archive_path(path: Path) -> bool:
     return any(part in ("_archive", "_CURRENT") for part in path.parts)
 
 
-def find_sequence_dirs(root: Path) -> Iterator[Path]:
+def find_sequence_dirs(root: Path, *, verbose: bool = False) -> Iterator[Path]:
+    seen_dirs = set()  # Track directories we've already yielded to avoid duplicates
     for dirpath, dirnames, filenames in os.walk(root):
         path = Path(dirpath)
         dirnames[:] = [d for d in dirnames if d not in ("_archive", "_CURRENT")]
         if is_archive_path(path):
+            if verbose:
+                rel = path.relative_to(root) if path.is_relative_to(root) else path
+                log("scan", f"Skipping archive path: {rel}", verbose_only=True, verbose=verbose)
             continue
+
+        # Check if this directory has sequence files directly
         has_frames = any(Path(dirpath, f).suffix.lower() in SEQUENCE_EXTENSIONS for f in filenames)
         if has_frames:
-            yield path
+            path_resolved = path.resolve()
+            if path_resolved not in seen_dirs:
+                seen_dirs.add(path_resolved)
+                yield path
+        elif verbose:
+            # Log directories that don't have sequence files for debugging
+            rel = path.relative_to(root) if path.is_relative_to(root) else path
+            if "scab" in path.name.lower():
+                # Special logging for directories that might be sequences
+                frame_extensions = [f for f in filenames if Path(f).suffix.lower() in SEQUENCE_EXTENSIONS]
+                all_files = list(path.iterdir()) if path.exists() else []
+                log("scan", f"Directory {rel}: {len(filenames)} files in dir, {len(frame_extensions)} with sequence extensions, {len(all_files)} total items", verbose_only=True, verbose=verbose)
+                # Check subdirectories
+                for subdir in dirnames[:5]:  # Check first 5 subdirs
+                    subdir_path = path / subdir
+                    try:
+                        subdir_files = list(subdir_path.iterdir()) if subdir_path.exists() else []
+                        subdir_frame_files = [f for f in subdir_files if f.is_file() and f.suffix.lower() in SEQUENCE_EXTENSIONS]
+                        if subdir_frame_files:
+                            log("scan", f" Subdirectory {subdir} has {len(subdir_frame_files)} sequence files", verbose_only=True, verbose=verbose)
+                    except (OSError, PermissionError):
+                        pass
 
 
 def iter_sequence_files(seq_dir: Path) -> Iterator[Path]:
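The new seen_dirs set dedupes on resolved paths, so a sequence directory that os.walk reaches under more than one spelling (for example through a junction or symlink) is yielded, and therefore queued for archiving, only once. A minimal sketch of that pattern in isolation, with hypothetical names:

from pathlib import Path
from typing import Iterable, Iterator

def unique_dirs(candidates: Iterable[Path]) -> Iterator[Path]:
    """Yield each real directory once, even if it is seen under several spellings."""
    seen: set[Path] = set()
    for path in candidates:
        resolved = path.resolve()  # canonical absolute path; collapses symlinks and ".." parts
        if resolved not in seen:
            seen.add(resolved)
            yield path  # keep the caller's spelling, as find_sequence_dirs does

# e.g. list(unique_dirs([Path("renders/shot_010"), Path("./renders/shot_010")])) yields one entry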
@@ -539,12 +566,20 @@ def state_changed(seq_state: dict, stored_state: dict | None) -> bool:
 def archive_path_for(seq_dir: Path) -> Path:
     rel = seq_dir.relative_to(RENDER_ROOT)
     suffix = ".7z" if ZIPPER_TYPE == "7z" else ".zip"
-    return (ARCHIVE_ROOT / rel).with_suffix(suffix)
+    # Append suffix instead of replacing, since directory names might have dots (e.g., "scab_v2.1")
+    return ARCHIVE_ROOT / f"{rel}{suffix}"
 
 
 def sequence_dir_for(zip_path: Path) -> Path:
     rel = zip_path.relative_to(ARCHIVE_ROOT)
-    return (RENDER_ROOT / rel).with_suffix("")
+    # Remove the archive suffix (.7z or .zip) from the end
+    # Handle both .7z and .zip extensions
+    rel_str = str(rel)
+    if rel_str.endswith(".7z"):
+        rel_str = rel_str[:-3]
+    elif rel_str.endswith(".zip"):
+        rel_str = rel_str[:-4]
+    return RENDER_ROOT / rel_str
 
 
 def state_path_for(zip_path: Path) -> Path:
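The rewrite above sidesteps a pathlib pitfall: with_suffix() treats everything after the last dot as the extension, so a versioned directory name silently loses its tail and the render-dir/archive mapping stops round-tripping. A small illustration, using a hypothetical directory name:

from pathlib import Path

seq = Path("renders/scab_v2.1")

# Old mapping: with_suffix() replaces ".1", so the archive lands at the wrong name
seq.with_suffix(".7z")          # -> renders/scab_v2.7z   (".1" is lost)

# New mapping: append the suffix, then strip exactly that suffix on the way back
archive = Path(f"{seq}.7z")     # -> renders/scab_v2.1.7z
Path(str(archive)[:-3])         # -> renders/scab_v2.1    (round-trips cleanly)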
@@ -612,42 +647,49 @@ def zip_sequence(seq_dir: Path, zip_path: Path, per_job_memory_limit: int | None
     ]
 
     # Set compression method and memory/dictionary size based on method
-    # PPMd uses memory parameter in method string, LZMA2 uses -md flag
+    # At compression level 0, use Copy (store) method for maximum speed
     FIXED_DICT_SIZE_MB = 1024
-    if COMPRESSION_METHOD == "PPMd":
-        # PPMd: specify memory as part of method string
-        cmd.append(f"-m0=PPMd:mem={FIXED_DICT_SIZE_MB}m")
-    elif COMPRESSION_METHOD == "LZMA2":
-        # LZMA2: use -md for dictionary size
-        cmd.append("-m0=LZMA2")
-        cmd.append(f"-md={FIXED_DICT_SIZE_MB}m")
-    elif COMPRESSION_METHOD == "BZip2":
-        # BZip2: use -md for dictionary size (smaller max: 900KB)
-        max_bzip2_dict = min(FIXED_DICT_SIZE_MB, 900)  # BZip2 max is 900KB
-        cmd.append("-m0=BZip2")
-        cmd.append(f"-md={max_bzip2_dict}k")
-    elif COMPRESSION_METHOD == "Deflate":
-        # Deflate: doesn't use dictionary size parameter
-        cmd.append("-m0=Deflate")
+    if COMPRESSION_LEVEL == 0:
+        # Level 0 = no compression, just store files (fastest)
+        cmd.append("-m0=Copy")
+        # Copy method doesn't need threading, but enable it anyway for consistency
+        cmd.append("-mmt=on")
     else:
-        # Fallback: use LZMA2
-        cmd.append("-m0=LZMA2")
-        cmd.append(f"-md={FIXED_DICT_SIZE_MB}m")
+        # Compression levels 1-9: use configured compression method
+        if COMPRESSION_METHOD == "PPMd":
+            # PPMd: specify memory as part of method string
+            cmd.append(f"-m0=PPMd:mem={FIXED_DICT_SIZE_MB}m")
+        elif COMPRESSION_METHOD == "LZMA2":
+            # LZMA2: use -md for dictionary size
+            cmd.append("-m0=LZMA2")
+            cmd.append(f"-md={FIXED_DICT_SIZE_MB}m")
+        elif COMPRESSION_METHOD == "BZip2":
+            # BZip2: use -md for dictionary size (smaller max: 900KB)
+            max_bzip2_dict = min(FIXED_DICT_SIZE_MB, 900)  # BZip2 max is 900KB
+            cmd.append("-m0=BZip2")
+            cmd.append(f"-md={max_bzip2_dict}k")
+        elif COMPRESSION_METHOD == "Deflate":
+            # Deflate: doesn't use dictionary size parameter
+            cmd.append("-m0=Deflate")
+        else:
+            # Fallback: use LZMA2
+            cmd.append("-m0=LZMA2")
+            cmd.append(f"-md={FIXED_DICT_SIZE_MB}m")
 
     # CPU thread allocation: when there's only 1 worker, use all CPU cores
     # When there are multiple workers, use auto mode to let 7z decide
     # Note: PPMd is single-threaded and won't benefit from -mmt
     cpu_cores = os.cpu_count() or 1
     if COMPRESSION_METHOD == "PPMd":
         # PPMd is single-threaded, so -mmt won't help
         # But we can still set it for consistency
         cmd.append("-mmt=on")
     elif worker_count == 1:
         # Single worker: use all CPU cores for maximum speed (LZMA2, BZip2, Deflate support this)
         cmd.append(f"-mmt={cpu_cores}")
     else:
         # Multiple workers: use auto mode (7z will manage threads)
         cmd.append("-mmt=on")
 
     cmd.extend([
         str(temp_zip_abs),
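This branch is what the commit title refers to: with compression set to 0, 7-Zip is asked to use the Copy method, i.e. store the frames without compressing them, which is much faster when the frames are already in compressed image formats. Roughly, the assembled command would look like the sketch below; the archive path and file list are placeholders, and the script's other flags are elided:

# Sketch only, not the script's exact command construction
cmd = [
    "7z", "a",              # add files to an archive
    "-m0=Copy",             # method 0 = Copy: store entries without compressing
    "-mmt=on",              # multithreading switch, kept for parity with the other branches
    "path/to/archive.7z",   # placeholder for temp_zip_abs
    "frame_0001.exr",       # placeholder for the frames being added
]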
@@ -796,7 +838,7 @@ def run_zip(requested_workers: int | None, *, verbose: bool) -> int:
     queued = 0
 
     if RENDER_ROOT.exists():
-        for seq_dir in find_sequence_dirs(RENDER_ROOT):
+        for seq_dir in find_sequence_dirs(RENDER_ROOT, verbose=verbose):
             total_scanned += 1
             rel = seq_dir.relative_to(RENDER_ROOT)
             if total_scanned <= 5 or total_scanned % 10 == 0: