Made changes to ensure publishing is done correctly, and added a pause between publish batches to prevent the 'TOO_MANY_UNCONFIRMED' error.

crowetic 2025-04-15 19:30:35 -07:00
parent 05793f26e1
commit efa1e821fa


@@ -18,6 +18,8 @@ SIZE_LIMIT_MB = 410
 DEFAULT_CODEC = 'h264'
 DEFAULT_USE_NVIDIA = False
 LOG_FILE = "video-encode-and-publish.log"
+BATCH_SIZE = 5
+WAIT_SECONDS = 120

 # Logging
 def log(msg):
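The two new constants drive the throttling added later in this commit: publishes go out in groups of BATCH_SIZE, with a WAIT_SECONDS sleep after each group so unconfirmed transactions can clear. A quick illustration of the pacing, with a hypothetical queue size:

    # Illustrative only: 23 videos with BATCH_SIZE = 5 and WAIT_SECONDS = 120.
    # process_and_wait (added below) sleeps after every batch, including the last.
    import math

    BATCH_SIZE = 5
    WAIT_SECONDS = 120
    videos = 23
    batches = math.ceil(videos / BATCH_SIZE)  # 5 batches
    print(f"{batches} batches, {batches * WAIT_SECONDS} s total sleep time")  # 600 s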
@@ -93,6 +95,8 @@ def generate_all_thumbnails(video_path, duration):
         if thumb_path:
             thumbnails.append(f"data:image/webp;base64,{base64_encode_file(thumb_path)}")
     return thumbnails

 def base64_encode_file(path):
     with open(path, 'rb') as f:
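The thumbnail hunk above embeds each image as a data URI: MIME type, the base64 marker, then the encoded bytes. The body of base64_encode_file is truncated in this view; a plausible completion and a usage sketch (the file path is hypothetical):

    import base64

    def base64_encode_file(path):
        # Plausible completion of the truncated helper: read raw bytes,
        # return them as base64 text.
        with open(path, 'rb') as f:
            return base64.b64encode(f.read()).decode('utf-8')

    # Yields something like "data:image/webp;base64,UklGR..." suitable for
    # embedding in the video metadata.
    uri = f"data:image/webp;base64,{base64_encode_file('thumb_0.webp')}"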
@@ -116,6 +120,34 @@ def get_api_key():
         with open(path, 'r') as f:
             return f.read().strip()
     return input("Enter your Qortal API key: ").strip()

+def generate_identifier_from_filename(filepath):
+    base = os.path.splitext(os.path.basename(filepath))[0]
+    slug = slugify(base)
+    return f"qtube_vid_{slug}"
+
+def check_existing_identifier(base_identifier, channel_name):
+    try:
+        response = requests.get(
+            f"{API_URL}/arbitrary/resources/searchsimple",
+            params={
+                "service": "DOCUMENT",
+                "name": channel_name,
+                "limit": 0,
+                "reverse": "true"
+            },
+            headers={"accept": "application/json"}
+        )
+        response.raise_for_status()
+        entries = response.json()
+        for entry in entries:
+            if entry["identifier"].startswith(base_identifier):
+                return entry["identifier"].replace("_metadata", "")
+    except Exception as e:
+        log(f"[WARNING] Could not check existing identifiers: {e}")
+    return None
+
 def build_sign_publish_from_file(service, identifier, name, file_path, private_key, dry_run=False, metadata={}):
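The two new functions rely on helpers this diff does not show: slugify (used for the base identifier) and generate_short_id (used further down when no existing identifier is found). Both are presumably defined elsewhere in the script; hedged sketches of what they plausibly do:

    import random
    import re
    import string

    def slugify(text):
        # Sketch under assumption: lowercase, collapse runs of
        # non-alphanumerics into hyphens, trim stray hyphens.
        return re.sub(r'[^a-z0-9]+', '-', text.lower()).strip('-')

    def generate_short_id(length=6):
        # Sketch under assumption: short random suffix for uniqueness.
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choices(alphabet, k=length))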
@@ -372,10 +404,17 @@ def publish_qtube(video_path, private_key, mode='auto', dry_run=False):
     if use_auto != 'yes':
         title, fullDescription, htmlDescription, category, codec, use_nvidia = prompt_for_metadata()

-    slug = slugify(title)
-    base_slug = f"qtube_vid_{slug}"
-    identifier = None
-    short_id = None
+    base_identifier = generate_identifier_from_filename(video_path)
+    existing = check_existing_identifier(base_identifier, name)
+
+    if existing:
+        identifier = existing
+        log(f"[INFO] Reusing existing identifier: {identifier}")
+    else:
+        short_id = generate_short_id()
+        identifier = f"{base_identifier}_{short_id}"
+        log(f"[INFO] Using new identifier: {identifier}")
+    short_id = None

     try:
         response = requests.get(
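A worked example of the new identifier flow, with illustrative values: a file named "My Video.mp4" slugifies to "my-video", so the base identifier is "qtube_vid_my-video". Because the metadata document is published under "{identifier}_metadata" (see the next hunk), check_existing_identifier strips that suffix before reusing a match:

    # Illustrative trace; the short id is made up.
    base_identifier = "qtube_vid_my-video"  # from "My Video.mp4"
    # First publish: no match on the channel, so a suffix is generated:
    identifier = f"{base_identifier}_a1b2c3"
    # Later re-publish: searchsimple returns
    # "qtube_vid_my-video_a1b2c3_metadata"; it startswith(base_identifier),
    # "_metadata" is stripped, and "qtube_vid_my-video_a1b2c3" is reused.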
@@ -452,6 +491,24 @@ def publish_qtube(video_path, private_key, mode='auto', dry_run=False):
         log(f"Publishing METADATA: {metadata_identifier}")
         build_sign_publish_from_file("DOCUMENT", metadata_identifier, name, metadata_file_path, private_key, dry_run)

+def process_video_batch(video_paths, private_key, mode='auto', dry_run=False):
+    batch = []
+    for video_path in video_paths:
+        batch.append(video_path)
+        if len(batch) >= BATCH_SIZE:
+            process_and_wait(batch, private_key, mode, dry_run)
+            batch = []
+    if batch:
+        process_and_wait(batch, private_key, mode, dry_run)
+
+def process_and_wait(batch, private_key, mode, dry_run):
+    for video_path in batch:
+        try:
+            publish_qtube(video_path, private_key, mode=mode, dry_run=dry_run)
+        except Exception as e:
+            log(f"[ERROR] Failed to publish {video_path}: {e}")
+    log(f"[WAIT] Waiting {WAIT_SECONDS} seconds before next batch...")
+    time.sleep(WAIT_SECONDS)
+
 def main():
     parser = argparse.ArgumentParser()
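A hedged usage sketch of the new batching entry point; the paths and key are made up, and dry_run=True exercises the flow without broadcasting transactions. Note that the pause in process_and_wait also follows the final batch:

    # Hypothetical invocation (the key normally comes from get_private_key()).
    videos = [f"clips/episode_{n}.mp4" for n in range(1, 13)]
    process_video_batch(videos, "PRIVATE_KEY", mode='auto', dry_run=True)
    # 12 videos -> batches of 5, 5, and 2, each followed by a 120 s sleep.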
@@ -460,9 +517,9 @@ def main():
     args = parser.parse_args()

     private_key = get_private_key()
+    video_paths = []

     for root, dirs, files in os.walk(os.getcwd()):
-        # Skip folders we don't want to process
         if 'ORIGINALS' in dirs:
             dirs.remove('ORIGINALS')
         if 'too_large' in dirs:
@@ -476,7 +533,6 @@ def main():
             try:
                 full_path = os.path.join(root, file)

-                # Per-folder QDN.json check
                 if not qdn_template_cache:
                     metadata_path = os.path.join(root, 'QDN.json')
                     if os.path.exists(metadata_path):
@@ -493,16 +549,13 @@ def main():
                     if should_reencode(full_path):
                         reencoded_path = reencode_video(full_path, codec=codec, use_nvidia=use_nvidia)
                         if reencoded_path:
-                            publish_qtube(reencoded_path, private_key, mode=args.mode, dry_run=args.dry_run)
+                            video_paths.append(reencoded_path)
                     else:
-                        publish_qtube(full_path, private_key, mode=args.mode, dry_run=args.dry_run)
+                        video_paths.append(full_path)
                 except Exception as e:
                     log(f"Failed to process {file}: {e}")

+    process_video_batch(video_paths, private_key, mode=args.mode, dry_run=args.dry_run)
+
 if __name__ == "__main__":
     main()
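Based on the args.mode and args.dry_run references in main(), a run of the updated script presumably looks something like the line below; the exact flag names come from parser setup not shown in this diff:

    python3 video-encode-and-publish.py --mode auto --dry-run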