commit be0e8b075b2f40dad5c14d5b30c9109ddc85d8cb
parent 0a4b14572f1a12e69d9f8e1342c7ed755fb231a9
Author: archiveanon <>
Date: Wed, 9 Jul 2025 16:52:53 +0000
Switch to logging module instead of print
This should keep logging from blocking the event loop.
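Note: the stock StreamHandler still writes synchronously, so this change mostly replaces the ad-hoc prints; if blocking ever becomes a real problem under asyncio, a QueueHandler/QueueListener pair can move the I/O onto a background thread. A minimal sketch of that setup, assuming logging is configured once at startup (not part of this change):

    import logging
    import logging.handlers
    import queue

    q = queue.SimpleQueue()
    # the listener's handlers do the real (blocking) I/O on their own thread
    listener = logging.handlers.QueueListener(q, logging.StreamHandler())
    listener.start()
    # loggers now only enqueue records, so the event loop never waits on I/O
    logging.basicConfig(handlers=[logging.handlers.QueueHandler(q)], level=logging.INFO)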
Diffstat:
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/autotako/job_render.py b/src/autotako/job_render.py
@@ -4,9 +4,9 @@ import asyncio
import datetime
import enum
import itertools
+import logging
import pathlib
import re
-import traceback
from typing import Self

import gofile.api # type: ignore
@@ -22,6 +22,8 @@ from .database import database_ctx
app = microdot.Microdot()
+log = logging.getLogger(__name__)
+
background_tasks = set()
render_tasks: dict[str, asyncio.Task] = {}
upload_tasks: dict[pathlib.Path, asyncio.Task] = {}
@@ -286,7 +288,7 @@ async def do_webdav_upload(webdav: WebDavConfig, filepath: pathlib.Path, target:
return
except httpx.ConnectTimeout:
if not connection_warning_seen:
- print(f"Failed to connect to {webdav.base_url}. Retrying...")
+ log.warning(f"Failed to connect to {webdav.base_url}. Retrying...")
connection_warning_seen = True
await asyncio.sleep(10)
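Aside: the f-string above is formatted eagerly even when WARNING-level records are filtered out. The logging module's lazy %-style arguments defer formatting until a handler actually emits the record; an equivalent deferred form (a common idiom, not what this commit uses):

    log.warning("Failed to connect to %s. Retrying...", webdav.base_url)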
@@ -367,9 +369,9 @@ async def _process_job(jobid):
task.add_done_callback(background_tasks.discard)
else:
t = torf.Torrent.read(torrent_file)
- print(t)
- print(t.magnet(size=False))
- print(t.files)
+ log.debug(t)
+ log.debug(t.magnet(size=False))
+ log.debug(t.files)
# punt file to webdav remote
if config.webdav and channel and channel.webdav_path:
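Under logging's defaults the effective level is WARNING, so these log.debug() calls produce no output until the app opts in. One hypothetical way to surface them, using the module logger name implied by the file path (autotako.job_render):

    logging.basicConfig()  # attach a stderr handler to the root logger
    logging.getLogger("autotako.job_render").setLevel(logging.DEBUG)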
@@ -492,7 +494,7 @@ async def upload_job(request, jobid: str):
await get_process_job_task(jobid)
return await microdot.jinja.Template("job/upload_success.html").render_async()
except torf.TorfError:
- traceback.print_exc()
+ log.exception(f"Failed to upload job {jobid}")
return await microdot.jinja.Template("job/upload_error.html").render_async()
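log.exception() is shorthand for log.error(..., exc_info=True): the record is logged at ERROR level with the active traceback appended, so nothing that traceback.print_exc() printed is lost. The equivalent spelled-out call, for reference:

    log.error(f"Failed to upload job {jobid}", exc_info=True)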
@@ -534,11 +536,11 @@ async def job_auto_monitor():
if job.uploadability_state != job_conf.upload:
continue
if not config.autoupload.active:
- print(f"'{job.title}' meets conditions for upload")
+ log.info(f"'{job.title}' meets conditions for upload")
continue
if statuses.get(job.id):
continue
- print(f"'{job.title}' is scheduled for upload")
+ log.info(f"'{job.title}' is scheduled for upload")
get_process_job_task(job.id)
await asyncio.sleep(120)