Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Log from worker subprocess, check if result_id is received #78

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions mcrit/SpawningWorker.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,14 @@ def _executeJobPayload(self, job_payload, job):
try:
stdout_result, stderr_result = console_handle.communicate(timeout=self._queue_config.QUEUE_SPAWNINGWORKER_CHILDREN_TIMEOUT)
stdout_result = stdout_result.strip().decode("utf-8")
# TODO: log output from subprocess in the order it arrived
# instead of the split to stdout, stderr
if stdout_result:
LOGGER.info("STDOUT logs from subprocess: %s", stdout_result)
if stderr_result:
stderr_result = stderr_result.strip().decode("utf-8")
LOGGER.info("STDERR logs from subprocess: %s", stderr_result)

last_line = stdout_result.split("\n")[-1]
# successful output should be just the result_id in a single line
match = re.match("(?P<result_id>[0-9a-fA-F]{24})", last_line)
Expand All @@ -109,12 +117,14 @@ def _executeJob(self, job):
self.queue.clean()
self.t_last_cleanup = time.time()
try:
with job as j:
LOGGER.info("Processing Remote Job: %s", job)
result_id = self._executeJobPayload(j["payload"], job)
LOGGER.info("Processing Remote Job: %s", job)
result_id = self._executeJobPayload(j["payload"], job)
if result_id:
# result should have already been persisted by the child process, we repeat it here to close the job for the queue
job.result = result_id
LOGGER.info("Finished Remote Job with result_id: %s", result_id)
else:
LOGGER.info("Failed Running Remote Job: %s", job)
except Exception as exc:
pass

Expand Down
Loading