Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Check if eventIdFile size is > 0 before loading data #64

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 19 additions & 5 deletions src/shotgunEventDaemon.py
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,7 @@ def _loadEventIdData(self):
"""
eventIdFile = self.config.getEventIdFile()

if eventIdFile and os.path.exists(eventIdFile):
if eventIdFile and os.path.exists(eventIdFile) and os.path.getsize(eventIdFile) > 0:
try:
fh = open(eventIdFile)
try:
Expand Down Expand Up @@ -683,7 +683,7 @@ def getNextUnprocessedEventId(self):
now = datetime.datetime.now()
for k in self._backlog.keys():
v = self._backlog[k]
if v < now:
if v < now: # Look around here to not cut events from timeouts
self.logger.warning('Timeout elapsed on backlog event id %d.', k)
del(self._backlog[k])
elif nextId is None or k < nextId:
Expand Down Expand Up @@ -774,6 +774,7 @@ def process(self, event):
self.logger.info('Processed id %d from backlog.' % event['id'])
del(self._backlog[event['id']])
self._updateLastEventId(event)
# This block skips events. Let's see how we can understand it and not.
elif self._lastEventId is not None and event['id'] <= self._lastEventId:
msg = 'Event %d is too old. Last event processed was (%d).'
self.logger.debug(msg, event['id'], self._lastEventId)
Expand All @@ -789,11 +790,15 @@ def _process(self, event):
if callback.canProcess(event):
msg = 'Dispatching event %d to callback %s.'
self.logger.debug(msg, event['id'], str(callback))
if not callback.process(event):
process_status = callback.process(event)
if not process_status:
# A callback in the plugin failed. Deactivate the whole
# plugin.
self._active = False
break
elif isinstance(process_status, str):
self.logger.debug("Retry conditions detected for %s", str(callback))
return False
else:
msg = 'Skipping inactive callback %s in plugin.'
self.logger.debug(msg, str(callback))
Expand Down Expand Up @@ -957,9 +962,18 @@ def process(self, event):
tb = tb.tb_next

msg = 'An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s'
self._logger.critical(msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals))
if self._stopOnError:
# If we've gone through the config we do want to see the logs. Right now we don't want to do a big refactor
# Can we add a stack trace and retry param to plugins? To the tracebacks? Is that bad practice/too debuggy?
formatted_traceback = traceback.format_exc()
file_protocol_cycle = "AllConfigsFailed" in formatted_traceback
if file_protocol_cycle:
msg = msg + "\n\nThe above event will retry until resolved, killed, or crashes in an unhandled way\n\n"

self._logger.critical(msg, formatted_traceback, pprint.pformat(stack[1].f_locals))
if not file_protocol_cycle and self._stopOnError:
self._active = False
elif file_protocol_cycle:
return str(event['session_uuid'])

return self._active

Expand Down