Re #286 - Solving lost data/corrupted data - Tweak timing and try to write to a temp file first (#292)

* Re #286 - Tweak timing and try to write to a temp file first; increase logging and format the info message better.
pull/271/head
dgtlmoon 3 years ago committed by GitHub
parent 0a29b3a582
commit 4659993ecf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -443,6 +443,10 @@ def changedetection_app(config=None, datastore_o=None):
flash("Updated watch.") flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.sync_to_json()
# Queue the watch for immediate recheck # Queue the watch for immediate recheck
update_q.put(uuid) update_q.put(uuid)

@ -16,6 +16,8 @@ class ChangeDetectionStore:
lock = Lock() lock = Lock()
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"): def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
# Should only be active for docker
# logging.basicConfig(filename='/dev/stdout', level=logging.INFO)
self.needs_write = False self.needs_write = False
self.datastore_path = datastore_path self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path) self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
@ -348,20 +350,30 @@ class ChangeDetectionStore:
return fname return fname
def sync_to_json(self):
    """Persist the in-memory watch index to url-watches.json on disk.

    Re #286 - the JSON is first written to a ``.tmp`` file and then renamed
    into place, so a crash / out-of-memory mid-write cannot leave a
    truncated or corrupted main index file behind.

    If another thread mutates the shared data while it is being
    deep-copied (``deepcopy`` raises ``RuntimeError``), we wait 15 seconds
    and try again.  Clears ``self.needs_write`` when done.
    """
    logging.info("Saving JSON..")

    data = {}
    while True:
        try:
            # Snapshot the shared state so we serialise a consistent copy
            # while other threads keep working on self.__data.
            data = deepcopy(self.__data)
        except RuntimeError as e:
            # Data changed mid-copy; retry iteratively rather than by
            # recursing (the original recursive retry could exhaust the
            # interpreter's recursion limit under sustained contention).
            logging.error("! Data changed when writing to JSON, trying again.. %s", str(e))
            # Try again in 15 seconds
            time.sleep(15)
        else:
            break

    try:
        # Re #286 - First write to a temp file, then rename it into place.
        # This is a fairly basic strategy to deal with the case that the
        # file is corrupted, system was out of memory, out of RAM etc.
        with open(self.json_store_path + ".tmp", 'w') as json_file:
            json.dump(data, json_file, indent=4)
    except Exception as e:
        logging.error("Error writing JSON!! (Main JSON file save was skipped) : %s", str(e))
    else:
        # Swap the verified temp file in over the live index
        # (atomic on POSIX filesystems).
        os.rename(self.json_store_path + ".tmp", self.json_store_path)

    self.needs_write = False
@ -376,7 +388,9 @@ class ChangeDetectionStore:
if self.needs_write: if self.needs_write:
self.sync_to_json() self.sync_to_json()
time.sleep(3)
# Once per minute is enough, more and it can cause high CPU usage
time.sleep(60)
# Go through the datastore path and remove any snapshots that are not mentioned in the index # Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy. # This usually is not used, but can be handy.

Loading…
Cancel
Save