From 97233e90c892a31f0d48b9e690892498aa4b5af0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Oct 2015 20:21:28 +0300 Subject: [PATCH] Use flock instead of temporary files to avoid races/purge on ENOSPC/fsync time - races: two parallel `autojump --add` can lead to database purging (overwriting by one of tmp files) - ENOSPC: will leave autojump.txt empty, because no free space available - fsync: can take a while All of this can be fixed with flock(2). --- bin/autojump_data.py | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/bin/autojump_data.py b/bin/autojump_data.py index 33987b3..e209b9e 100644 --- a/bin/autojump_data.py +++ b/bin/autojump_data.py @@ -7,7 +7,7 @@ from collections import namedtuple import os import shutil import sys -from tempfile import NamedTemporaryFile +import fcntl from time import time if sys.version_info[0] == 3: @@ -75,10 +75,13 @@ def load(config): config['data_path'], 'r', encoding='utf-8', errors='replace') as f: - return dict( + fcntl.flock(f.fileno(), fcntl.LOCK_SH) + d = dict( imap( tupleize, ifilter(correct_length, imap(parse, f)))) + fcntl.flock(f.fileno(), fcntl.LOCK_UN) + return d except (IOError, EOFError): return load_backup(config) @@ -117,24 +120,11 @@ def save(config, data): """Save data and create backup, creating a new data file if necessary.""" create_dir(os.path.dirname(config['data_path'])) - # atomically save by writing to temporary file and moving to destination - try: - temp = NamedTemporaryFile(delete=False) - # Windows cannot reuse the same open file name - temp.close() - - with open(temp.name, 'w', encoding='utf-8', errors='replace') as f: - for path, weight in data.items(): - f.write(unico("%s\t%s\n" % (weight, path))) - - f.flush() - os.fsync(f) - except IOError as ex: - print("Error saving autojump data (disk full?)" % ex, file=sys.stderr) - sys.exit(1) - - # move temp_file -> autojump.txt - move_file(temp.name, config['data_path']) + with 
open(config['data_path'], 'w', encoding='utf-8') as f: + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + for path, weight in data.items(): + f.write(unico("%s\t%s\n" % (weight, path))) + fcntl.flock(f.fileno(), fcntl.LOCK_UN) # create backup file if it doesn't exist or is older than BACKUP_THRESHOLD if not os.path.exists(config['backup_path']) or \