From 52f3c84b29390bfd8e729f4e11c1e522b8590d29 Mon Sep 17 00:00:00 2001
From: Joël Schaerer
Date: Wed, 29 Sep 2010 15:22:13 +0200
Subject: [PATCH] try to make autojump more robust to concurrent uses over network filesystems

When your home directory is on a network filesystem, multiple shells can try
to write to the autojump store concurrently. The previous scheme was a bit
dangerous because every process wrote to the same temporary file name. This
should be better: each autojump instance now writes to its own temporary file,
and only the atomic rename() can race. Time will tell...
---
 autojump | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/autojump b/autojump
index 7106cc8..74db517 100755
--- a/autojump
+++ b/autojump
@@ -19,6 +19,7 @@ from __future__ import division
 import cPickle
 import getopt
 from sys import argv,exit,stderr
+from tempfile import NamedTemporaryFile
 import os
 import signal
 max_keyweight=1000
@@ -38,19 +39,19 @@ def dicadd(dic,key,increment=1):
     dic[key]=dic.get(key,0.)+increment

 def save(path_dict,dic_file):
-    f=open(dic_file+".tmp",'w')
+    f=NamedTemporaryFile(dir=config_dir,delete=False)
     cPickle.dump(path_dict,f,-1)
     f.flush()
     os.fsync(f)
     f.close()
-    try:
-        os.rename(dic_file+".tmp",dic_file) #cf. http://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/
-        import time #backup file
+    os.rename(f.name,dic_file) #cf. http://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/
+    try: #backup file
+        import time
         if not os.path.exists(dic_file+".bak") or time.time()-os.path.getmtime(dic_file+".bak")>86400:
             import shutil
             shutil.copy(dic_file,dic_file+".bak")
-    except OSError:
-        pass #Fail quietly, this usually means a concurrent autojump process already did the job
+    except OSError, e:
+        print >> stderr, "Error while creating backup autojump file. (%s)" % e

 def forget(path_dict,dic_file):
     """Gradually forget about directories. Only call from the actual jump since it can take time"""
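
Note: for reference, a minimal standalone sketch of the write-to-a-unique-temp-file-then-rename() pattern this patch switches to. It is an illustration, not autojump's actual code: atomic_save, store_path, data and the example file name are made-up names, and plain pickle stands in for the cPickle/config_dir specifics of the real save().

import os
import pickle
from tempfile import NamedTemporaryFile

def atomic_save(data, store_path):
    """Write data to store_path so that concurrent writers never corrupt it."""
    store_dir = os.path.dirname(store_path) or "."
    # Each writer gets its own uniquely named temp file in the same directory,
    # so saves racing over a network filesystem cannot clobber each other.
    f = NamedTemporaryFile(dir=store_dir, delete=False)
    try:
        pickle.dump(data, f, -1)
        f.flush()
        os.fsync(f.fileno())  # make sure the bytes are on disk before the rename
    finally:
        f.close()
    # rename() within one directory is atomic on POSIX filesystems, so readers
    # see either the old store or the complete new one, never a truncated mix.
    os.rename(f.name, store_path)

if __name__ == "__main__":
    # Hypothetical usage with a made-up store file name.
    atomic_save({"/tmp": 10.0}, os.path.expanduser("~/.autojump_py_example"))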