#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright © 2008-2012 Joel Schaerer
Copyright © 2012-2014 William Ting

* This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3, or (at your option)
  any later version.

* This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

* You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""

from __future__ import print_function

from difflib import SequenceMatcher
from itertools import chain
from math import sqrt
from operator import attrgetter
from operator import itemgetter
import os
import re
import sys

if sys.version_info[0] == 3:
    ifilter = filter
    imap = map
    os.getcwdu = os.getcwd
else:
    from itertools import ifilter
    from itertools import imap

from autojump_argparse import ArgumentParser

from autojump_data import dictify
from autojump_data import entriefy
from autojump_data import Entry
from autojump_data import load
from autojump_data import save
from autojump_utils import encode
from autojump_utils import first
from autojump_utils import get_tab_entry_info
from autojump_utils import get_pwd
from autojump_utils import has_uppercase
from autojump_utils import is_osx
from autojump_utils import last
from autojump_utils import print_entry
from autojump_utils import print_tab_menu
from autojump_utils import sanitize
from autojump_utils import take

VERSION = '22.0.0-alpha'
FUZZY_MATCH_THRESHOLD = 0.6
TAB_ENTRIES_COUNT = 9
TAB_SEPARATOR = '__'


def set_defaults():
    config = {}

    if is_osx():
        data_home = os.path.join(
            os.path.expanduser('~'),
            'Library',
            'autojump')
    else:
        data_home = os.getenv(
            'XDG_DATA_HOME',
            os.path.join(
                os.path.expanduser('~'),
                '.local',
                'share',
                'autojump'))

    config['data_path'] = os.path.join(data_home, 'autojump.txt')
    config['backup_path'] = os.path.join(data_home, 'autojump.txt.bak')
    config['tmp_path'] = os.path.join(data_home, 'data.tmp')

    return config
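
# Illustrative sketch (not part of the original source): on Linux without
# XDG_DATA_HOME set, the defaults above expand to something like
#
#     >>> set_defaults()['data_path']          # doctest: +SKIP
#     '/home/alice/.local/share/autojump/autojump.txt'
#
# (the user 'alice' is hypothetical); on OS X the data directory is
# ~/Library/autojump instead.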


def parse_arguments():
    parser = ArgumentParser(
        description='Automatically jump to directory passed as an argument.',
        epilog="Please see autojump(1) man pages for full documentation.")
    parser.add_argument(
        'directory', metavar='DIRECTORY', nargs='*', default='',
        help='directory to jump to')
    parser.add_argument(
        '-a', '--add', metavar='DIRECTORY',
        help='add path')
    parser.add_argument(
        '-i', '--increase', metavar='WEIGHT', nargs='?', type=int,
        const=10, default=False,
        help='increase current directory weight')
    parser.add_argument(
        '-d', '--decrease', metavar='WEIGHT', nargs='?', type=int,
        const=15, default=False,
        help='decrease current directory weight')
    parser.add_argument(
        '--complete', action="store_true", default=False,
        help='used for tab completion')
    parser.add_argument(
        '--purge', action="store_true", default=False,
        help='remove non-existent paths from database')
    parser.add_argument(
        '-s', '--stat', action="store_true", default=False,
        help='show database entries and their key weights')
    parser.add_argument(
        '-v', '--version', action="version", version="%(prog)s v" +
        VERSION, help='show version information')

    return parser.parse_args()


def add_path(data, path, weight=10):
    """
    Add a new path or increment an existing one.

    os.path.realpath() is not used because it's preferable to keep symlinked
    paths (with the resulting duplicate entries in the database) rather than
    collapse them into a single canonical path.
    """
    path = encode(path).rstrip(os.sep)
    if path == os.path.expanduser('~'):
        return data, Entry(path, 0)

    data[path] = sqrt((data.get(path, 0) ** 2) + (weight ** 2))
    return data, Entry(path, data[path])
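
# Illustrative sketch (not part of the original source): weights grow as the
# root of the sum of squares, so repeat visits have diminishing returns.
# Bumping an existing weight of 10 by the default 10, for a hypothetical
# path, gives sqrt(10**2 + 10**2):
#
#     >>> data, entry = add_path({'/home/alice/code': 10}, '/home/alice/code')
#     >>> round(entry.weight, 2)
#     14.14
#
# The home directory itself is never weighted; add_path() returns it with
# weight 0 and leaves the data unchanged.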


def decrease_path(data, path, weight=15):
    """Decrease or zero out a path."""
    path = encode(path).rstrip(os.sep)
    data[path] = max(0, data.get(path, 0) - weight)
    return data, Entry(path, data[path])
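
# Illustrative sketch (not part of the original source): weights never go
# negative; decreasing a hypothetical entry of weight 10 by the default 15
# bottoms out at max(0, 10 - 15):
#
#     >>> data, entry = decrease_path({'/home/alice/code': 10}, '/home/alice/code')
#     >>> entry.weight
#     0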


def detect_smartcase(needles):
    """
    If any needles contain an uppercase letter then use case sensitive
    searching. Otherwise use case insensitive searching.
    """
    return not any(imap(has_uppercase, needles))
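
# Illustrative sketch (not part of the original source), mirroring vim's
# smartcase behaviour:
#
#     >>> detect_smartcase(['foo', 'bar'])   # all lowercase -> ignore case
#     True
#     >>> detect_smartcase(['foo', 'Bar'])   # any uppercase -> exact case
#     False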


def find_matches(entries, needles):
    """Return an iterator to matching entries."""
    try:
        not_cwd = lambda entry: entry.path != os.getcwdu()
    except OSError:
        # tautology if current working directory no longer exists
        not_cwd = lambda x: True

    data = sorted(
        ifilter(not_cwd, entries),
        key=attrgetter('weight'),
        reverse=True)

    ignore_case = detect_smartcase(needles)

    exists = lambda entry: os.path.exists(entry.path)
    return ifilter(
        exists,
        chain(
            match_consecutive(needles, data, ignore_case),
            match_fuzzy(needles, data, ignore_case),
            match_anywhere(needles, data, ignore_case),
            # default return value so calling shell functions have an
            # argument to `cd` to
            [Entry('.', 0)]))
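
# Illustrative note (not part of the original source): matchers run from
# strictest to loosest (consecutive, then fuzzy, then anywhere) over entries
# pre-sorted by weight, with Entry('.', 0) as the final fallback. Because
# chain() and ifilter() are lazy, a caller that only takes the first hit,
# e.g.
#
#     >>> first(find_matches(entries, ['foo']))   # doctest: +SKIP
#
# never iterates the looser matchers when a stricter one already produced a
# match.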


def handle_tab_completion(needle, entries):
    if not needle:
        sys.exit(0)

    tab_needle, tab_index, tab_path = get_tab_entry_info(needle, TAB_SEPARATOR)

    if tab_path:
        print(tab_path)
    elif tab_index:
        get_ith_path = lambda i, iterable: last(take(i, iterable)).path
        print(get_ith_path(tab_index, find_matches(entries, tab_needle)))
    elif tab_needle:
        # found partial tab completion entry
        print_tab_menu(
            tab_needle,
            take(TAB_ENTRIES_COUNT, find_matches(entries, tab_needle)),
            TAB_SEPARATOR)
    else:
        print_tab_menu(
            needle,
            take(TAB_ENTRIES_COUNT, find_matches(entries, needle)),
            TAB_SEPARATOR)
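
# Illustrative note (not part of the original source): tab completion
# round-trips entries of the form <needle>__<index>__<path>, built with
# TAB_SEPARATOR; the exact layout is defined by print_tab_menu() and
# get_tab_entry_info() in autojump_utils. For a hypothetical selection:
#
#     >>> get_tab_entry_info('foo__3__/home/alice/foo', TAB_SEPARATOR)  # doctest: +SKIP
#     ('foo', 3, '/home/alice/foo')
#
# A full path is printed as-is, a bare index is resolved via find_matches(),
# and anything else falls back to printing a fresh tab menu.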


def match_anywhere(needles, haystack, ignore_case=False):
    """
    Matches needles anywhere in the path as long as they're in the same (but
    not necessarily consecutive) order.

    For example:
        needles = ['foo', 'baz']
        regex needle = r'.*foo.*baz.*'
        haystack = [
            (path="/foo/bar/baz", weight=10),
            (path="/baz/foo/bar", weight=10),
            (path="/foo/baz", weight=10)]

        result = [
            (path="/foo/bar/baz", weight=10),
            (path="/foo/baz", weight=10)]
    """
    regex_needle = '.*' + '.*'.join(needles) + '.*'
    regex_flags = re.IGNORECASE | re.UNICODE if ignore_case else re.UNICODE
    found = lambda entry: re.search(
        regex_needle,
        entry.path,
        flags=regex_flags)
    return ifilter(found, haystack)


def match_consecutive(needles, haystack, ignore_case=False):
    """
    Matches consecutive needles at the end of a path.

    For example:
        needles = ['foo', 'baz']
        haystack = [
            (path="/foo/bar/baz", weight=10),
            (path="/foo/baz/moo", weight=10),
            (path="/moo/foo/baz", weight=10),
            (path="/foo/baz", weight=10)]

        regex_needle = re.compile(r'''
            foo     # needle #1
            [^/]*   # all characters except os.sep zero or more times
            /       # os.sep
            [^/]*   # all characters except os.sep zero or more times
            baz     # needle #2
            [^/]*   # all characters except os.sep zero or more times
            $       # end of string
            ''')

        result = [
            (path="/moo/foo/baz", weight=10),
            (path="/foo/baz", weight=10)]
    """
    regex_no_sep = '[^' + os.sep + ']*'
    regex_one_sep = regex_no_sep + os.sep + regex_no_sep
    regex_no_sep_end = regex_no_sep + '$'
    # can't use compiled regex because of flags
    regex_needle = regex_one_sep.join(needles) + regex_no_sep_end
    regex_flags = re.IGNORECASE | re.UNICODE if ignore_case else re.UNICODE

    found = lambda entry: re.search(
        regex_needle,
        entry.path,
        flags=regex_flags)
    return ifilter(found, haystack)
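
# Illustrative note (not part of the original source): for needles
# ['foo', 'baz'] and os.sep == '/', the string assembled above is
#
#     foo[^/]*/[^/]*baz[^/]*$
#
# i.e. exactly the verbose pattern spelled out in the docstring.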


def match_fuzzy(needles, haystack, ignore_case=False):
    """
    Performs an approximate match of the last needle against the terminal
    directory of every path, keeping entries whose similarity meets
    FUZZY_MATCH_THRESHOLD.

    For example:
        needles = ['foo', 'bar']
        haystack = [
            (path="/foo/bar/baz", weight=11),
            (path="/foo/baz/moo", weight=10),
            (path="/moo/foo/baz", weight=10),
            (path="/foo/baz", weight=10),
            (path="/foo/bar", weight=10)]

        result = [
            (path="/foo/bar/baz", weight=11),
            (path="/moo/foo/baz", weight=10),
            (path="/foo/baz", weight=10),
            (path="/foo/bar", weight=10)]

    This is a weak heuristic and used as a last resort to find matches.
    """
    end_dir = lambda path: last(os.path.split(path))
    if ignore_case:
        needle = last(needles).lower()
        match_percent = lambda entry: SequenceMatcher(
            a=needle,
            b=end_dir(entry.path.lower())).ratio()
    else:
        needle = last(needles)
        match_percent = lambda entry: SequenceMatcher(
            a=needle,
            b=end_dir(entry.path)).ratio()
    meets_threshold = lambda entry: match_percent(entry) >= \
        FUZZY_MATCH_THRESHOLD
    return ifilter(meets_threshold, haystack)
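
# Illustrative sketch (not part of the original source): SequenceMatcher's
# ratio() is 2 * M / T, where M is the number of matching characters and T
# the combined length of both strings, so the last needle 'foo' against a
# terminal directory 'food' scores 2 * 3 / 7 and clears the 0.6 threshold:
#
#     >>> round(SequenceMatcher(a='foo', b='food').ratio(), 2)
#     0.86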


def purge_missing_paths(entries):
    """Remove non-existent paths from a list of entries."""
    exists = lambda entry: os.path.exists(entry.path)
    return ifilter(exists, entries)


def print_stats(data, data_path):
    for path, weight in sorted(data.items(), key=itemgetter(1)):
        print_entry(Entry(path, weight))

    print("________________________________________\n")
    print("%d:\t total weight" % sum(data.values()))
    print("%d:\t number of entries" % len(data))

    try:
        print("%.2f:\t current directory weight" % data.get(os.getcwdu(), 0))
    except OSError:
        # current directory no longer exists
        pass

    print("\ndata:\t %s" % data_path)


def main(args):  # noqa
    config = set_defaults()

    # all arguments are mutually exclusive
    if args.add:
        save(config, first(add_path(load(config), args.add)))
    elif args.complete:
        handle_tab_completion(
            needle=first(sanitize(args.directory)),
            entries=entriefy(load(config)))
    elif args.decrease:
        data, entry = decrease_path(load(config), get_pwd(), args.decrease)
        save(config, data)
        print_entry(entry)
    elif args.increase:
        data, entry = add_path(load(config), get_pwd(), args.increase)
        save(config, data)
        print_entry(entry)
    elif args.purge:
        old_data = load(config)
        new_data = dictify(purge_missing_paths(entriefy(old_data)))
        save(config, new_data)
        print("Purged %d entries." % (len(old_data) - len(new_data)))
    elif args.stat:
        print_stats(load(config), config['data_path'])
    elif not args.directory:
        # default return value so calling shell functions have an argument
        # to `cd` to
        print('.')
    else:
        entries = entriefy(load(config))
        needles = sanitize(args.directory)
        tab_needle, tab_index, tab_path = \
            get_tab_entry_info(first(needles), TAB_SEPARATOR)

        if tab_path:
            print(tab_path)
        elif tab_index:
            get_ith_path = lambda i, iterable: last(take(i, iterable)).path
            print(get_ith_path(tab_index, find_matches(entries, tab_needle)))
        else:
            print(first(find_matches(entries, needles)).path)

    return 0


if __name__ == "__main__":
    sys.exit(main(parse_arguments()))