diff --git a/sandbox/grist/autocomplete_context.py b/sandbox/grist/autocomplete_context.py
new file mode 100644
index 00000000..8951cb40
--- /dev/null
+++ b/sandbox/grist/autocomplete_context.py
@@ -0,0 +1,87 @@
+"""
+Helper class for handling formula autocomplete.
+
+It's intended for use with rlcompleter.Completer. It allows finding global names using
+lowercase searches, and adds function usage information to some results.
+"""
+import __builtin__
+import inspect
+from collections import namedtuple
+
+# funcname is the function name, e.g. "MAX"
+# argspec is the signature, e.g. "(arg, *more_args)"
+# isgrist is a boolean for whether this function should be in Grist documentation.
+Completion = namedtuple('Completion', ['funcname', 'argspec', 'isgrist'])
+
+def is_grist_func(func):
+  try:
+    return inspect.getmodule(func).__name__.startswith('functions.')
+  except Exception, e:
+    return e
+
+class AutocompleteContext(object):
+  def __init__(self, usercode_context):
+    # rlcompleter is case-sensitive. This is hard to work around while maintaining attribute
+    # lookups. As a middle ground, we only introduce lowercase versions of all global names.
+    self._context = {
+      key: value for key, value in usercode_context.iteritems()
+      # Don't propose unimplemented functions in autocomplete.
+      if not (value and callable(value) and getattr(value, 'unimplemented', None))
+    }
+
+    # Prepare detailed Completion objects for functions where we can supply more info.
+    # TODO It would be nice to include builtin functions too, but getargspec doesn't work there.
+    self._functions = {}
+    for key, value in self._context.iteritems():
+      if value and callable(value):
+        argspec = inspect.formatargspec(*inspect.getargspec(value))
+        self._functions[key] = Completion(key, argspec, is_grist_func(value))
+
+    # Add in the important UserTable methods, with custom friendlier descriptions.
+    self._functions['.lookupOne'] = Completion('.lookupOne', '(colName=, ...)', True)
+    self._functions['.lookupRecords'] = Completion('.lookupRecords', '(colName=, ...)', True)
+
+    # Remember the original name for each lowercase one.
+    self._lowercase = {}
+    for key in self._context:
+      lower = key.lower()
+      if lower == key:
+        continue
+      if lower not in self._context and lower not in __builtin__.__dict__:
+        self._lowercase[lower] = key
+      else:
+        # This is still good enough to find a match for, and translate back to the original.
+        # It allows rlcompleter to match e.g. 'max' against 'max', 'Max', and 'MAX' (using keys
+        # 'max', 'max*', and 'max**', respectively).
+        lower += '*'
+        if lower in self._lowercase:
+          lower += '*'
+        self._lowercase[lower] = key
+
+    # Add the lowercase names to the context, and to the detailed completions in _functions.
+    for lower, key in self._lowercase.iteritems():
+      self._context[lower] = self._context[key]
+      if key in self._functions:
+        self._functions[lower] = self._functions[key]
+
+  def get_context(self):
+    return self._context
+
+  def process_result(self, result):
+    # Callables are returned by rlcompleter with a trailing "(".
+    if result.endswith('('):
+      funcname = result[0:-1]
+      dot = funcname.rfind(".")
+      key = funcname[dot:] if dot >= 0 else funcname
+      completion = self._functions.get(key)
+      # Return the detailed completion if we have it, or the result string otherwise.
+      if completion:
+        # For methods (e.g. ".lookupOne"), use the original result as funcname (e.g. "Foo.lookupOne").
+        if dot >= 0:
+          completion = completion._replace(funcname=funcname)
+        return tuple(completion)
+
+      return result
+
+    # Return translation from lowercase if there is one, or the result string otherwise.
+    return self._lowercase.get(result, result)
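(Illustration only, not part of the patch.) A minimal sketch of how AutocompleteContext behaves, using a toy context in which MAX is a stand-in for the real Grist function and Students stands in for a user table:

    def MAX(value, *more_values):     # stand-in; the real MAX lives in functions/math.py
      return max(value, *more_values)

    ctx = AutocompleteContext({'MAX': MAX, 'Students': object()})
    'max*' in ctx.get_context()       # True: plain 'max' is a builtin, so the alias gets a '*'
    ctx.process_result('MAX(')        # -> ('MAX', '(value, *more_values)', False) for this stand-in
    ctx.process_result('students')    # -> 'Students', translated back from the lowercase alias
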
diff --git a/sandbox/grist/engine.py b/sandbox/grist/engine.py
index 9204298d..fe03c70a 100644
--- a/sandbox/grist/engine.py
+++ b/sandbox/grist/engine.py
@@ -1,3 +1,4 @@
+# pylint:disable=too-many-lines
 """
 The data engine ties the code generated from the schema with the document data, and with
 dependency tracking.
@@ -5,16 +6,17 @@ dependency tracking.
 import contextlib
 import itertools
 import re
+import rlcompleter
 import sys
+import time
 import traceback
 from collections import namedtuple, OrderedDict, Hashable
 
 from sortedcontainers import SortedSet
-import time
-import rlcompleter
 import acl
 import actions
 import action_obj
+from autocomplete_context import AutocompleteContext
 from codebuilder import DOLLAR_REGEX
 import depend
 import docactions
@@ -232,6 +234,9 @@ class Engine(object):
     # current cell.
     self._cell_required_error = None
 
+    # Initial empty context for autocompletions; we update it when we generate the usercode module.
+    self._autocomplete_context = AutocompleteContext({})
+
   def load_empty(self):
     """
     Initialize an empty document, e.g. a newly-created one.
@@ -1016,6 +1021,9 @@ class Engine(object):
       self._repl.locals.update(self.gencode.usercode.__dict__)
       self.gencode.usercode.__dict__.update(self._repl.locals)
 
+    # Update the context used for autocompletions.
+    self._autocomplete_context = AutocompleteContext(self.gencode.usercode.__dict__)
+
     # TODO: Whenever schema changes, we need to adjust the ACL resources to remove or rename
     # tableIds and colIds.
 
@@ -1205,8 +1213,9 @@ class Engine(object):
     if txt == '$':
       tweaked_txt = 'rec.'
     table = self.tables[table_id]
-    context = {'rec': table.sample_record}
-    context.update(self.gencode.usercode.__dict__)
+
+    context = self._autocomplete_context.get_context()
+    context['rec'] = table.sample_record
 
     completer = rlcompleter.Completer(context)
     results = []
@@ -1219,11 +1228,12 @@ class Engine(object):
         break
       if skipped_completions.search(result):
         continue
-      results.append(result)
+      results.append(self._autocomplete_context.process_result(result))
 
+    # If we changed the prefix (expanding the $ symbol) we now need to change it back.
     if tweaked_txt != txt:
       results = [txt + result[len(tweaked_txt):] for result in results]
-    results.sort()
+    results.sort(key=lambda r: r[0] if type(r) == tuple else r)
     return results
 
   def _get_undo_checkpoint(self):
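(Illustration only, not part of the patch.) Once process_result is applied, results mixes plain strings with Completion tuples, so the sort key above compares tuples by their funcname and strings by themselves:

    results = [('MAX', '(value, *more_values)', True), 'math', 'max(', 'map(']
    results.sort(key=lambda r: r[0] if type(r) == tuple else r)
    # -> [('MAX', '(value, *more_values)', True), 'map(', 'math', 'max(']
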
diff --git a/sandbox/grist/functions/info.py b/sandbox/grist/functions/info.py
index bcac680f..80ecf168 100644
--- a/sandbox/grist/functions/info.py
+++ b/sandbox/grist/functions/info.py
@@ -9,9 +9,11 @@ import re
 
 import column
 from functions import date # pylint: disable=import-error
+from functions.unimplemented import unimplemented
 from usertypes import AltText # pylint: disable=import-error
 from records import Record, RecordSet
 
+@unimplemented
 def ISBLANK(value):
   """
   Returns whether a value refers to an empty cell. It isn't implemented in Grist. To check for an
@@ -486,6 +488,7 @@ def NA():
   return float('nan')
 
 
+@unimplemented
 def TYPE(value):
   """
   Returns a number associated with the type of data passed into the function. This is not
@@ -493,6 +496,7 @@ def TYPE(value):
   """
   raise NotImplementedError()
 
+@unimplemented
 def CELL(info_type, reference):
   """
   Returns the requested information about the specified cell. This is not implemented in Grist
diff --git a/sandbox/grist/functions/lookup.py b/sandbox/grist/functions/lookup.py
index 796a48e0..ffd409d6 100644
--- a/sandbox/grist/functions/lookup.py
+++ b/sandbox/grist/functions/lookup.py
@@ -1,57 +1,72 @@
 # pylint: disable=redefined-builtin, line-too-long
+from unimplemented import unimplemented
 
+@unimplemented
 def ADDRESS(row, column, absolute_relative_mode, use_a1_notation, sheet):
   """Returns a cell reference as a string."""
   raise NotImplementedError()
 
+@unimplemented
 def CHOOSE(index, choice1, choice2):
   """Returns an element from a list of choices based on index."""
   raise NotImplementedError()
 
+@unimplemented
 def COLUMN(cell_reference=None):
   """Returns the column number of a specified cell, with `A=1`."""
   raise NotImplementedError()
 
+@unimplemented
 def COLUMNS(range):
   """Returns the number of columns in a specified array or range."""
   raise NotImplementedError()
 
+@unimplemented
 def GETPIVOTDATA(value_name, any_pivot_table_cell, original_column_1, pivot_item_1=None, *args):
   """Extracts an aggregated value from a pivot table that corresponds to the specified row and column headings."""
   raise NotImplementedError()
 
+@unimplemented
 def HLOOKUP(search_key, range, index, is_sorted):
   """Horizontal lookup. Searches across the first row of a range for a key and returns the value of a specified cell in the column found."""
   raise NotImplementedError()
 
+@unimplemented
 def HYPERLINK(url, link_label):
   """Creates a hyperlink inside a cell."""
   raise NotImplementedError()
 
+@unimplemented
 def INDEX(reference, row, column):
   """Returns the content of a cell, specified by row and column offset."""
   raise NotImplementedError()
 
+@unimplemented
 def INDIRECT(cell_reference_as_string):
   """Returns a cell reference specified by a string."""
   raise NotImplementedError()
 
+@unimplemented
 def LOOKUP(search_key, search_range_or_search_result_array, result_range=None):
   """Looks through a row or column for a key and returns the value of the cell in a result range located in the same position as the search row or column."""
   raise NotImplementedError()
 
+@unimplemented
 def MATCH(search_key, range, search_type):
   """Returns the relative position of an item in a range that matches a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def OFFSET(cell_reference, offset_rows, offset_columns, height, width):
   """Returns a range reference shifted a specified number of rows and columns from a starting cell reference."""
   raise NotImplementedError()
 
+@unimplemented
 def ROW(cell_reference):
   """Returns the row number of a specified cell."""
   raise NotImplementedError()
 
+@unimplemented
 def ROWS(range):
   """Returns the number of rows in a specified array or range."""
   raise NotImplementedError()
diff --git a/sandbox/grist/functions/math.py b/sandbox/grist/functions/math.py
index 1970ec65..9a8d6cad 100644
--- a/sandbox/grist/functions/math.py
+++ b/sandbox/grist/functions/math.py
@@ -7,6 +7,7 @@
 import operator
 import random
 from functions.info import ISNUMBER, ISLOGICAL
+from functions.unimplemented import unimplemented
 import roman
 
 # Iterates through elements of iterable arguments, or through individual args when not iterable.
@@ -727,6 +728,7 @@ def SQRTPI(value):
   """
   return _math.sqrt(_math.pi * value)
 
+@unimplemented
 def SUBTOTAL(function_code, range1, range2):
   """
   Returns a subtotal for a vertical range of cells using a specified aggregation function.
@@ -755,12 +757,14 @@ def SUM(value1, *more_values):
   return sum(_chain_numeric_a(value1, *more_values))
 
+@unimplemented
 def SUMIF(records, criterion, sum_range):
   """
   Returns a conditional sum across a range.
   """
   raise NotImplementedError()
 
+@unimplemented
 def SUMIFS(sum_range, criteria_range1, criterion1, *args):
   """
   Returns the sum of a range depending on multiple criteria.
@@ -782,6 +786,7 @@ def SUMPRODUCT(array1, *more_arrays):
   """
   return sum(reduce(operator.mul, values) for values in itertools.izip(array1, *more_arrays))
 
+@unimplemented
 def SUMSQ(value1, value2):
   """
   Returns the sum of the squares of a series of numbers and/or cells.
diff --git a/sandbox/grist/functions/stats.py b/sandbox/grist/functions/stats.py
index f13e9a39..a4f56f8a 100644
--- a/sandbox/grist/functions/stats.py
+++ b/sandbox/grist/functions/stats.py
@@ -3,7 +3,7 @@
 from math import _chain, _chain_numeric, _chain_numeric_a
 from info import ISNUMBER, ISLOGICAL
 from date import DATE # pylint: disable=unused-import
-
+from unimplemented import unimplemented
 
 def _average(iterable):
   total, count = 0.0, 0
@@ -24,6 +24,7 @@ def _default_if_empty(iterable, default):
     yield default
 
+@unimplemented
 def AVEDEV(value1, value2):
   """Calculates the average of the magnitudes of deviations of data from a dataset's mean."""
   raise NotImplementedError()
@@ -95,14 +96,17 @@ def AVERAGE_WEIGHTED(pairs):
   return sum_value / sum_weight
 
+@unimplemented
 def AVERAGEIF(criteria_range, criterion, average_range=None):
   """Returns the average of a range depending on criteria."""
   raise NotImplementedError()
 
+@unimplemented
 def AVERAGEIFS(average_range, criteria_range1, criterion1, *args):
   """Returns the average of a range depending on multiple criteria."""
   raise NotImplementedError()
 
+@unimplemented
 def BINOMDIST(num_successes, num_trials, prob_success, cumulative):
   """
   Calculates the probability of drawing a certain number of successes (or a maximum number of
@@ -111,10 +115,12 @@ def BINOMDIST(num_successes, num_trials, prob_success, cumulative):
   """
   raise NotImplementedError()
 
+@unimplemented
 def CONFIDENCE(alpha, standard_deviation, pop_size):
   """Calculates the width of half the confidence interval for a normal distribution."""
   raise NotImplementedError()
 
+@unimplemented
 def CORREL(data_y, data_x):
   """Calculates r, the Pearson product-moment correlation coefficient of a dataset."""
   raise NotImplementedError()
@@ -156,22 +162,27 @@ def COUNTA(value, *more_values):
   return sum(1 for v in _chain(value, *more_values))
 
+@unimplemented
 def COVAR(data_y, data_x):
   """Calculates the covariance of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def CRITBINOM(num_trials, prob_success, target_prob):
   """Calculates the smallest value for which the cumulative binomial distribution is greater than
   or equal to a specified criteria."""
   raise NotImplementedError()
 
+@unimplemented
 def DEVSQ(value1, value2):
   """Calculates the sum of squares of deviations based on a sample."""
   raise NotImplementedError()
 
+@unimplemented
 def EXPONDIST(x, lambda_, cumulative):
   """Returns the value of the exponential distribution function with a specified lambda at a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def F_DIST(x, degrees_freedom1, degrees_freedom2, cumulative):
   """
   Calculates the left-tailed F probability distribution (degree of diversity) for two data sets
@@ -180,6 +191,7 @@ def F_DIST(x, degrees_freedom1, degrees_freedom2, cumulative):
   """
   raise NotImplementedError()
 
+@unimplemented
 def F_DIST_RT(x, degrees_freedom1, degrees_freedom2):
   """
   Calculates the right-tailed F probability distribution (degree of diversity) for two data sets
@@ -188,6 +200,7 @@ def F_DIST_RT(x, degrees_freedom1, degrees_freedom2):
   """
   raise NotImplementedError()
 
+@unimplemented
 def FDIST(x, degrees_freedom1, degrees_freedom2):
   """
   Calculates the right-tailed F probability distribution (degree of diversity) for two data sets
@@ -196,46 +209,57 @@ def FDIST(x, degrees_freedom1, degrees_freedom2):
   """
   raise NotImplementedError()
 
+@unimplemented
 def FISHER(value):
   """Returns the Fisher transformation of a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def FISHERINV(value):
   """Returns the inverse Fisher transformation of a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def FORECAST(x, data_y, data_x):
   """Calculates the expected y-value for a specified x based on a linear regression of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def GEOMEAN(value1, value2):
   """Calculates the geometric mean of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def HARMEAN(value1, value2):
   """Calculates the harmonic mean of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def HYPGEOMDIST(num_successes, num_draws, successes_in_pop, pop_size):
   """Calculates the probability of drawing a certain number of successes in a certain number of
   tries given a population of a certain size containing a certain number of successes, without
   replacement of draws."""
   raise NotImplementedError()
 
+@unimplemented
 def INTERCEPT(data_y, data_x):
   """Calculates the y-value at which the line resulting from linear regression of a dataset will
   intersect the y-axis (x=0)."""
   raise NotImplementedError()
 
+@unimplemented
 def KURT(value1, value2):
   """Calculates the kurtosis of a dataset, which describes the shape, and in particular the
   "peakedness" of that dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def LARGE(data, n):
   """Returns the nth largest element from a data set, where n is user-defined."""
   raise NotImplementedError()
 
+@unimplemented
 def LOGINV(x, mean, standard_deviation):
   """Returns the value of the inverse log-normal cumulative distribution with given mean and
   standard deviation at a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def LOGNORMDIST(x, mean, standard_deviation):
   """Returns the value of the log-normal cumulative distribution with given mean and standard
   deviation at a specified value."""
   raise NotImplementedError()
@@ -364,14 +388,17 @@ def MINA(value, *more_values):
   return min(_default_if_empty(_chain_numeric_a(value, *more_values), 0))
 
+@unimplemented
 def MODE(value1, value2):
   """Returns the most commonly occurring value in a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def NEGBINOMDIST(num_failures, num_successes, prob_success):
   """Calculates the probability of drawing a certain number of failures before a certain number
   of successes given a probability of success in independent trials."""
   raise NotImplementedError()
 
+@unimplemented
 def NORMDIST(x, mean, standard_deviation, cumulative):
   """
   Returns the value of the normal distribution function (or normal cumulative distribution
@@ -379,42 +406,52 @@ def NORMDIST(x, mean, standard_deviation, cumulative):
   """
   raise NotImplementedError()
 
+@unimplemented
 def NORMINV(x, mean, standard_deviation):
   """Returns the value of the inverse normal distribution function for a specified value, mean,
   and standard deviation."""
   raise NotImplementedError()
 
+@unimplemented
 def NORMSDIST(x):
   """Returns the value of the standard normal cumulative distribution function for a specified
   value."""
   raise NotImplementedError()
 
+@unimplemented
 def NORMSINV(x):
   """Returns the value of the inverse standard normal distribution function for a specified value."""
   raise NotImplementedError()
 
+@unimplemented
 def PEARSON(data_y, data_x):
   """Calculates r, the Pearson product-moment correlation coefficient of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def PERCENTILE(data, percentile):
   """Returns the value at a given percentile of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def PERCENTRANK(data, value, significant_digits=None):
   """Returns the percentage rank (percentile) of a specified value in a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def PERCENTRANK_EXC(data, value, significant_digits=None):
   """Returns the percentage rank (percentile) from 0 to 1 exclusive of a specified value in a
   dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def PERCENTRANK_INC(data, value, significant_digits=None):
   """Returns the percentage rank (percentile) from 0 to 1 inclusive of a specified value in a
   dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def PERMUT(n, k):
   """Returns the number of ways to choose some number of objects from a pool of a given size of
   objects, considering order."""
   raise NotImplementedError()
 
+@unimplemented
 def POISSON(x, mean, cumulative):
   """
   Returns the value of the Poisson distribution function (or Poisson cumulative distribution
@@ -422,42 +459,52 @@ def POISSON(x, mean, cumulative):
   """
   raise NotImplementedError()
 
+@unimplemented
 def PROB(data, probabilities, low_limit, high_limit=None):
   """Given a set of values and corresponding probabilities, calculates the probability that a
   value chosen at random falls between two limits."""
   raise NotImplementedError()
 
+@unimplemented
 def QUARTILE(data, quartile_number):
   """Returns a value nearest to a specified quartile of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def RANK(value, data, is_ascending=None):
   """Returns the rank of a specified value in a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def RANK_AVG(value, data, is_ascending=None):
   """Returns the rank of a specified value in a dataset. If there is more than one entry of the
   same value in the dataset, the average rank of the entries will be returned."""
   raise NotImplementedError()
 
+@unimplemented
 def RANK_EQ(value, data, is_ascending=None):
   """Returns the rank of a specified value in a dataset. If there is more than one entry of the
   same value in the dataset, the top rank of the entries will be returned."""
   raise NotImplementedError()
 
+@unimplemented
 def RSQ(data_y, data_x):
   """Calculates the square of r, the Pearson product-moment correlation coefficient of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def SKEW(value1, value2):
   """Calculates the skewness of a dataset, which describes the symmetry of that dataset about the
   mean."""
   raise NotImplementedError()
 
+@unimplemented
 def SLOPE(data_y, data_x):
   """Calculates the slope of the line resulting from linear regression of a dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def SMALL(data, n):
   """Returns the nth smallest element from a data set, where n is user-defined."""
   raise NotImplementedError()
 
+@unimplemented
 def STANDARDIZE(value, mean, standard_deviation):
   """Calculates the normalized equivalent of a random variable given mean and standard deviation
   of the distribution."""
   raise NotImplementedError()
@@ -559,50 +606,62 @@ def STDEVPA(value, *more_values):
   """
   return _stddev(list(_chain_numeric_a(value, *more_values)), 0)
 
+@unimplemented
 def STEYX(data_y, data_x):
   """Calculates the standard error of the predicted y-value for each x in the regression of a
   dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def T_INV(probability, degrees_freedom):
   """Calculates the negative inverse of the one-tailed TDIST function."""
   raise NotImplementedError()
 
+@unimplemented
 def T_INV_2T(probability, degrees_freedom):
   """Calculates the inverse of the two-tailed TDIST function."""
   raise NotImplementedError()
 
+@unimplemented
 def TDIST(x, degrees_freedom, tails):
   """Calculates the probability for Student's t-distribution with a given input (x)."""
   raise NotImplementedError()
 
+@unimplemented
 def TINV(probability, degrees_freedom):
   """Calculates the inverse of the two-tailed TDIST function."""
   raise NotImplementedError()
 
+@unimplemented
 def TRIMMEAN(data, exclude_proportion):
   """Calculates the mean of a dataset excluding some proportion of data from the high and low
   ends of the dataset."""
   raise NotImplementedError()
 
+@unimplemented
 def TTEST(range1, range2, tails, type):
   """Returns the probability associated with t-test. Determines whether two samples are likely
   to have come from the same two underlying populations that have the same mean."""
   raise NotImplementedError()
 
+@unimplemented
 def VAR(value1, value2):
   """Calculates the variance based on a sample."""
   raise NotImplementedError()
 
+@unimplemented
 def VARA(value1, value2):
   """Calculates an estimate of variance based on a sample, setting text to the value `0`."""
   raise NotImplementedError()
 
+@unimplemented
 def VARP(value1, value2):
   """Calculates the variance based on an entire population."""
   raise NotImplementedError()
 
+@unimplemented
 def VARPA(value1, value2):
   """Calculates the variance based on an entire population, setting text to the value `0`."""
   raise NotImplementedError()
 
+@unimplemented
 def WEIBULL(x, shape, scale, cumulative):
   """
   Returns the value of the Weibull distribution function (or Weibull cumulative distribution
@@ -610,6 +669,7 @@ def WEIBULL(x, shape, scale, cumulative):
   """
   raise NotImplementedError()
 
+@unimplemented
 def ZTEST(data, value, standard_deviation):
   """Returns the two-tailed P-value of a Z-test with standard distribution."""
   raise NotImplementedError()
diff --git a/sandbox/grist/functions/text.py b/sandbox/grist/functions/text.py
index 5d9655c4..cfec1200 100644
--- a/sandbox/grist/functions/text.py
+++ b/sandbox/grist/functions/text.py
@@ -5,6 +5,7 @@ import dateutil.parser
 import numbers
 import re
 
+from unimplemented import unimplemented
 from usertypes import AltText # pylint: disable=import-error
 
 def CHAR(table_number):
@@ -499,6 +500,7 @@ def T(value):
           str(value) if isinstance(value, AltText) else "")
 
+@unimplemented
 def TEXT(number, format_type):
   """
   Converts a number into text according to a specified format. It is not yet implemented in
diff --git a/sandbox/grist/functions/unimplemented.py b/sandbox/grist/functions/unimplemented.py
new file mode 100644
index 00000000..2ca29e2a
--- /dev/null
+++ b/sandbox/grist/functions/unimplemented.py
@@ -0,0 +1,11 @@
+"""
+Decorator that marks functions as not implemented. It sets func.unimplemented=True.
+Usage:
+
+@unimplemented
+def func(...):
+  raise NotImplementedError()
+"""
+def unimplemented(func):
+  func.unimplemented = True
+  return func
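(Illustration only, not part of the patch.) The decorator merely tags a function; it is the AutocompleteContext constructor above that drops tagged callables from the completion namespace. A hypothetical stub FOO shows the round trip:

    @unimplemented
    def FOO(value):                   # hypothetical stub, not a real Grist function
      raise NotImplementedError()

    FOO.unimplemented                                  # True
    AutocompleteContext({'FOO': FOO}).get_context()    # no longer contains 'FOO'
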
diff --git a/sandbox/grist/test_completion.py b/sandbox/grist/test_completion.py
index ed4e5c3d..9aac7330 100644
--- a/sandbox/grist/test_completion.py
+++ b/sandbox/grist/test_completion.py
@@ -1,5 +1,4 @@
 import testsamples
-import testutil
 import test_engine
 
 class TestCompletion(test_engine.EngineTestCase):
@@ -24,18 +23,63 @@ class TestCompletion(test_engine.EngineTestCase):
 
   def test_function(self):
     self.assertEqual(self.engine.autocomplete("MEDI", "Address"),
-                     ["MEDIAN("])
+                     [('MEDIAN', '(value, *more_values)', True)])
+    self.assertEqual(self.engine.autocomplete("ma", "Address"), [
+      ('MAX', '(value, *more_values)', True),
+      ('MAXA', '(value, *more_values)', True),
+      'map(',
+      'math',
+      'max(',
+    ])
 
   def test_member(self):
     self.assertEqual(self.engine.autocomplete("datetime.tz", "Address"),
                      ["datetime.tzinfo("])
 
 
+  def test_case_insensitive(self):
+    self.assertEqual(self.engine.autocomplete("medi", "Address"),
+                     [('MEDIAN', '(value, *more_values)', True)])
+    self.assertEqual(self.engine.autocomplete("std", "Address"), [
+      ('STDEV', '(value, *more_values)', True),
+      ('STDEVA', '(value, *more_values)', True),
+      ('STDEVP', '(value, *more_values)', True),
+      ('STDEVPA', '(value, *more_values)', True)
+    ])
+    self.assertEqual(self.engine.autocomplete("stu", "Address"),
+                     ["Students"])
+
+    # Add a table name whose lowercase version conflicts with a builtin.
+    self.apply_user_action(['AddTable', 'Max', []])
+    self.assertEqual(self.engine.autocomplete("max", "Address"), [
+      ('MAX', '(value, *more_values)', True),
+      ('MAXA', '(value, *more_values)', True),
+      'Max',
+      'max(',
+    ])
+    self.assertEqual(self.engine.autocomplete("MAX", "Address"), [
+      ('MAX', '(value, *more_values)', True),
+      ('MAXA', '(value, *more_values)', True),
+    ])
+
+  def test_suggest_globals_and_tables(self):
     # Should suggest globals and table names.
-    self.assertEqual(self.engine.autocomplete("ME", "Address"), ['MEDIAN('])
+    self.assertEqual(self.engine.autocomplete("ME", "Address"),
+                     [('MEDIAN', '(value, *more_values)', True)])
     self.assertEqual(self.engine.autocomplete("Ad", "Address"), ['Address'])
-    self.assertGreaterEqual(set(self.engine.autocomplete("S", "Address")),
-                            {'Schools', 'Students', 'SUM(', 'STDEV('})
+    self.assertGreaterEqual(set(self.engine.autocomplete("S", "Address")), {
+      'Schools',
+      'Students',
+      ('SUM', '(value1, *more_values)', True),
+      ('STDEV', '(value, *more_values)', True),
+    })
+    self.assertGreaterEqual(set(self.engine.autocomplete("s", "Address")), {
+      'Schools',
+      'Students',
+      'sum(',
+      ('SUM', '(value1, *more_values)', True),
+      ('STDEV', '(value, *more_values)', True),
+    })
     self.assertEqual(self.engine.autocomplete("Addr", "Schools"), ['Address'])
 
   def test_suggest_columns(self):
@@ -56,11 +100,16 @@ class TestCompletion(test_engine.EngineTestCase):
 
   def test_suggest_lookup_methods(self):
     # Should suggest lookup formulas for tables.
-    self.assertEqual(self.engine.autocomplete("Address.", "Students"),
-                     ['Address.all', 'Address.lookupOne(', 'Address.lookupRecords('])
+    self.assertEqual(self.engine.autocomplete("Address.", "Students"), [
+      'Address.all',
+      ('Address.lookupOne', '(colName=, ...)', True),
+      ('Address.lookupRecords', '(colName=, ...)', True),
+    ])
 
-    self.assertEqual(self.engine.autocomplete("Address.lookup", "Students"),
-                     ['Address.lookupOne(', 'Address.lookupRecords('])
+    self.assertEqual(self.engine.autocomplete("Address.lookup", "Students"), [
+      ('Address.lookupOne', '(colName=, ...)', True),
+      ('Address.lookupRecords', '(colName=, ...)', True),
+    ])
 
   def test_suggest_column_type_methods(self):
     # Should treat columns as correct types.
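(Illustration only, not part of the patch.) As the updated tests show, callers of engine.autocomplete now receive a mix of plain strings and (funcname, argspec, isgrist) tuples; a hypothetical client would branch on the shape when rendering suggestions:

    for item in engine.autocomplete("ma", "Address"):    # hypothetical call site
      if isinstance(item, tuple):
        funcname, argspec, isgrist = item
        label = funcname + argspec    # e.g. "MAX(value, *more_values)"
      else:
        label = item                  # e.g. "math" or "max("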