Mirror of https://github.com/gristlabs/grist-core.git, synced 2024-10-27 20:44:07 +00:00

commit 16f297a250 (parent cc04c6481a)

(core) Simple Python 3 compatibility changes

Summary: Changes that move towards Python 3 compatibility and are easy to review without much thought.

Test Plan: The tests

Reviewers: dsagal

Reviewed By: dsagal

Differential Revision: https://phab.getgrist.com/D2873
@@ -24,29 +24,29 @@ def get_ts_type(col_type):
   return _ts_types.get(col_type, "CellValue")

 def main():
-  print """
+  print("""
 /*** THIS FILE IS AUTO-GENERATED BY %s ***/
 // tslint:disable:object-literal-key-quotes

 export const schema = {
-""" % __file__
+""" % __file__)

   for table in schema.schema_create_actions():
-    print ' "%s": {' % table.table_id
+    print(' "%s": {' % table.table_id)
     for column in table.columns:
-      print ' %-20s: "%s",' % (column['id'], column['type'])
-    print ' },\n'
+      print(' %-20s: "%s",' % (column['id'], column['type']))
+    print(' },\n')

-  print """};
+  print("""};

 export interface SchemaTypes {
-"""
+""")
   for table in schema.schema_create_actions():
-    print ' "%s": {' % table.table_id
+    print(' "%s": {' % table.table_id)
     for column in table.columns:
-      print ' %s: %s;' % (column['id'], get_ts_type(column['type']))
-    print ' };\n'
-  print "}"
+      print(' %s: %s;' % (column['id'], get_ts_type(column['type'])))
+    print(' };\n')
+  print("}")

 if __name__ == '__main__':
   main()
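Editor's note on the hunk above: with a single argument, print("...") is valid on both Python 2 and Python 3, because Python 2 simply treats the parentheses as expression grouping; that is why the commit can convert these print statements without adding a __future__ import. A minimal sketch (mine, not from the repo):

from __future__ import print_function  # must be the first statement in a module

# Single-argument form: portable even without the __future__ import,
# since Python 2 parses the parentheses as grouping.
print("hello")

# Multi-argument form: only portable with print_function imported;
# plain Python 2 would print the tuple ('a', 'b') instead.
print("a", "b")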
@@ -67,7 +67,7 @@ def prepare_acl_table_renames(docmodel, useractions, table_renames_dict):
       if rule_info.get("tableId") in table_renames_dict:
         rule_info["tableId"] = table_renames_dict[rule_info.get("tableId")]
         rule_updates.append((rule_rec, {'userAttributes': json.dumps(rule_info)}))
-    except Exception, e:
+    except Exception as e:
       log.warn("Error examining aclRule: %s" % (e,))

   def do_renames():

@@ -103,7 +103,7 @@ def prepare_acl_col_renames(docmodel, useractions, col_renames_dict):
       if new_col_id:
         rule_info["lookupColId"] = new_col_id
         rule_updates.append((rule_rec, {'userAttributes': json.dumps(rule_info)}))
-    except Exception, e:
+    except Exception as e:
       log.warn("Error examining aclRule: %s" % (e,))

   # Go through again checking if anything in ACL formulas is affected by the rename.
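For reference: `except Exception, e` is Python 2-only syntax and a syntax error on Python 3, while `except Exception as e` parses on Python 2.6+ and Python 3, so these hunks are safe on both interpreters. A tiny sketch:

# The 'as' form is the only spelling Python 3 accepts; Python 2.6+
# accepts it too, making it the portable choice.
try:
    {}["missing"]
except KeyError as e:
    print("caught: %r" % (e,))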
@@ -104,6 +104,11 @@ class _TreeConverter(ast.NodeVisitor):
       return ["Const", named_constants[node.id]]
     return ["Name", node.id]

+  def visit_Constant(self, node):
+    return ["Const", node.value]
+
+  visit_NameConstant = visit_Constant
+
   def visit_Attribute(self, node):
     return ["Attr", self.visit(node.value), node.attr]

@@ -54,9 +54,9 @@ class ActionGroup(object):

   def get_repr(self):
     return {
-      "calc": map(actions.get_action_repr, self.calc),
-      "stored": map(actions.get_action_repr, self.stored),
-      "undo": map(actions.get_action_repr, self.undo),
+      "calc": [actions.get_action_repr(a) for a in self.calc],
+      "stored": [actions.get_action_repr(a) for a in self.stored],
+      "undo": [actions.get_action_repr(a) for a in self.undo],
       "direct": self.direct,
       "retValues": self.retValues
     }

@@ -64,9 +64,9 @@ class ActionGroup(object):
   @classmethod
   def from_json_obj(cls, data):
     ag = ActionGroup()
-    ag.calc = map(actions.action_from_repr, data.get('calc', []))
-    ag.stored = map(actions.action_from_repr, data.get('stored', []))
-    ag.undo = map(actions.action_from_repr, data.get('undo', []))
+    ag.calc = [actions.action_from_repr(a) for a in data.get('calc', [])]
+    ag.stored = [actions.action_from_repr(a) for a in data.get('stored', [])]
+    ag.undo = [actions.action_from_repr(a) for a in data.get('undo', [])]
     ag.retValues = data.get('retValues', [])
     return ag

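These hunks swap `map(f, xs)` for list comprehensions because Python 3's `map` returns a one-shot iterator rather than a list, and these results get stored and serialized. An illustration (mine, not from the repo):

nums = [1, 2, 3]

# On Python 3, map() returns a lazy iterator: no len(), single pass.
lazy = map(str, nums)

# A list comprehension is eagerly evaluated on both versions, so code
# that re-iterates or JSON-serializes the result keeps working.
eager = [str(n) for n in nums]
assert eager == ["1", "2", "3"]
assert len(eager) == 3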
@@ -4,6 +4,8 @@ It's used for collecting calculated values for formula columns.
 """
 from collections import namedtuple

+import six
+
 import actions
 from objtypes import equal_encoding

@@ -76,7 +78,7 @@ class ActionSummary(object):
     """
     if not column_delta:
       return
-    full_row_ids = sorted(r for r, (before, after) in column_delta.iteritems()
+    full_row_ids = sorted(r for r, (before, after) in six.iteritems(column_delta)
                           if not equal_encoding(before, after))

     defunct = is_defunct(table_id) or is_defunct(col_id)

@@ -8,6 +8,8 @@ When communicating with Node, docActions are represented as arrays [actionName,
 from collections import namedtuple
 import inspect

+import six
+
 import objtypes

 def _eq_with_type(self, other):

@@ -86,7 +88,7 @@ def _add_simplify(SingleActionType, BulkActionType):
   else:
     def get_first(self):
       return SingleActionType(self.table_id, self.row_ids[0],
-                              { key: col[0] for key, col in self.columns.iteritems()})
+                              { key: col[0] for key, col in six.iteritems(self.columns)})
   def simplify(self):
     return None if not self.row_ids else (get_first(self) if len(self.row_ids) == 1 else self)

@@ -128,7 +130,7 @@ def convert_recursive_helper(converter, data):
     return convert_recursive_helper(my_convert, data)
   """
   if isinstance(data, dict):
-    return {converter(k): converter(v) for k, v in data.iteritems()}
+    return {converter(k): converter(v) for k, v in six.iteritems(data)}
   elif isinstance(data, list):
     return [converter(el) for el in data]
   elif isinstance(data, tuple):

@@ -142,10 +144,12 @@ def convert_action_values(converter, action):
   """
   if isinstance(action, (AddRecord, UpdateRecord)):
     return type(action)(action.table_id, action.row_id,
-                        {k: converter(v) for k, v in action.columns.iteritems()})
+                        {k: converter(v) for k, v in six.iteritems(action.columns)})
   if isinstance(action, (BulkAddRecord, BulkUpdateRecord, ReplaceTableData, TableData)):
-    return type(action)(action.table_id, action.row_ids,
-                        {k: map(converter, v) for k, v in action.columns.iteritems()})
+    return type(action)(
+      action.table_id, action.row_ids,
+      {k: [converter(value) for value in values] for k, values in six.iteritems(action.columns)}
+    )
   return action

 def convert_recursive_in_action(converter, data):

@@ -173,7 +177,10 @@ def decode_bulk_values(bulk_values, decoder=objtypes.decode_object):
   Decode objects in values of the form {col_id: array_of_values}, as present in bulk DocActions
   and UserActions.
   """
-  return {k: map(decoder, v) for (k, v) in bulk_values.iteritems()}
+  return {
+    k: [decoder(value) for value in values]
+    for k, values in six.iteritems(bulk_values)
+  }

 def transpose_bulk_action(bulk_action):
   """
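`dict.iteritems()` is gone in Python 3, where `items()` already returns a lazy view; `six.iteritems(d)` picks whichever exists, keeping the Python 2 code allocation-free. Roughly what the shim does (a sketch, not six's actual source):

import sys

def iteritems(d):
    # In the spirit of six.iteritems: d.iteritems() on Python 2,
    # iter(d.items()) on Python 3; both avoid building a list.
    if sys.version_info[0] == 2:
        return d.iteritems()
    return iter(d.items())

for key, value in iteritems({"a": 1, "b": 2}):
    print("%s=%s" % (key, value))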
@@ -4,10 +4,12 @@ Helper class for handling formula autocomplete.
 It's intended to use with rlcompleter.Completer. It allows finding global names using
 lowercase searches, and adds function usage information to some results.
 """
-import __builtin__
+from six.moves import builtins
 import inspect
 from collections import namedtuple

+import six
+
 # funcname is the function name, e.g. "MAX"
 # argspec is the signature, e.g. "(arg, *more_args)"
 # isgrist is a boolean for whether this function should be in Grist documentation.

@@ -16,7 +18,7 @@ Completion = namedtuple('Completion', ['funcname', 'argspec', 'isgrist'])
 def is_grist_func(func):
   try:
     return inspect.getmodule(func).__name__.startswith('functions.')
-  except Exception, e:
+  except Exception as e:
     return e

 class AutocompleteContext(object):

@@ -24,7 +26,7 @@ class AutocompleteContext(object):
     # rlcompleter is case-sensitive. This is hard to work around while maintaining attribute
     # lookups. As a middle ground, we only introduce lowercase versions of all global names.
     self._context = {
-      key: value for key, value in usercode_context.iteritems()
+      key: value for key, value in six.iteritems(usercode_context)
       # Don't propose unimplemented functions in autocomplete
       if not (value and callable(value) and getattr(value, 'unimplemented', None))
     }

@@ -32,7 +34,7 @@ class AutocompleteContext(object):
     # Prepare detailed Completion objects for functions where we can supply more info.
     # TODO It would be nice to include builtin functions too, but getargspec doesn't work there.
     self._functions = {}
-    for key, value in self._context.iteritems():
+    for key, value in six.iteritems(self._context):
       if value and callable(value):
         argspec = inspect.formatargspec(*inspect.getargspec(value))
         self._functions[key] = Completion(key, argspec, is_grist_func(value))

@@ -47,7 +49,7 @@ class AutocompleteContext(object):
       lower = key.lower()
       if lower == key:
         continue
-      if lower not in self._context and lower not in __builtin__.__dict__:
+      if lower not in self._context and lower not in builtins.__dict__:
         self._lowercase[lower] = key
       else:
         # This is still good enough to find a match for, and translate back to the original.

@@ -59,7 +61,7 @@ class AutocompleteContext(object):
         self._lowercase[lower] = key

     # Add the lowercase names to the context, and to the detailed completions in _functions.
-    for lower, key in self._lowercase.iteritems():
+    for lower, key in six.iteritems(self._lowercase):
       self._context[lower] = self._context[key]
       if key in self._functions:
         self._functions[lower] = self._functions[key]
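The `__builtin__` module was renamed to `builtins` in Python 3; `six.moves.builtins` resolves to the right module on either interpreter. The equivalent hand-rolled conditional import (a sketch):

try:
    import builtins                 # Python 3
except ImportError:
    import __builtin__ as builtins  # Python 2

# Membership tests like the one in the hunk above now work unchanged:
print("len" in builtins.__dict__)   # True on both versions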
@@ -127,7 +127,7 @@ def _create_syntax_error_code(builder, input_text, err):
 def infer(node):
   try:
     return next(node.infer(), None)
-  except astroid.exceptions.InferenceError, e:
+  except astroid.exceptions.InferenceError as e:
     return "InferenceError on %r: %r" % (node, e)


@@ -137,7 +137,7 @@ def _is_table(node):
   """
   Return true if obj is a class defining a user table.
   """
-  return (isinstance(node, astroid.nodes.Class) and node.decorators and
+  return (isinstance(node, astroid.nodes.ClassDef) and node.decorators and
           node.decorators.nodes[0].as_string() == 'grist.UserTable')


@@ -202,7 +202,7 @@ class InferReferenceFormula(InferenceTip):
   Inference helper to treat functions decorated with `grist.formulaType(grist.Reference("Foo"))`
   as returning instances of table `Foo`.
   """
-  node_class = astroid.nodes.Function
+  node_class = astroid.nodes.FunctionDef

   @classmethod
   def filter(cls, node):

@@ -320,4 +320,4 @@ def parse_grist_names(builder):
       start, end = tok.startpos, tok.endpos
       parsed_names.append(make_tuple(start, end, obj.name, node.arg))

-  return filter(None, parsed_names)
+  return [name for name in parsed_names if name]
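Same story as `map`: Python 3's `filter` returns a lazy iterator, so `filter(None, parsed_names)` would hand callers a one-shot object instead of a list. The comprehension is the portable eager form, for example:

parsed_names = ["Table1", None, "col_a", None]

# Keep truthy entries; a real list on both Python 2 and 3.
names = [name for name in parsed_names if name]
assert names == ["Table1", "col_a"]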
@@ -2,6 +2,8 @@ import json
 import types
 from collections import namedtuple

+import six
+
 import depend
 import objtypes
 import usertypes

@@ -302,7 +304,7 @@ class ChoiceListColumn(BaseColumn):
   def set(self, row_id, value):
     # When a JSON string is loaded, set it to a tuple parsed from it. When a list is loaded,
     # convert to a tuple to keep values immutable.
-    if isinstance(value, basestring) and value.startswith('['):
+    if isinstance(value, six.string_types) and value.startswith(u'['):
       try:
         value = tuple(json.loads(value))
       except Exception:
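`basestring`, the common ancestor of `str` and `unicode`, does not exist in Python 3. `six.string_types` is `(str, unicode)` on Python 2 and `(str,)` on Python 3, so the isinstance check stays portable. A sketch:

import six

def looks_like_json_list(value):
    # six.string_types stands in for the removed basestring.
    return isinstance(value, six.string_types) and value.startswith(u'[')

assert looks_like_json_list(u'[1, 2]')
assert not looks_like_json_list([1, 2])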
@@ -1,5 +1,6 @@
 import re
 import csv
+from functools import reduce

 # Monkey-patch csv.Sniffer class, in which the quote/delimiter detection has silly bugs in the
 # regexp that it uses. It also seems poorly-implemented in other ways. We can probably do better
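`reduce` stopped being a builtin in Python 3; it lives in `functools`, where it has also been available since Python 2.6, so the import alone makes the file portable. For instance:

from functools import reduce  # builtin on py2, functools-only on py3
import operator

# The same left fold on both versions: ((1*2)*3)*4 == 24.
assert reduce(operator.mul, [1, 2, 3, 4]) == 24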
@@ -1,6 +1,8 @@
+import six
+
 import actions
-import schema
 import logger
+import schema
 from objtypes import strict_equal

 log = logger.Logger(__name__, logger.INFO)

@@ -15,7 +17,7 @@ class DocActions(object):

   def AddRecord(self, table_id, row_id, column_values):
     self.BulkAddRecord(
-      table_id, [row_id], {key: [val] for key, val in column_values.iteritems()})
+      table_id, [row_id], {key: [val] for key, val in six.iteritems(column_values)})

   def BulkAddRecord(self, table_id, row_ids, column_values):
     table = self._engine.tables[table_id]

@@ -42,9 +44,9 @@ class DocActions(object):
     # Collect the undo values, and unset all values in the column (i.e. set to defaults), just to
     # make sure we don't have stale values hanging around.
     undo_values = {}
-    for column in table.all_columns.itervalues():
+    for column in six.itervalues(table.all_columns):
       if not column.is_private() and column.col_id != "id":
-        col_values = map(column.raw_get, row_ids)
+        col_values = [column.raw_get(r) for r in row_ids]
         default = column.getdefault()
         # If this column had all default values, don't include it into the undo BulkAddRecord.
         if not all(strict_equal(val, default) for val in col_values):

@@ -62,7 +64,7 @@ class DocActions(object):

   def UpdateRecord(self, table_id, row_id, columns):
     self.BulkUpdateRecord(
-      table_id, [row_id], {key: [val] for key, val in columns.iteritems()})
+      table_id, [row_id], {key: [val] for key, val in six.iteritems(columns)})

   def BulkUpdateRecord(self, table_id, row_ids, columns):
     table = self._engine.tables[table_id]

@@ -72,9 +74,9 @@ class DocActions(object):

     # Load the updated values.
     undo_values = {}
-    for col_id, values in columns.iteritems():
+    for col_id, values in six.iteritems(columns):
       col = table.get_column(col_id)
-      undo_values[col_id] = map(col.raw_get, row_ids)
+      undo_values[col_id] = [col.raw_get(r) for r in row_ids]
       for (row_id, value) in zip(row_ids, values):
         col.set(row_id, value)

@@ -185,7 +187,7 @@ class DocActions(object):
       log.info("ModifyColumn called which was a noop")
       return

-    undo_col_info = {k: v for k, v in schema.col_to_dict(old, include_id=False).iteritems()
+    undo_col_info = {k: v for k, v in six.iteritems(schema.col_to_dict(old, include_id=False))
                      if k in col_info}

     # Remove the column from the schema, then re-add it, to force creation of a new column object.

@@ -249,7 +251,7 @@ class DocActions(object):

     # Copy over all columns from the old table to the new.
     new_table = self._engine.tables[new_table_id]
-    for new_column in new_table.all_columns.itervalues():
+    for new_column in six.itervalues(new_table.all_columns):
       if not new_column.is_private():
         new_column.copy_from_column(old_table.get_column(new_column.col_id))
     new_table.grow_to_max()  # We need to bring formula columns to the right size too.

@@ -7,6 +7,8 @@ It is similar in purpose to DocModel.js on the client side.
 """
 import itertools

+import six
+
 import records
 import usertypes
 import relabeling

@@ -107,7 +109,7 @@ def enhance_model(model_class):
   extras_class = getattr(MetaTableExtras, model_class.__name__, None)
   if not extras_class:
     return
-  for name, member in extras_class.__dict__.iteritems():
+  for name, member in six.iteritems(extras_class.__dict__):
     if not name.startswith("__"):
       member.__name__ = name
       member.is_private = True

@@ -238,7 +240,7 @@ class DocModel(object):
     table_obj = record_set_or_table._table
     group_by = record_set_or_table._group_by
     if group_by:
-      values.update((k, [v] * count) for k, v in group_by.iteritems() if k not in values)
+      values.update((k, [v] * count) for k, v in six.iteritems(group_by) if k not in values)
   else:
     table_obj = record_set_or_table.table

@@ -281,14 +283,14 @@ def _unify_col_values(col_values, count):
   Helper that converts a dict mapping keys to values or lists of values to all lists. Non-list
   values get turned into lists by repeating them count times.
   """
-  assert all(len(v) == count for v in col_values.itervalues() if isinstance(v, list))
+  assert all(len(v) == count for v in six.itervalues(col_values) if isinstance(v, list))
   return {k: (v if isinstance(v, list) else [v] * count)
-          for k, v in col_values.iteritems()}
+          for k, v in six.iteritems(col_values)}

 def _get_col_values_count(col_values):
   """
   Helper that returns the length of the first list in among the values of col_values. If none of
   the values is a list, returns 1.
   """
-  first_list = next((v for v in col_values.itervalues() if isinstance(v, list)), None)
+  first_list = next((v for v in six.itervalues(col_values) if isinstance(v, list)), None)
   return len(first_list) if first_list is not None else 1

@@ -11,6 +11,9 @@ import sys
 import time
 import traceback
 from collections import namedtuple, OrderedDict, Hashable
+
+import six
+from six.moves import zip
 from sortedcontainers import SortedSet

 import acl

@@ -34,6 +37,7 @@ import repl

 log = logger.Logger(__name__, logger.INFO)

-reload(sys)
-sys.setdefaultencoding('utf8')
+if six.PY2:
+  reload(sys)
+  sys.setdefaultencoding('utf8')

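`sys.setdefaultencoding` exists only on Python 2, and only after `reload(sys)` re-exposes it, so the call must be guarded; `six.PY2` is a plain boolean evaluated at import time. The guard pattern (a sketch):

import sys
import six

if six.PY2:
    # Python 2 only: re-expose setdefaultencoding (site.py deletes it)
    # and default implicit str/unicode coercions to utf8.
    reload(sys)  # noqa: F821 -- reload is a builtin on Python 2
    sys.setdefaultencoding('utf8')
# On Python 3 text is unicode by default, so there is nothing to do.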
@@ -260,11 +264,11 @@ class Engine(object):
     table = self.tables[data.table_id]

     # Clear all columns, whether or not they are present in the data.
-    for column in table.all_columns.itervalues():
+    for column in six.itervalues(table.all_columns):
       column.clear()

     # Only load columns that aren't stored.
-    columns = {col_id: data for (col_id, data) in data.columns.iteritems()
+    columns = {col_id: data for (col_id, data) in six.iteritems(data.columns)
                if table.has_column(col_id)}

     # Add the records.

@@ -296,10 +300,10 @@ class Engine(object):
     table.grow_to_max()

     # Load the new values.
-    for col_id, values in column_values.iteritems():
+    for col_id, values in six.iteritems(column_values):
       column = table.get_column(col_id)
       column.growto(growto_size)
-      for row_id, value in itertools.izip(row_ids, values):
+      for row_id, value in zip(row_ids, values):
         column.set(row_id, value)

     # Invalidate new records to cause the formula columns to get recomputed.

@@ -314,16 +318,16 @@ class Engine(object):

     query_cols = []
     if query:
-      query_cols = [(table.get_column(col_id), values) for (col_id, values) in query.iteritems()]
+      query_cols = [(table.get_column(col_id), values) for (col_id, values) in six.iteritems(query)]
     row_ids = [r for r in table.row_ids
                if all((c.raw_get(r) in values) for (c, values) in query_cols)]

-    for c in table.all_columns.itervalues():
+    for c in six.itervalues(table.all_columns):
       # pylint: disable=too-many-boolean-expressions
       if ((formulas or not c.is_formula())
           and (private or not c.is_private())
           and c.col_id != "id" and not column.is_virtual_column(c.col_id)):
-        column_values[c.col_id] = map(c.raw_get, row_ids)
+        column_values[c.col_id] = [c.raw_get(r) for r in row_ids]

     return actions.TableData(table_id, row_ids, column_values)

@@ -355,8 +359,11 @@ class Engine(object):
     """
     schema_actions = schema.schema_create_actions()
     table_actions = [_get_table_actions(table) for table in self.docmodel.tables.all]
-    record_actions = [self._get_record_actions(table_id) for (table_id,t) in self.tables.iteritems()
-                      if t.next_row_id() > 1]
+    record_actions = [
+      self._get_record_actions(table_id)
+      for (table_id,t) in six.iteritems(self.tables)
+      if t.next_row_id() > 1
+    ]
     return schema_actions + table_actions + record_actions

 # Returns a BulkAddRecord action which can be used to add the currently existing data to an empty

@@ -414,10 +421,10 @@ class Engine(object):
     meta_tables = self.fetch_table('_grist_Tables')
     meta_columns = self.fetch_table('_grist_Tables_column')
     gen_schema = schema.build_schema(meta_tables, meta_columns)
-    gen_schema_dicts = {k: (t.tableId, dict(t.columns.iteritems()))
-                        for k, t in gen_schema.iteritems()}
-    cur_schema_dicts = {k: (t.tableId, dict(t.columns.iteritems()))
-                        for k, t in self.schema.iteritems()}
+    gen_schema_dicts = {k: (t.tableId, dict(t.columns))
+                        for k, t in six.iteritems(gen_schema)}
+    cur_schema_dicts = {k: (t.tableId, dict(t.columns))
+                        for k, t in six.iteritems(self.schema)}
     if cur_schema_dicts != gen_schema_dicts:
       import pprint
       import difflib

@@ -448,7 +455,7 @@ class Engine(object):

   def dump_recompute_map(self):
     log.debug("Recompute map (%d nodes):" % len(self.recompute_map))
-    for node, dirty_rows in self.recompute_map.iteritems():
+    for node, dirty_rows in six.iteritems(self.recompute_map):
       log.debug("  Node %s: %s" % (node, dirty_rows))

   @contextlib.contextmanager

@@ -507,7 +514,7 @@ class Engine(object):
     Called at end of _bring_all_up_to_date or _bring_lookups_up_to_date.
     Issues actions for any accumulated cell changes.
     """
-    for node, changes in self._changes_map.iteritems():
+    for node, changes in six.iteritems(self._changes_map):
       table = self.tables[node.table_id]
       col = table.get_column(node.col_id)
       # If there are changes, save them in out_actions.

@@ -876,7 +883,7 @@ class Engine(object):
     table = self.tables[action.table_id]
     new_values = {}
     extra_actions = []
-    for col_id, values in column_values.iteritems():
+    for col_id, values in six.iteritems(column_values):
       col_obj = table.get_column(col_id)
       values = [col_obj.convert(val) for val in values]

@@ -895,7 +902,7 @@ class Engine(object):
     # above does it for columns explicitly mentioned; this section does it for the other
     # columns, using their default values as input to prepare_new_values().
     ignore_data = isinstance(action, actions.ReplaceTableData)
-    for col_id, col_obj in table.all_columns.iteritems():
+    for col_id, col_obj in six.iteritems(table.all_columns):
       if col_id in column_values or column.is_virtual_column(col_id) or col_obj.is_formula():
         continue
       defaults = [col_obj.getdefault() for r in row_ids]

@@ -922,7 +929,7 @@ class Engine(object):
     table = self.tables[action.table_id]

     # Collect for each column the Column object and a list of new values.
-    cols = [(table.get_column(col_id), values) for (col_id, values) in column_values.iteritems()]
+    cols = [(table.get_column(col_id), values) for (col_id, values) in six.iteritems(column_values)]

     # In comparisons below, we rely here on Python's "==" operator to check for equality. After a
     # type conversion, it may compare the new type to the old, e.g. 1 == 1.0 == True. It's

@@ -988,18 +995,18 @@ class Engine(object):
     old_tables = self.tables

     self.tables = {}
-    for table_id, user_table in self.gencode.usercode.__dict__.iteritems():
+    for table_id, user_table in six.iteritems(self.gencode.usercode.__dict__):
       if isinstance(user_table, table_module.UserTable):
         self.tables[table_id] = (old_tables.get(table_id) or table_module.Table(table_id, self))

     # Now update the table model for each table, and tie it to its UserTable object.
-    for table_id, table in self.tables.iteritems():
+    for table_id, table in six.iteritems(self.tables):
       user_table = getattr(self.gencode.usercode, table_id)
       self._update_table_model(table, user_table)
       user_table._set_table_impl(table)

     # For any tables that are gone, use self._update_table_model to clean them up.
-    for table_id, table in old_tables.iteritems():
+    for table_id, table in six.iteritems(old_tables):
       if table_id not in self.tables:
         self._update_table_model(table, None)
         self._repl.locals.pop(table_id, None)

@@ -1032,8 +1039,8 @@ class Engine(object):
     table._rebuild_model(user_table)
     new_columns = table.all_columns

-    added_col_ids = new_columns.viewkeys() - old_columns.viewkeys()
-    deleted_col_ids = old_columns.viewkeys() - new_columns.viewkeys()
+    added_col_ids = six.viewkeys(new_columns) - six.viewkeys(old_columns)
+    deleted_col_ids = six.viewkeys(old_columns) - six.viewkeys(new_columns)

     # Invalidate the columns that got added and anything that depends on them.
     if added_col_ids:
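`dict.viewkeys()` is the Python 2 spelling of what Python 3 calls plain `dict.keys()`: a set-like view that supports `-`, `&` and `|`. `six.viewkeys(d)` dispatches to the right method, e.g.:

import six

old_columns = {"id": 1, "name": 2}
new_columns = {"id": 1, "name": 2, "email": 3}

# Set difference between views yields the ids that were added.
added = six.viewkeys(new_columns) - six.viewkeys(old_columns)
assert added == {"email"}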
@@ -1101,7 +1108,7 @@ class Engine(object):
       if self._schema_updated:
         self.assert_schema_consistent()

-    except Exception, e:
+    except Exception as e:
       # Save full exception info, so that we can rethrow accurately even if undo also fails.
       exc_info = sys.exc_info()
       # If we get an exception, we should revert all changes applied so far, to keep things

@@ -1249,7 +1256,7 @@ class Engine(object):
     (len_calc, len_stored, len_undo, len_ret) = checkpoint
     undo_actions = self.out_actions.undo[len_undo:]
     log.info("Reverting %d doc actions" % len(undo_actions))
-    self.user_actions.ApplyUndoActions(map(actions.get_action_repr, undo_actions))
+    self.user_actions.ApplyUndoActions([actions.get_action_repr(a) for a in undo_actions])
     del self.out_actions.calc[len_calc:]
     del self.out_actions.stored[len_stored:]
     del self.out_actions.direct[len_stored:]

@@ -1,12 +1,12 @@
 # pylint: disable=wildcard-import
-from date import *
-from info import *
-from logical import *
-from lookup import *
-from math import *
-from stats import *
-from text import *
-from schedule import *
+from .date import *
+from .info import *
+from .logical import *
+from .lookup import *
+from .math import *
+from .stats import *
+from .text import *
+from .schedule import *

 # Export all uppercase names, for use with `from functions import *`.
 __all__ = [k for k in dir() if not k.startswith('_') and k.isupper()]
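Python 3 removed implicit relative imports: inside the `functions` package, `from date import *` would now search for a top-level `date` module, and `from math import *` would pick up the stdlib instead of `functions/math.py`. The explicit `from .date import *` form behaves identically on Python 2.6+ and 3. A runnable toy demo (hypothetical `mypkg` package, mine, not from the repo):

import os, sys, tempfile

# Build a tiny package on disk whose __init__ uses the dotted form.
pkg = os.path.join(tempfile.mkdtemp(), "mypkg")
os.makedirs(pkg)
with open(os.path.join(pkg, "helper.py"), "w") as f:
    f.write("THING = 42\n")
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    f.write("from .helper import THING\n")  # explicit relative import

sys.path.insert(0, os.path.dirname(pkg))
import mypkg
print(mypkg.THING)  # 42 on both Python 2 and 3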
@@ -1,6 +1,8 @@
 import calendar
 import datetime
 import dateutil.parser
+import six
+
 import moment
 import docmodel

@@ -16,7 +18,7 @@ def _make_datetime(value):
     return datetime.datetime.combine(value, datetime.time())
   elif isinstance(value, datetime.time):
     return datetime.datetime.combine(datetime.date.today(), value)
-  elif isinstance(value, basestring):
+  elif isinstance(value, six.string_types):
     return dateutil.parser.parse(value)
   else:
     raise ValueError('Invalid date %r' % (value,))

@@ -7,6 +7,8 @@ import math
 import numbers
 import re

+import six
+
 import column
 from functions import date  # pylint: disable=import-error
 from functions.unimplemented import unimplemented

@@ -217,7 +219,7 @@ def ISTEXT(value):
   >>> ISTEXT(datetime.date(2011, 1, 1))
   False
   """
-  return isinstance(value, (basestring, AltText))
+  return isinstance(value, (six.string_types, AltText))


 # Regexp for matching email. See ISEMAIL for justification.

@@ -1,4 +1,4 @@
-from info import lazy_value_or_error, is_error
+from .info import lazy_value_or_error, is_error
 from usertypes import AltText  # pylint: disable=unused-import,import-error


@@ -63,7 +63,7 @@ def IF(logical_expression, value_if_true, value_if_false):
   0.0

   More tests:
-  >>> IF(True, lambda: (1/0), lambda: (17))
+  >>> IF(True, lambda: (1/0), lambda: (17))  # doctest: +IGNORE_EXCEPTION_DETAIL
   Traceback (most recent call last):
   ...
   ZeroDivisionError: integer division or modulo by zero

@@ -1,9 +1,10 @@
 # pylint: disable=redefined-builtin, line-too-long
 from collections import OrderedDict
 import os
-from urllib import urlencode
-import urlparse
-from unimplemented import unimplemented
+import six
+from six.moves import urllib_parse
+
+from .unimplemented import unimplemented

 @unimplemented
 def ADDRESS(row, column, absolute_relative_mode, use_a1_notation, sheet):

@@ -112,16 +113,16 @@ def SELF_HYPERLINK(label=None, page=None, **kwargs):
   if page:
     txt += "/p/{}".format(page)
   if kwargs:
-    parts = list(urlparse.urlparse(txt))
-    query = OrderedDict(urlparse.parse_qsl(parts[4]))
-    for [key, value] in kwargs.iteritems():
+    parts = list(urllib_parse.urlparse(txt))
+    query = OrderedDict(urllib_parse.parse_qsl(parts[4]))
+    for [key, value] in sorted(six.iteritems(kwargs)):
       key_parts = key.split('LinkKey_')
       if len(key_parts) == 2 and key_parts[0] == '':
         query[key_parts[1] + '_'] = value
       else:
         raise TypeError("unexpected keyword argument '{}' (not of form LinkKey_NAME)".format(key))
-    parts[4] = urlencode(query)
-    txt = urlparse.urlunparse(parts)
+    parts[4] = urllib_parse.urlencode(query)
+    txt = urllib_parse.urlunparse(parts)
   if label:
     txt = "{} {}".format(label, txt)
   return txt
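Python 3 merged `urlparse` and the encoding half of `urllib` into `urllib.parse`; `six.moves.urllib_parse` aliases the right module on each version. The added `sorted(...)` around `six.iteritems(kwargs)` also makes the query-string order deterministic, since dict iteration order was arbitrary on these interpreters. A sketch of the round trip:

from six.moves import urllib_parse

parts = list(urllib_parse.urlparse("https://example.com/doc?a=1"))
query = dict(urllib_parse.parse_qsl(parts[4]))
query["b_"] = "2"
# Sorting the pairs keeps the rebuilt URL stable across versions.
parts[4] = urllib_parse.urlencode(sorted(query.items()))
print(urllib_parse.urlunparse(parts))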
@@ -1,12 +1,15 @@
 # pylint: disable=unused-argument

 from __future__ import absolute_import
-import itertools
 import math as _math
 import operator
 import os
 import random
 import uuid
+from functools import reduce
+
+from six.moves import zip, xrange
+import six

 from functions.info import ISNUMBER, ISLOGICAL
 from functions.unimplemented import unimplemented

@@ -358,7 +361,7 @@ def INT(value):
   return int(_math.floor(value))

 def _lcm(a, b):
-  return a * b / _gcd(a, b)
+  return a * b // _gcd(a, b)

 def LCM(value1, *more_values):
   """

@@ -790,7 +793,7 @@ def SUMPRODUCT(array1, *more_arrays):
   >>> SUMPRODUCT([-0.25, -0.25], [-2, -2], [-3, -3])
   -3.0
   """
-  return sum(reduce(operator.mul, values) for values in itertools.izip(array1, *more_arrays))
+  return sum(reduce(operator.mul, values) for values in zip(array1, *more_arrays))

 @unimplemented
 def SUMSQ(value1, value2):

@@ -842,4 +845,7 @@ def TRUNC(value, places=0):

 def UUID():
   """Generate a random UUID-formatted string identifier."""
-  return str(uuid.UUID(bytes=[chr(random.randrange(0, 256)) for _ in xrange(0, 16)], version=4))
+  if six.PY2:
+    return str(uuid.UUID(bytes=[chr(random.randrange(0, 256)) for _ in xrange(0, 16)], version=4))
+  else:
+    return str(uuid.UUID(bytes=bytes([random.randrange(0, 256) for _ in range(0, 16)]), version=4))
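The branches differ because `uuid.UUID(bytes=...)` wants a length-16 `str` on Python 2 but a `bytes` object on Python 3, and `bytes([ints])` is a Python 3 construction. An equivalent portable sketch (my variant, not what the commit does) leans on `os.urandom`, which returns the right byte type on both versions:

import os
import uuid

def random_uuid_str():
    # os.urandom returns str on Python 2 and bytes on Python 3, which
    # matches what uuid.UUID(bytes=...) expects on each.
    return str(uuid.UUID(bytes=os.urandom(16), version=4))

print(random_uuid_str())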
@@ -1,6 +1,6 @@
 from datetime import datetime, timedelta
 import re
-from date import DATEADD, NOW, DTIME
+from .date import DATEADD, NOW, DTIME
 from moment_parse import MONTH_NAMES, DAY_NAMES

 # Limit exports to schedule, so that upper-case constants like MONTH_NAMES, DAY_NAMES don't end up

@@ -1,9 +1,9 @@
 # pylint: disable=redefined-builtin, line-too-long, unused-argument

-from math import _chain, _chain_numeric, _chain_numeric_a
-from info import ISNUMBER, ISLOGICAL
-from date import DATE  # pylint: disable=unused-import
-from unimplemented import unimplemented
+from .math import _chain, _chain_numeric, _chain_numeric_a
+from .info import ISNUMBER, ISLOGICAL
+from .date import DATE  # pylint: disable=unused-import
+from .unimplemented import unimplemented

 def _average(iterable):
   total, count = 0.0, 0

@@ -326,7 +326,7 @@ def MEDIAN(value, *more_values):
   3
   >>> MEDIAN(3, 5, 1, 4, 2)
   3
-  >>> MEDIAN(xrange(10))
+  >>> MEDIAN(range(10))
   4.5
   >>> MEDIAN("Hello", "123", DATE(2015, 1, 1), 12.3)
   12.3

@@ -340,9 +340,9 @@ def MEDIAN(value, *more_values):
     raise ValueError("MEDIAN requires at least one number")
   count = len(values)
   if count % 2 == 0:
-    return (values[count / 2 - 1] + values[count / 2]) / 2.0
+    return (values[count // 2 - 1] + values[count // 2]) / 2.0
   else:
-    return values[(count - 1) / 2]
+    return values[(count - 1) // 2]


 def MIN(value, *more_values):
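On Python 3, `/` is always true division, so `count / 2` on ints yields a float and would crash as a list index; `//` floors on both versions. For instance:

values = [1, 2, 3, 4, 5]
count = len(values)

# '//' is floor division on Python 2 and 3 alike; plain '/' would give
# 2.0 on Python 3 and a TypeError when used as an index.
assert values[(count - 1) // 2] == 3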
@@ -4,7 +4,7 @@ import timeit
 import unittest

 import moment
-import schedule
+from . import schedule
 from functions.date import DTIME
 from functions import date as _date

@@ -68,7 +68,7 @@ class TestSchedule(unittest.TestCase):
     self.assertDate(RDU(DT("2018-09-04 14:38:11"), "seconds"), "2018-09-04 14:38:11")
     self.assertDate(RDU(DT("2018-09-04 14:38:11") - TICK, "seconds"), "2018-09-04 14:38:10")

-    with self.assertRaisesRegexp(ValueError, r"Invalid unit inches"):
+    with self.assertRaisesRegex(ValueError, r"Invalid unit inches"):
       RDU(DT("2018-09-04 14:38:11"), "inches")

   def test_round_down_to_unit_tz(self):

@@ -99,11 +99,11 @@ class TestSchedule(unittest.TestCase):
     self.assertEqual(schedule._parse_interval("25-months"), (25, "months"))
     self.assertEqual(schedule._parse_interval("3-day"), (3, "days"))
     self.assertEqual(schedule._parse_interval("2-hour"), (2, "hours"))
-    with self.assertRaisesRegexp(ValueError, "Not a valid interval"):
+    with self.assertRaisesRegex(ValueError, "Not a valid interval"):
       schedule._parse_interval("1Year")
-    with self.assertRaisesRegexp(ValueError, "Not a valid interval"):
+    with self.assertRaisesRegex(ValueError, "Not a valid interval"):
       schedule._parse_interval("1y")
-    with self.assertRaisesRegexp(ValueError, "Unknown unit"):
+    with self.assertRaisesRegex(ValueError, "Unknown unit"):
       schedule._parse_interval("1-daily")

   def test_parse_slot(self):

@@ -145,41 +145,41 @@ class TestSchedule(unittest.TestCase):

   def test_parse_slot_errors(self):
     # Test failures with duplicate units
-    with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate unit'):
       schedule._parse_slot('+1d +2d', 'weeks')
-    with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate unit'):
       schedule._parse_slot('9:30am +2H', 'days')
-    with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate unit'):
       schedule._parse_slot('/15 +1d', 'months')
-    with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate unit'):
       schedule._parse_slot('Feb-1 12:30pm +20M', 'years')

     # Test failures with improper slot types
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot.*for unit'):
       schedule._parse_slot('Feb-1', 'weeks')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot.*for unit'):
       schedule._parse_slot('Monday', 'months')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot.*for unit'):
       schedule._parse_slot('4/15', 'hours')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot.*for unit'):
       schedule._parse_slot('/1', 'years')

     # Test failures with outright invalid slot syntax.
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot'):
       schedule._parse_slot('Feb:1', 'weeks')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot'):
       schedule._parse_slot('/1d', 'months')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot'):
       schedule._parse_slot('10', 'hours')
-    with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
+    with self.assertRaisesRegex(ValueError, 'Invalid slot'):
       schedule._parse_slot('H1', 'years')

     # Test failures with unknown values
-    with self.assertRaisesRegexp(ValueError, 'Unknown month'):
+    with self.assertRaisesRegex(ValueError, 'Unknown month'):
       schedule._parse_slot('februarium-1', 'years')
-    with self.assertRaisesRegexp(ValueError, 'Unknown day of the week'):
+    with self.assertRaisesRegex(ValueError, 'Unknown day of the week'):
       schedule._parse_slot('snu', 'weeks')
-    with self.assertRaisesRegexp(ValueError, 'Unknown unit'):
+    with self.assertRaisesRegex(ValueError, 'Unknown unit'):
       schedule._parse_slot('+1t', 'hours')

   def test_schedule(self):
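`assertRaisesRegexp` was renamed `assertRaisesRegex` in Python 3.2; the old name survives only as a deprecated alias (removed in 3.12), and Python 2.7 has only the old spelling. If these tests still needed to run under 2.7, one hedged option is a small alias shim:

import unittest

# Give Python 2.7's TestCase the modern method name so test bodies can
# use assertRaisesRegex everywhere. No-op on Python 3.
if not hasattr(unittest.TestCase, "assertRaisesRegex"):
    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp

class ExampleTest(unittest.TestCase):
    def test_message(self):
        with self.assertRaisesRegex(ValueError, "Unknown unit"):
            raise ValueError("Unknown unit: inches")

if __name__ == "__main__":
    unittest.main()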
@@ -250,14 +250,14 @@ from datetime import datetime
     ]
     self.assertEqual(timing_schedule_full(), expected_result)
     t = min(timeit.repeat(stmt="t.timing_schedule_full()", setup=setup, number=N, repeat=3))
-    print "\n*** SCHEDULE call with 4 points: %.2f us" % (t * 1000000 / N)
+    print("\n*** SCHEDULE call with 4 points: %.2f us" % (t * 1000000 / N))

     t = min(timeit.repeat(stmt="t.timing_schedule_init()", setup=setup, number=N, repeat=3))
-    print "*** Schedule constructor: %.2f us" % (t * 1000000 / N)
+    print("*** Schedule constructor: %.2f us" % (t * 1000000 / N))

     self.assertEqual(timing_schedule_series(), expected_result)
     t = min(timeit.repeat(stmt="t.timing_schedule_series()", setup=setup, number=N, repeat=3))
-    print "*** Schedule series with 4 points: %.2f us" % (t * 1000000 / N)
+    print("*** Schedule series with 4 points: %.2f us" % (t * 1000000 / N))

 def timing_schedule_full():
   return list(schedule.SCHEDULE("weekly: Mo 10:30am, We 10:30pm",
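These print rewrites work on both interpreters without any import: print(x) with a single parenthesized argument is a valid Python 2 print statement and a function call in Python 3. A small sketch; the timing values are made up for illustration:

    t, N = 0.0123, 1000  # hypothetical timeit results
    print("*** Schedule constructor: %.2f us" % (t * 1000000 / N))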
@@ -5,7 +5,11 @@ import dateutil.parser
 import numbers
 import re

-from unimplemented import unimplemented
+import six
+from six import unichr
+from six.moves import xrange
+
+from .unimplemented import unimplemented
 from usertypes import AltText # pylint: disable=import-error

 def CHAR(table_number):
@@ -478,7 +482,7 @@ def SUBSTITUTE(text, old_text, new_text, instance_num=None):
   if not old_text:
     return text

-  if not isinstance(new_text, basestring):
+  if not isinstance(new_text, six.string_types):
     new_text = str(new_text)

   if instance_num is None:
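basestring is gone in Python 3. six.string_types is (str, unicode) on Python 2 and (str,) on Python 3, so the isinstance check above keeps its old meaning. A sketch of the pattern, assuming only the six library:

    import six

    def ensure_text(value):
      # Leave str/unicode values alone; stringify everything else.
      if not isinstance(value, six.string_types):
        value = str(value)
      return value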
@@ -19,6 +19,8 @@ import re
 import imp
 from collections import OrderedDict

+import six
+
 import codebuilder
 from column import is_visible_column
 import summary
@@ -123,7 +125,7 @@ class GenCode(object):
     source_table_id = summary.decode_summary_table_name(table_id)

     # Sort columns by "isFormula" to output all data columns before all formula columns.
-    columns = sorted(table_info.columns.itervalues(), key=lambda c: c.isFormula)
+    columns = sorted(six.itervalues(table_info.columns), key=lambda c: c.isFormula)
     if filter_for_user:
       columns = [c for c in columns if is_visible_column(c.colId)]
     parts = ["@grist.UserTable\nclass %s:\n" % table_id]
@@ -136,9 +138,9 @@ class GenCode(object):
     if summary_tables:
       # Include summary formulas, for the user's information.
       formulas = OrderedDict((c.colId, c) for s in summary_tables
-                             for c in s.columns.itervalues() if c.isFormula)
+                             for c in six.itervalues(s.columns) if c.isFormula)
       parts.append(indent(textbuilder.Text("\nclass _Summary:\n")))
-      for col_info in formulas.itervalues():
+      for col_info in six.itervalues(formulas):
         parts.append(indent(self._make_field(col_info, table_id), levels=2))

     return textbuilder.Combiner(parts)
@@ -147,7 +149,7 @@ class GenCode(object):
     """Regenerates the code text and usercode module from upated document schema."""
     # Collect summary tables to group them by source table.
     summary_tables = {}
-    for table_info in schema.itervalues():
+    for table_info in six.itervalues(schema):
       source_table_id = summary.decode_summary_table_name(table_info.tableId)
       if source_table_id:
         summary_tables.setdefault(source_table_id, []).append(table_info)
@@ -156,7 +158,7 @@ class GenCode(object):
                  "from functions import * # global uppercase functions\n" +
                  "import datetime, math, re # modules commonly needed in formulas\n"]
     userparts = fullparts[:]
-    for table_info in schema.itervalues():
+    for table_info in six.itervalues(schema):
       fullparts.append("\n\n")
       fullparts.append(self._make_table_model(table_info, summary_tables.get(table_info.tableId)))
       if not _is_special_table(table_info.tableId):
@@ -191,5 +193,5 @@ def _is_special_table(table_id):
 def exec_module_text(module_text):
   # pylint: disable=exec-used
   mod = imp.new_module("usercode")
-  exec module_text in mod.__dict__
+  exec(module_text, mod.__dict__)
   return mod
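exec was a statement in Python 2 and is a builtin function in Python 3; the call form exec(code, namespace) happens to parse correctly under both, so the hunk above needs no version check. Minimal sketch:

    namespace = {}
    exec("x = 6 * 7", namespace)  # runs the code with `namespace` as its globals
    assert namespace["x"] == 42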
@@ -1,3 +1,6 @@
+from six.moves import xrange
+
+
 def _is_array(obj):
   return isinstance(obj, list)

@@ -287,7 +287,8 @@ class ImportActions(object):
         src_col_id = _import_transform_col_prefix + curr_col["colId"]
         log.debug("Copying from: " + src_col_id)

-        column_data[curr_col["colId"]] = map(hidden_table.get_column(src_col_id).raw_get, row_ids)
+        src_col = hidden_table.get_column(src_col_id)
+        column_data[curr_col["colId"]] = [src_col.raw_get(r) for r in row_ids]


     # ========= Cleanup, Prepare new table (if needed), insert data
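The map() removal above is the safer habit for Python 3, where map() returns a one-shot iterator instead of a list. A short illustration in plain Python:

    values = [1, 2, 3]
    m = map(str, values)
    assert list(m) == ['1', '2', '3']
    assert list(m) == []  # on Python 3 the iterator is now exhausted

    # A list comprehension gives a real, re-readable list on both versions:
    strings = [str(v) for v in values]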
@@ -8,7 +8,7 @@ class TestMessyTables(unittest.TestCase):
   def test_any_tableset(self):
     path = os.path.join(os.path.dirname(__file__),
                         "fixtures", "nyc_schools_progress_report_ec_2013.xlsx")
-    with open(path, "r") as f:
+    with open(path, "rb") as f:
       table_set = messytables.any.any_tableset(f, extension=os.path.splitext(path)[1])

     self.assertIsInstance(table_set, messytables.XLSTableSet)
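The "rb" change matters on Python 3, where mode "r" opens a file in text mode and decodes bytes to str; reading an .xlsx (a zip archive) that way fails or mangles the data. On Python 2 the distinction only affected Windows line endings. A sketch of the rule, with hypothetical file names:

    with open('notes.txt', 'r') as f:       # text: decoded to str on Py3
      text = f.read()
    with open('workbook.xlsx', 'rb') as f:  # binary: stays bytes on Py3
      blob = f.read()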
@@ -1,3 +1,5 @@
+import six
+
 import column
 import depend
 import records
@@ -85,7 +87,7 @@ class LookupMapColumn(column.BaseColumn):
   def _invalidate_affected(self, affected_keys):
     # For each known relation, figure out which referring rows are affected, and invalidate them.
     # The engine will notice that there have been more invalidations, and recompute things again.
-    for node, rel in self._lookup_relations.iteritems():
+    for node, rel in six.iteritems(self._lookup_relations):
       affected_rows = rel.get_affected_rows_by_keys(affected_keys)
       self._engine.invalidate_records(node.table_id, affected_rows, col_ids=(node.col_id,))

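dict.iteritems/itervalues/iterkeys no longer exist in Python 3. six.iteritems(d) and six.itervalues(d) call the lazy Python 2 methods when available and fall back to d.items()/d.values() on Python 3, which is the substitution made throughout this commit. Sketch:

    import six

    d = {'a': 1, 'b': 2}
    for key, value in six.iteritems(d):  # lazy iteration on both versions
      pass
    total = sum(six.itervalues(d))
    assert total == 3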
@@ -9,6 +9,8 @@ sys.path.append('thirdparty')
 import marshal
 import functools

+import six
+
 from acl_formula import parse_acl_formula
 import actions
 import sandbox
@@ -86,7 +88,7 @@ def main():
   @export
   def fetch_meta_tables(formulas=True):
     return {table_id: actions.get_action_repr(table_data)
-            for (table_id, table_data) in eng.fetch_meta_tables(formulas).iteritems()}
+            for (table_id, table_data) in six.iteritems(eng.fetch_meta_tables(formulas))}

   @export
   def load_meta_tables(meta_tables, meta_columns):
@@ -100,8 +102,8 @@ def main():
   @export
   def create_migrations(all_tables, metadata_only=False):
     doc_actions = migrations.create_migrations(
-      {t: table_data_from_db(t, data) for t, data in all_tables.iteritems()}, metadata_only)
-    return map(actions.get_action_repr, doc_actions)
+      {t: table_data_from_db(t, data) for t, data in six.iteritems(all_tables)}, metadata_only)
+    return [actions.get_action_repr(action) for action in doc_actions]

   @export
   def get_version():
@@ -1,6 +1,9 @@
 import json
 import re

+import six
+from six.moves import xrange
+
 import actions
 import identifiers
 import schema
@@ -56,12 +59,12 @@ def create_migrations(all_tables, metadata_only=False):
   user_schema = schema.build_schema(all_tables['_grist_Tables'],
                                     all_tables['_grist_Tables_column'],
                                     include_builtin=False)
-  for t in user_schema.itervalues():
+  for t in six.itervalues(user_schema):
     tdset.apply_doc_action(actions.AddTable(t.tableId, schema.cols_to_dict_list(t.columns)))

   # For each old table/column, construct an AddTable action using the current schema.
   new_schema = {a.table_id: a for a in schema.schema_create_actions()}
-  for table_id, data in sorted(all_tables.iteritems()):
+  for table_id, data in sorted(six.iteritems(all_tables)):
     # User tables should already be in tdset; the rest must be metadata tables.
     # (If metadata_only is true, there is simply nothing to skip here.)
     if table_id not in tdset.all_tables:
@@ -94,7 +97,7 @@ def get_last_migration_version():
   """
   Returns the last schema version number for which we have a migration defined.
   """
-  return max(all_migrations.iterkeys())
+  return max(all_migrations)

 def migration(schema_version, need_all_tables=False):
   """
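The max(all_migrations.iterkeys()) rewrite needs no shim at all: iterating a dict yields its keys on both Python 2 and Python 3, so max(d) already means "largest key". Tiny illustration with a made-up mapping:

    all_migrations = {1: 'm1', 7: 'm7', 21: 'm21'}  # version -> migration, for illustration
    assert max(all_migrations) == 21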
@ -330,16 +333,16 @@ def migration7(tdset):
|
|||||||
# - It doesn't fix types of Reference columns that refer to old-style summary tables
|
# - It doesn't fix types of Reference columns that refer to old-style summary tables
|
||||||
# (if the user created some such columns manually).
|
# (if the user created some such columns manually).
|
||||||
|
|
||||||
doc_actions = filter(None, [
|
doc_actions = [action for action in [
|
||||||
maybe_add_column(tdset, '_grist_Tables', 'summarySourceTable', 'Ref:_grist_Tables'),
|
maybe_add_column(tdset, '_grist_Tables', 'summarySourceTable', 'Ref:_grist_Tables'),
|
||||||
maybe_add_column(tdset, '_grist_Tables_column', 'summarySourceCol', 'Ref:_grist_Tables_column')
|
maybe_add_column(tdset, '_grist_Tables_column', 'summarySourceCol', 'Ref:_grist_Tables_column')
|
||||||
])
|
] if action]
|
||||||
|
|
||||||
# Maps tableRef to Table object.
|
# Maps tableRef to Table object.
|
||||||
tables_map = {t.id: t for t in actions.transpose_bulk_action(tdset.all_tables['_grist_Tables'])}
|
tables_map = {t.id: t for t in actions.transpose_bulk_action(tdset.all_tables['_grist_Tables'])}
|
||||||
|
|
||||||
# Maps tableName to tableRef
|
# Maps tableName to tableRef
|
||||||
table_name_to_ref = {t.tableId: t.id for t in tables_map.itervalues()}
|
table_name_to_ref = {t.tableId: t.id for t in six.itervalues(tables_map)}
|
||||||
|
|
||||||
# List of Column objects
|
# List of Column objects
|
||||||
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
|
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
|
||||||
@ -362,14 +365,14 @@ def migration7(tdset):
|
|||||||
# Summary tables used to be named as "Summary_<SourceName>_<ColRef1>_<ColRef2>". This regular
|
# Summary tables used to be named as "Summary_<SourceName>_<ColRef1>_<ColRef2>". This regular
|
||||||
# expression parses that.
|
# expression parses that.
|
||||||
summary_re = re.compile(r'^Summary_(\w+?)((?:_\d+)*)$')
|
summary_re = re.compile(r'^Summary_(\w+?)((?:_\d+)*)$')
|
||||||
for t in tables_map.itervalues():
|
for t in six.itervalues(tables_map):
|
||||||
m = summary_re.match(t.tableId)
|
m = summary_re.match(t.tableId)
|
||||||
if not m or m.group(1) not in table_name_to_ref:
|
if not m or m.group(1) not in table_name_to_ref:
|
||||||
continue
|
continue
|
||||||
# We have a valid summary table.
|
# We have a valid summary table.
|
||||||
source_table_name = m.group(1)
|
source_table_name = m.group(1)
|
||||||
source_table_ref = table_name_to_ref[source_table_name]
|
source_table_ref = table_name_to_ref[source_table_name]
|
||||||
groupby_colrefs = map(int, m.group(2).strip("_").split("_"))
|
groupby_colrefs = [int(x) for x in m.group(2).strip("_").split("_")]
|
||||||
# Prepare a new-style name for the summary table. Be sure not to conflict with existing tables
|
# Prepare a new-style name for the summary table. Be sure not to conflict with existing tables
|
||||||
# or with each other (i.e. don't rename multiple tables to the same name).
|
# or with each other (i.e. don't rename multiple tables to the same name).
|
||||||
new_name = summary.encode_summary_table_name(source_table_name)
|
new_name = summary.encode_summary_table_name(source_table_name)
|
||||||
@@ -3,10 +3,11 @@ from collections import namedtuple
 import marshal
 from time import time
 import bisect
-import itertools
 import os
 import moment_parse
 import iso8601
+import six
+from six.moves import zip

 # This is prepared by sandbox/install_tz.py
 ZoneRecord = namedtuple("ZoneRecord", ("name", "abbrs", "offsets", "untils"))
@@ -92,7 +93,7 @@ class tz(object):
     self._tzinfo = tzinfo(zonelabel)
     if isinstance(dt, datetime):
       timestamp = dt_to_ts(dt.replace(tzinfo=self._tzinfo)) * 1000
-    elif isinstance(dt, (float, int, long)):
+    elif isinstance(dt, (float, six.integer_types)):
       timestamp = dt
     else:
       raise TypeError("'dt' should be a datetime object or a numeric type")
@@ -181,7 +182,7 @@ class Zone(object):
     # "Until" times adjusted by the corresponding offsets. These are used in translating from
     # datetime to absolute timestamp.
     self.offset_untils = [until - offset * 60000 for (until, offset) in
-                          itertools.izip(self.untils, self.offsets)]
+                          zip(self.untils, self.offsets)]
     # Cache of TzInfo objects for this Zone, used by get_tzinfo(). There could be multiple TzInfo
     # objects, one for each possible offset, but their behavior only differs for ambiguous time.
     self._tzinfo = {}
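itertools.izip was dropped in Python 3 because the builtin zip() is already lazy there. `from six.moves import zip` binds the lazy variant on Python 2 and the builtin on Python 3, so call sites like the offset_untils comprehension above stay unchanged. Sketch with made-up data:

    from six.moves import zip

    untils = [100, 200, 300]
    offsets = [60, 0, -60]
    adjusted = [u - o * 60000 for (u, o) in zip(untils, offsets)]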
@@ -17,6 +17,7 @@ from math import isnan

 import moment
 import records
+import six


 class UnmarshallableError(ValueError):
@@ -186,9 +187,9 @@ def encode_object(value):
     # Represent RecordSet (e.g. result of lookupRecords) in the same way as a RecordList.
     return ['L'] + [encode_object(int(item)) for item in value]
   elif isinstance(value, dict):
-    if not all(isinstance(key, basestring) for key in value):
+    if not all(isinstance(key, six.string_types) for key in value):
       raise UnmarshallableError("Dict with non-string keys")
-    return ['O', {key: encode_object(val) for key, val in value.iteritems()}]
+    return ['O', {key: encode_object(val) for key, val in six.iteritems(value)}]
   elif value == _pending_sentinel:
     return ['P']
   elif value == _censored_sentinel:
@@ -230,7 +231,7 @@ def decode_object(value):
   elif code == 'L':
     return [decode_object(item) for item in args]
   elif code == 'O':
-    return {decode_object(key): decode_object(val) for key, val in args[0].iteritems()}
+    return {decode_object(key): decode_object(val) for key, val in six.iteritems(args[0])}
   elif code == 'P':
     return _pending_sentinel
   elif code == 'C':
@@ -97,6 +97,8 @@ class Record(object):
   def __nonzero__(self):
     return bool(self._row_id)

+  __bool__ = __nonzero__
+
   def __repr__(self):
     return "%s[%s]" % (self._table.table_id, self._row_id)

@@ -144,6 +146,8 @@ class RecordSet(object):
   def __nonzero__(self):
     return bool(self._row_ids)

+  __bool__ = __nonzero__
+
   def __eq__(self, other):
     return (isinstance(other, RecordSet) and
             (self._table, self._row_ids, self._group_by, self._sort_by) ==
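Python 3 renamed the truth-testing hook __nonzero__ to __bool__, so the class-body assignment above lets one implementation serve both interpreters. Sketch of the pattern:

    class Handle(object):
      def __init__(self, row_id):
        self._row_id = row_id

      def __nonzero__(self):      # consulted by Python 2
        return bool(self._row_id)

      __bool__ = __nonzero__      # Python 3 consults __bool__ instead

    assert Handle(5)
    assert not Handle(0)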
@@ -44,6 +44,7 @@ import itertools
 import math
 import struct

+from six.moves import zip, xrange
 from sortedcontainers import SortedList, SortedListWithKey


@@ -79,7 +80,7 @@ def prepare_inserts_dumb(sortedlist, keys):
   ins_groups.append((len(sortedlist), 0))
   for index, ins_count in ins_groups:
     adj_count = index - prev_index
-    adjustments.extend(itertools.izip(xrange(prev_index, index),
+    adjustments.extend(zip(xrange(prev_index, index),
                            frange_from(next_key, adj_count)))
     next_key += adj_count
     insertions.extend(frange_from(next_key, ins_count))
@@ -233,7 +234,7 @@ class ListWithAdjustments(object):
     prev_keys.sort()
     new_keys = get_range(new_begin_key, new_end_key, count)

-    for (old_key, is_insert, i), new_key in itertools.izip(prev_keys, new_keys):
+    for (old_key, is_insert, i), new_key in zip(prev_keys, new_keys):
       if is_insert:
         self._insertions.remove(old_key)
         self._insertions.add(new_key)
@@ -303,7 +304,7 @@ def all_distinct(iterable):
   """
   a, b = itertools.tee(iterable)
   next(b, None)
-  return all(x != y for x, y in itertools.izip(a, b))
+  return all(x != y for x, y in zip(a, b))


 def range_around_float(x, i):
@@ -57,7 +57,7 @@ class REPLInterpreter(code.InteractiveInterpreter):
       # like get/set attr and have that hurt us
       sys.stdout = user_output
       sys.stderr = user_output
-      exec code in self.locals
+      exec(code, self.locals)
     except:
       # bare except to catch absolutely all things the user can throw
       self.showtraceback()
@@ -14,6 +14,8 @@ import sys
 import unittest
 sys.path.append('/thirdparty')

+import six
+
 def main():
   # Change to the directory of this file (/grist in sandbox), to discover everything under it.
   os.chdir(os.path.dirname(__file__))
@@ -23,7 +25,9 @@ def main():
   if "--xunit" in argv:
     import xmlrunner
     argv.remove("--xunit")
-    utf8_stdout = codecs.getwriter('utf8')(sys.stdout)
+    utf8_stdout = sys.stdout
+    if six.PY2:
+      utf8_stdout = codecs.getwriter('utf8')(utf8_stdout)
     test_runner = xmlrunner.XMLTestRunner(stream=utf8_stdout)

   if all(arg.startswith("-") for arg in argv[1:]):
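This hunk shows the conditional style used when behavior genuinely differs between versions: Python 3's sys.stdout already accepts unicode, so the codecs wrapper is only applied under Python 2, gated on the six.PY2 boolean. Sketch:

    import codecs
    import sys

    import six

    stream = sys.stdout
    if six.PY2:
      # Py2 file objects want bytes; encode unicode as UTF-8 on the way out.
      stream = codecs.getwriter('utf8')(stream)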
@@ -38,8 +38,8 @@ class Sandbox(object):

   def __init__(self):
     self._functions = {}
-    self._external_input = os.fdopen(3, "r", 64*1024)
-    self._external_output = os.fdopen(4, "w", 64*1024)
+    self._external_input = os.fdopen(3, "rb", 64*1024)
+    self._external_output = os.fdopen(4, "wb", 64*1024)

   def _send_to_js(self, msgCode, msgBody):
     # (Note that marshal version 2 is the default; we specify it explicitly for clarity. The
@@ -10,6 +10,9 @@ Before changing this file, please review:

 import itertools
 from collections import OrderedDict, namedtuple
+
+import six
+
 import actions

 SCHEMA_VERSION = 21
@@ -310,7 +313,7 @@ def cols_to_dict_list(cols):

 def clone_schema(schema):
   return OrderedDict((t, SchemaTable(s.tableId, s.columns.copy()))
-                     for (t, s) in schema.iteritems())
+                     for (t, s) in six.iteritems(schema))

 def build_schema(meta_tables, meta_columns, include_builtin=True):
   """
@@ -1,6 +1,9 @@
 from collections import namedtuple
 import json
 import re
+
+import six
+
 import logger
 log = logger.Logger(__name__, logger.INFO)

@@ -17,7 +20,7 @@ def _make_col_info(col=None, **values):

 def _get_colinfo_dict(col_info, with_id=False):
   """Return a dict suitable to use with AddColumn or AddTable (when with_id=True) actions."""
-  col_values = {k: v for k, v in col_info._asdict().iteritems() if v is not None and k != 'colId'}
+  col_values = {k: v for k, v in six.iteritems(col_info._asdict()) if v is not None and k != 'colId'}
   if with_id:
     col_values['id'] = col_info.colId
   return col_values
@@ -78,9 +81,10 @@ def _update_sort_spec(sort_spec, old_table, new_table):

   try:
     old_sort_spec = json.loads(sort_spec)
-    new_sort_spec = filter(None, [adjust(col_spec) for col_spec in old_sort_spec])
+    new_sort_spec = [adjust(col_spec) for col_spec in old_sort_spec]
+    new_sort_spec = [col_spec for col_spec in new_sort_spec if col_spec]
     return json.dumps(new_sort_spec, separators=(',', ':'))
-  except Exception, e:
+  except Exception:
     log.warn("update_summary_section: can't parse sortColRefs JSON; clearing sortColRefs")
     return ''

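filter(), like map(), returns a lazy iterator in Python 3, and filter(None, seq) in particular drops falsy items. The two-step list comprehension above keeps that behavior while producing a real list on both versions. Illustration with hypothetical sort specs:

    specs = [{'colRef': 1}, None, {'colRef': 2}]
    kept = [s for s in specs if s]  # equivalent of filter(None, specs), but a real list
    assert kept == [{'colRef': 1}, {'colRef': 2}]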
@@ -1,6 +1,9 @@
 import collections
 import types

+import six
+from six.moves import xrange
+
 import column
 import depend
 import docmodel
@@ -211,7 +214,7 @@ class Table(object):
     new_cols['id'] = self._id_column

     # List of Columns in the same order as they appear in the generated Model definition.
-    col_items = [c for c in self.Model.__dict__.iteritems() if not c[0].startswith("_")]
+    col_items = [c for c in six.iteritems(self.Model.__dict__) if not c[0].startswith("_")]
     col_items.sort(key=lambda c: self._get_sort_order(c[1]))

     for col_id, col_model in col_items:
@@ -219,11 +222,11 @@ class Table(object):
       new_cols[col_id] = self._create_or_update_col(col_id, col_model, default_func)

     # Used for auto-completion as a record with correct properties of correct types.
-    self.sample_record = _make_sample_record(self.table_id, new_cols.itervalues())
+    self.sample_record = _make_sample_record(self.table_id, six.itervalues(new_cols))

     # Note that we reuse previous special columns like lookup maps, since those not affected by
     # column changes should stay the same. These get removed when unneeded using other means.
-    new_cols.update(sorted(self._special_cols.iteritems()))
+    new_cols.update(sorted(six.iteritems(self._special_cols)))

     # Set the new columns.
     self.all_columns = new_cols
@@ -289,7 +292,7 @@ class Table(object):
     """
     return ((0, col_model._creation_order)
             if not isinstance(col_model, types.FunctionType) else
-            (1, col_model.func_code.co_firstlineno))
+            (1, col_model.__code__.co_firstlineno))

   def next_row_id(self):
     """
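func_code is the Python 2-only name for a function's code object; __code__ is the Python 3 name and also works on Python 2.6+, so this is a plain rename with no fallback needed. Sketch:

    def sample():
      return 1

    assert sample.__code__.co_firstlineno > 0  # func_code would fail on Py3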
@ -302,7 +305,7 @@ class Table(object):
|
|||||||
Resizes all columns as needed so that all valid row_ids are valid indices into all columns.
|
Resizes all columns as needed so that all valid row_ids are valid indices into all columns.
|
||||||
"""
|
"""
|
||||||
size = self.row_ids.max() + 1
|
size = self.row_ids.max() + 1
|
||||||
for col_obj in self.all_columns.itervalues():
|
for col_obj in six.itervalues(self.all_columns):
|
||||||
col_obj.growto(size)
|
col_obj.growto(size)
|
||||||
|
|
||||||
def get_column(self, col_id):
|
def get_column(self, col_id):
|
||||||
@ -325,7 +328,7 @@ class Table(object):
|
|||||||
"""
|
"""
|
||||||
# The tuple of keys used determines the LookupMap we need.
|
# The tuple of keys used determines the LookupMap we need.
|
||||||
sort_by = kwargs.pop('sort_by', None)
|
sort_by = kwargs.pop('sort_by', None)
|
||||||
col_ids = tuple(sorted(kwargs.iterkeys()))
|
col_ids = tuple(sorted(kwargs))
|
||||||
key = tuple(kwargs[c] for c in col_ids)
|
key = tuple(kwargs[c] for c in col_ids)
|
||||||
|
|
||||||
lookup_map = self._get_lookup_map(col_ids)
|
lookup_map = self._get_lookup_map(col_ids)
|
||||||
@ -383,7 +386,7 @@ class Table(object):
|
|||||||
# TODO: It should use indices, to avoid linear searching
|
# TODO: It should use indices, to avoid linear searching
|
||||||
# TODO: It should create dependencies as needed when used from formulas.
|
# TODO: It should create dependencies as needed when used from formulas.
|
||||||
# TODO: It should return Record instead, for convenience of user formulas
|
# TODO: It should return Record instead, for convenience of user formulas
|
||||||
col_values = [(self.all_columns[col_id], value) for (col_id, value) in kwargs.iteritems()]
|
col_values = [(self.all_columns[col_id], value) for (col_id, value) in six.iteritems(kwargs)]
|
||||||
for row_id in self.row_ids:
|
for row_id in self.row_ids:
|
||||||
if all(col.raw_get(row_id) == value for col, value in col_values):
|
if all(col.raw_get(row_id) == value for col, value in col_values):
|
||||||
return row_id
|
return row_id
|
||||||
@ -398,7 +401,7 @@ class Table(object):
|
|||||||
# TODO: It should use indices, to avoid linear searching
|
# TODO: It should use indices, to avoid linear searching
|
||||||
# TODO: It should create dependencies as needed when used from formulas.
|
# TODO: It should create dependencies as needed when used from formulas.
|
||||||
# TODO: It should return Record instead, for convenience of user formulas
|
# TODO: It should return Record instead, for convenience of user formulas
|
||||||
col_values = [(self.all_columns[col_id], value) for (col_id, value) in kwargs.iteritems()]
|
col_values = [(self.all_columns[col_id], value) for (col_id, value) in six.iteritems(kwargs)]
|
||||||
for row_id in self.row_ids:
|
for row_id in self.row_ids:
|
||||||
if all(col.raw_get(row_id) == value for col, value in col_values):
|
if all(col.raw_get(row_id) == value for col, value in col_values):
|
||||||
yield row_id
|
yield row_id
|
||||||
@@ -1,4 +1,6 @@
-from itertools import izip
+from six.moves import zip as izip
+import six
+
 import actions
 from usertypes import get_type_default

@@ -29,7 +31,7 @@ class TableDataSet(object):
   def apply_doc_action(self, action):
     try:
       getattr(self, action.__class__.__name__)(*action)
-    except Exception, e:
+    except Exception as e:
       log.warn("ERROR applying action %s: %s" % (action, e))
       raise

@@ -48,12 +50,12 @@ class TableDataSet(object):
   # Actions on records.
   #----------------------------------------
   def AddRecord(self, table_id, row_id, columns):
-    self.BulkAddRecord(table_id, [row_id], {key: [val] for key, val in columns.iteritems()})
+    self.BulkAddRecord(table_id, [row_id], {key: [val] for key, val in six.iteritems(columns)})

   def BulkAddRecord(self, table_id, row_ids, columns):
     table_data = self.all_tables[table_id]
     table_data.row_ids.extend(row_ids)
-    for col, values in table_data.columns.iteritems():
+    for col, values in six.iteritems(table_data.columns):
       if col in columns:
         values.extend(columns[col])
       else:
@@ -67,19 +69,19 @@ class TableDataSet(object):
   def BulkRemoveRecord(self, table_id, row_ids):
     table_data = self.all_tables[table_id]
     remove_set = set(row_ids)
-    for col, values in table_data.columns.iteritems():
+    for col, values in six.iteritems(table_data.columns):
       values[:] = [v for r, v in izip(table_data.row_ids, values) if r not in remove_set]
     table_data.row_ids[:] = [r for r in table_data.row_ids if r not in remove_set]

   def UpdateRecord(self, table_id, row_id, columns):
     self.BulkUpdateRecord(
-      table_id, [row_id], {key: [val] for key, val in columns.iteritems()})
+      table_id, [row_id], {key: [val] for key, val in six.iteritems(columns)})

   def BulkUpdateRecord(self, table_id, row_ids, columns):
     table_data = self.all_tables[table_id]
     rowid_map = {r:i for i, r in enumerate(table_data.row_ids)}
     table_indices = [rowid_map[r] for r in row_ids]
-    for col, values in columns.iteritems():
+    for col, values in six.iteritems(columns):
       if col in table_data.columns:
         col_values = table_data.columns[col]
         for i, v in izip(table_indices, values):
@@ -88,7 +90,7 @@ class TableDataSet(object):
   def ReplaceTableData(self, table_id, row_ids, columns):
     table_data = self.all_tables[table_id]
     del table_data.row_ids[:]
-    for col, values in table_data.columns.iteritems():
+    for col, values in six.iteritems(table_data.columns):
       del values[:]
     self.BulkAddRecord(table_id, row_ids, columns)

@@ -5,6 +5,7 @@ import unittest
 from acl_formula import parse_acl_formula
 import test_engine
+

 class TestACLFormula(unittest.TestCase):
   def test_basic(self):
     # Test a few basic formulas and structures, hitting everything we expect to support
@@ -104,19 +105,19 @@ class TestACLFormula(unittest.TestCase):
     self.assertRaises(SyntaxError, parse_acl_formula, "def foo(): pass")

     # Unsupported node type
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "max(rec)")
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "user.id in {1, 2, 3}")
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "1 if user.IsAnon else 2")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "max(rec)")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "user.id in {1, 2, 3}")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "1 if user.IsAnon else 2")

     # Unsupported operation
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "1 | 2")
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "1 << 2")
-    self.assertRaisesRegexp(ValueError, r'Unsupported syntax', parse_acl_formula, "~test")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "1 | 2")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "1 << 2")
+    self.assertRaisesRegex(ValueError, r'Unsupported syntax', parse_acl_formula, "~test")

     # Syntax error
     self.assertRaises(SyntaxError, parse_acl_formula, "[(]")
     self.assertRaises(SyntaxError, parse_acl_formula, "user.id in (1,2))")
-    self.assertRaisesRegexp(SyntaxError, r'invalid syntax on line 1 col 9', parse_acl_formula, "foo and !bar")
+    self.assertRaisesRegex(SyntaxError, r'invalid syntax on line 1 col 9', parse_acl_formula, "foo and !bar")

 class TestACLFormulaUserActions(test_engine.EngineTestCase):
   def test_acl_actions(self):
@@ -2,6 +2,8 @@
 import unittest

 import codebuilder
+import six
+

 def make_body(formula, default=None):
   return codebuilder.make_formula_body(formula, default).get_text()
@@ -122,7 +124,7 @@ for a in rec:
   return rec
 """)

-    self.assertRegexpMatches(make_body(body),
+    self.assertRegex(make_body(body),
       r"raise SyntaxError\('Grist disallows assignment" +
       r" to the special variable \"rec\" on line 4 col 7'\)")

@@ -135,8 +137,8 @@ return rec
     self.assertEqual(make_body(u"'résumé' + $foo"), u"return 'résumé' + rec.foo")

     # Check the return type of make_body()
-    self.assertEqual(type(make_body("foo")), unicode)
-    self.assertEqual(type(make_body(u"foo")), unicode)
+    self.assertEqual(type(make_body("foo")), six.text_type)
+    self.assertEqual(type(make_body(u"foo")), six.text_type)


   def test_wrap_logical(self):
@@ -338,7 +338,7 @@ class TestColumnActions(test_engine.EngineTestCase):
     self.init_sample_data()

     # Test that we cannot remove group-by columns from summary tables directly.
-    with self.assertRaisesRegexp(ValueError, "cannot remove .* group-by"):
+    with self.assertRaisesRegex(ValueError, "cannot remove .* group-by"):
       self.apply_user_action(["BulkRemoveRecord", '_grist_Tables_column', [20,18]])

     # Test that group-by columns in summary tables get removed.
@@ -244,6 +244,6 @@ class TestDocModel(test_engine.EngineTestCase):

     # Verify that positions are set such that the order is what we asked for.
     student_columns = self.engine.docmodel.tables.lookupOne(tableId='Students').columns
-    self.assertEqual(map(int, student_columns), [1,2,4,5,6,25,22,23])
+    self.assertEqual(list(map(int, student_columns)), [1,2,4,5,6,25,22,23])
     school_columns = self.engine.docmodel.tables.lookupOne(tableId='Schools').columns
-    self.assertEqual(map(int, school_columns), [24,10,12])
+    self.assertEqual(list(map(int, school_columns)), [24,10,12])
@@ -4,6 +4,8 @@ import json
 import unittest
 from collections import namedtuple

+import six
+
 import actions
 import column
 import engine
@@ -22,6 +24,8 @@ View = namedtuple('View', 'id sections')
 Section = namedtuple('Section', 'id parentKey tableRef fields')
 Field = namedtuple('Field', 'id colRef')

+unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+unittest.TestCase.assertRegex = unittest.TestCase.assertRegexpMatches

 class EngineTestCase(unittest.TestCase):
   """
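The two assignments above backfill the Python 3 method names onto unittest.TestCase, so the whole test suite can use assertRaisesRegex/assertRegex on Python 2, which only ships the -Regexp spellings; on the Python 3 versions current at the time of this change, the assignment merely rebinds the name to the still-present deprecated alias. A usage sketch assuming the patch has run:

    import unittest

    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp

    class PatchedTest(unittest.TestCase):
      def test_missing_key(self):
        with self.assertRaisesRegex(KeyError, r'missing'):
          {}['missing']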
@ -152,9 +156,9 @@ class EngineTestCase(unittest.TestCase):
|
|||||||
# Convert observed and expected actions into a comparable form.
|
# Convert observed and expected actions into a comparable form.
|
||||||
for k in self.action_group_action_fields:
|
for k in self.action_group_action_fields:
|
||||||
if k in observed:
|
if k in observed:
|
||||||
observed[k] = map(get_comparable_repr, observed[k])
|
observed[k] = [get_comparable_repr(v) for v in observed[k]]
|
||||||
if k in expected:
|
if k in expected:
|
||||||
expected[k] = map(get_comparable_repr, expected[k])
|
expected[k] = [get_comparable_repr(v) for v in expected[k]]
|
||||||
|
|
||||||
if observed != expected:
|
if observed != expected:
|
||||||
o_lines = self._formatActionGroup(observed)
|
o_lines = self._formatActionGroup(observed)
|
||||||
@ -192,13 +196,13 @@ class EngineTestCase(unittest.TestCase):
|
|||||||
output = {t: self.engine.fetch_table(t) for t in self.engine.schema}
|
output = {t: self.engine.fetch_table(t) for t in self.engine.schema}
|
||||||
output = testutil.replace_nans(output)
|
output = testutil.replace_nans(output)
|
||||||
output = actions.encode_objects(output)
|
output = actions.encode_objects(output)
|
||||||
print ''.join(self._getEngineDataLines(output))
|
print(''.join(self._getEngineDataLines(output)))
|
||||||
|
|
||||||
def dump_actions(self, out_actions):
|
def dump_actions(self, out_actions):
|
||||||
"""
|
"""
|
||||||
Prints out_actions in human-readable format, for help in writing / debugging tets.
|
Prints out_actions in human-readable format, for help in writing / debugging tets.
|
||||||
"""
|
"""
|
||||||
print "\n".join(self._formatActionGroup(out_actions.__dict__))
|
print("\n".join(self._formatActionGroup(out_actions.__dict__)))
|
||||||
|
|
||||||
def assertTableData(self, table_name, data=[], cols="all", rows="all", sort=None):
|
def assertTableData(self, table_name, data=[], cols="all", rows="all", sort=None):
|
||||||
"""
|
"""
|
||||||
@ -237,7 +241,7 @@ class EngineTestCase(unittest.TestCase):
|
|||||||
if sort:
|
if sort:
|
||||||
row_ids.sort(key=lambda r: sort(table.get_record(r)))
|
row_ids.sort(key=lambda r: sort(table.get_record(r)))
|
||||||
|
|
||||||
observed_col_data = {c.col_id: map(c.raw_get, row_ids) for c in columns if c.col_id != "id"}
|
observed_col_data = {c.col_id: [c.raw_get(r) for r in row_ids] for c in columns if c.col_id != "id"}
|
||||||
observed = actions.TableData(table_name, row_ids, observed_col_data)
|
observed = actions.TableData(table_name, row_ids, observed_col_data)
|
||||||
self.assertEqualDocData({table_name: observed}, {table_name: expected},
|
self.assertEqualDocData({table_name: observed}, {table_name: expected},
|
||||||
col_names=col_names)
|
col_names=col_names)
|
||||||
@ -286,7 +290,7 @@ class EngineTestCase(unittest.TestCase):
|
|||||||
"""
|
"""
|
||||||
schema = sample["SCHEMA"]
|
schema = sample["SCHEMA"]
|
||||||
self.engine.load_meta_tables(schema['_grist_Tables'], schema['_grist_Tables_column'])
|
self.engine.load_meta_tables(schema['_grist_Tables'], schema['_grist_Tables_column'])
|
||||||
for data in sample["DATA"].itervalues():
|
for data in six.itervalues(sample["DATA"]):
|
||||||
self.engine.load_table(data)
|
self.engine.load_table(data)
|
||||||
self.engine.load_done()
|
self.engine.load_done()
|
||||||
|
|
||||||
@ -426,11 +430,11 @@ class TestEngine(EngineTestCase):
|
|||||||
sample = self.samples[data.pop("USE_SAMPLE")]
|
sample = self.samples[data.pop("USE_SAMPLE")]
|
||||||
expected_data = sample["DATA"].copy()
|
expected_data = sample["DATA"].copy()
|
||||||
expected_data.update({t: testutil.table_data_from_rows(t, tdata[0], tdata[1:])
|
expected_data.update({t: testutil.table_data_from_rows(t, tdata[0], tdata[1:])
|
||||||
for (t, tdata) in data.iteritems()})
|
for (t, tdata) in six.iteritems(data)})
|
||||||
self.assertCorrectEngineData(expected_data)
|
self.assertCorrectEngineData(expected_data)
|
||||||
else:
|
else:
|
||||||
raise ValueError("Unrecognized step %s in test script" % step)
|
raise ValueError("Unrecognized step %s in test script" % step)
|
||||||
except Exception, e:
|
except Exception as e:
|
||||||
prefix = "LINE %s: " % line
|
prefix = "LINE %s: " % line
|
||||||
e.args = (prefix + e.args[0],) + e.args[1:] if e.args else (prefix,)
|
e.args = (prefix + e.args[0],) + e.args[1:] if e.args else (prefix,)
|
||||||
raise
|
raise
|
||||||
@ -526,7 +530,7 @@ class TestEngine(EngineTestCase):
|
|||||||
# Simulate an error inside a DocAction, and make sure we restore the schema (don't leave it in
|
# Simulate an error inside a DocAction, and make sure we restore the schema (don't leave it in
|
||||||
# inconsistent with metadata).
|
# inconsistent with metadata).
|
||||||
self.load_sample(testutil.parse_test_sample(self.sample1))
|
self.load_sample(testutil.parse_test_sample(self.sample1))
|
||||||
with self.assertRaisesRegexp(AttributeError, r"'BAD'"):
|
with self.assertRaisesRegex(AttributeError, r"'BAD'"):
|
||||||
self.add_column('Address', 'bad', isFormula=False, type="BAD")
|
self.add_column('Address', 'bad', isFormula=False, type="BAD")
|
||||||
self.engine.assert_schema_consistent()
|
self.engine.assert_schema_consistent()
|
||||||
|
|
||||||
@@ -2,6 +2,9 @@
 Tests that formula error messages (traceback) are correct
 """
 import textwrap
+
+import six
+
 import depend
 import test_engine
 import testutil
@@ -48,14 +51,19 @@ else:
     self.assertIsInstance(exc.error, type_)
     self.assertEqual(str(exc.error), message)
     if tracebackRegexp:
-      self.assertRegexpMatches(exc.details, tracebackRegexp)
+      self.assertRegex(exc.details, tracebackRegexp)

   def test_formula_errors(self):
     self.load_sample(self.sample)

+    if six.PY2:
       self.assertFormulaError(self.engine.get_formula_error('Math', 'excel_formula', 3),
                               TypeError, 'SQRT() takes exactly 1 argument (2 given)',
                               r"TypeError: SQRT\(\) takes exactly 1 argument \(2 given\)")
+    else:
+      self.assertFormulaError(self.engine.get_formula_error('Math', 'excel_formula', 3),
+                              TypeError, 'SQRT() takes 1 positional argument but 2 were given',
+                              r"TypeError: SQRT\(\) takes 1 positional argument but 2 were given")

     self.assertFormulaError(self.engine.get_formula_error('Math', 'built_in_formula', 3),
                             TypeError, "'int' object is not iterable")
@@ -2,6 +2,8 @@ import unittest
 import difflib
 import re

+from six.moves import xrange
+
 import gencode
 import identifiers
 import records
@@ -36,10 +36,10 @@ class TestGpath(unittest.TestCase):
     self.assertEqual(self.obj["hello"], "blah")

   def test_set_strict(self):
-    with self.assertRaisesRegexp(Exception, r"non-existent"):
+    with self.assertRaisesRegex(Exception, r"non-existent"):
       gpath.place(self.obj, ["bar", 4], 17)

-    with self.assertRaisesRegexp(Exception, r"not a plain object"):
+    with self.assertRaisesRegex(Exception, r"not a plain object"):
       gpath.place(self.obj, ["foo", 0], 17)


@@ -54,13 +54,13 @@ class TestGpath(unittest.TestCase):
       ["asdf", {"bar": 1}, {"bar": 2}, "hello", {"baz": 3}, "world"])

   def test_insert_strict(self):
-    with self.assertRaisesRegexp(Exception, r'not an array'):
+    with self.assertRaisesRegex(Exception, r'not an array'):
       gpath.insert(self.obj, ["foo"], "asdf")

-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.insert(self.obj, ["foo", -1], 17)

-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.insert(self.obj, ["foo", "foo"], 17)

   def test_update(self):
@@ -75,13 +75,13 @@ class TestGpath(unittest.TestCase):

   def test_update_strict(self):
     """update should be strict"""
-    with self.assertRaisesRegexp(Exception, r'non-existent'):
+    with self.assertRaisesRegex(Exception, r'non-existent'):
       gpath.update(self.obj, ["bar", 4], 17)
-    with self.assertRaisesRegexp(Exception, r'not an array'):
+    with self.assertRaisesRegex(Exception, r'not an array'):
       gpath.update(self.obj, ["foo"], 17)
-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.update(self.obj, ["foo", -1], 17)
-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.update(self.obj, ["foo", None], 17)

   def test_remove(self):
@@ -96,13 +96,13 @@ class TestGpath(unittest.TestCase):

   def test_remove_strict(self):
     """remove should be strict"""
-    with self.assertRaisesRegexp(Exception, r'non-existent'):
+    with self.assertRaisesRegex(Exception, r'non-existent'):
       gpath.remove(self.obj, ["bar", 4])
-    with self.assertRaisesRegexp(Exception, r'not an array'):
+    with self.assertRaisesRegex(Exception, r'not an array'):
       gpath.remove(self.obj, ["foo"])
-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.remove(self.obj, ["foo", -1])
-    with self.assertRaisesRegexp(Exception, r'invalid.*index'):
+    with self.assertRaisesRegex(Exception, r'invalid.*index'):
       gpath.remove(self.obj, ["foo", None])

   def test_glob(self):
@@ -112,7 +112,7 @@ class TestGpath(unittest.TestCase):
     self.assertEqual(gpath.place(self.obj, ["foo", "*", "bar"], 17), 3)
     self.assertEqual(self.obj["foo"], [{"bar": 17}, {"bar": 17}, {"baz": 3, "bar": 17}])

-    with self.assertRaisesRegexp(Exception, r'non-existent object at \/foo\/\*\/bad'):
+    with self.assertRaisesRegex(Exception, r'non-existent object at \/foo\/\*\/bad'):
       gpath.place(self.obj, ["foo", "*", "bad", "test"], 10)

     self.assertEqual(gpath.update(self.obj, ["foo", "*"], "hello"), 3)
@@ -120,9 +120,9 @@ class TestGpath(unittest.TestCase):

   def test_glob_strict_wildcard(self):
     """should only support tail wildcard for updates"""
-    with self.assertRaisesRegexp(Exception, r'invalid array index'):
+    with self.assertRaisesRegex(Exception, r'invalid array index'):
       gpath.remove(self.obj, ["foo", "*"])
-    with self.assertRaisesRegexp(Exception, r'invalid array index'):
+    with self.assertRaisesRegex(Exception, r'invalid array index'):
       gpath.insert(self.obj, ["foo", "*"], 1)

   def test_glob_wildcard_keys(self):
@@ -132,7 +132,7 @@ class TestGpath(unittest.TestCase):
     self.assertEqual(gpath.place(self.obj, ["foo", 0, "*"], 17), 1)
     self.assertEqual(self.obj["foo"], [{"bar": 1, '*': 17}, {"bar": 2}, {"baz": 3}])

-    with self.assertRaisesRegexp(Exception, r'non-existent'):
+    with self.assertRaisesRegex(Exception, r'non-existent'):
       gpath.place(self.obj, ["*", 0, "bar"], 17)

   def test_glob_nested(self):
|
@@ -13,7 +13,7 @@ class TestImportActions(test_engine.EngineTestCase):
 {'id': 'Zip', 'type': 'Int'}]])
 self.apply_user_action(['BulkAddRecord', 'Source', [1, 2], {'Name': ['John', 'Alison'],
 'City': ['New York', 'Boston'],
-'Zip': [03011, 07003]}])
+'Zip': [3011, 7003]}])
 self.assertTableData('_grist_Tables_column', cols="subset", data=[
 ["id", "colId", "type", "isFormula", "formula"],
 [1, "manualSort", "ManualSortPos", False, ""],
@@ -79,8 +79,8 @@ class TestImportActions(test_engine.EngineTestCase):

 self.assertTableData('Source', cols="all", data=[
 ["id", "Name", "City", "Zip", "gristHelper_Import_Name", "gristHelper_Import_City", "manualSort"],
-[1, "John", "New York", 03011, "John", "New York", 1.0],
-[2, "Alison", "Boston", 07003, "Alison", "Boston", 2.0],
+[1, "John", "New York", 3011, "John", "New York", 1.0],
+[2, "Alison", "Boston", 7003, "Alison", "Boston", 2.0],
 ])

 self.assertPartialData("_grist_Views_section", ["id", "tableRef", 'fields'], [
@@ -107,8 +107,8 @@ class TestImportActions(test_engine.EngineTestCase):

 self.assertTableData('Source', cols="all", data=[
 ["id", "Name", "City", "Zip", "gristHelper_Import_State", "manualSort"],
-[1, "John", "New York", 03011, "", 1.0],
-[2, "Alison", "Boston", 07003, "", 2.0],
+[1, "John", "New York", 3011, "", 1.0],
+[2, "Alison", "Boston", 7003, "", 2.0],
 ])
 self.assertPartialData("_grist_Views_section", ["id", "tableRef", 'fields'], [
 [1, 1, [1, 2, 3]],
@@ -168,8 +168,8 @@ class TestImportActions(test_engine.EngineTestCase):

 self.assertTableData('Source', cols="all", data=[
 ["id", "Name", "City", "Zip", "gristHelper_Import_Name", "gristHelper_Import_City", "gristHelper_Import_Zip", "manualSort"],
-[1, "John", "New York", 03011, "John", "New York", 03011, 1.0],
-[2, "Alison", "Boston", 07003, "Alison", "Boston", 07003, 2.0],
+[1, "John", "New York", 3011, "John", "New York", 3011, 1.0],
+[2, "Alison", "Boston", 7003, "Alison", "Boston", 7003, 2.0],
 ])
 self.assertPartialData("_grist_Views_section", ["id", "tableRef", 'fields'], [
 [1, 1, [1, 2, 3]],
@@ -3,6 +3,10 @@ import string
 import timeit
 import unittest
 from collections import Hashable

+import six
+from six.moves import xrange

 import match_counter
 from testutil import repeat_until_passes

@@ -22,7 +26,7 @@ class MatchCounterOther(object):
 pass

 matches = 0
-for v, n in self.sample_counts.iteritems():
+for v, n in six.iteritems(self.sample_counts):
 if n > 0:
 matches += 1
 self.sample_counts[v] = 0
@@ -1,5 +1,7 @@
 import unittest

+import six

 import actions
 import schema
 import table_data_set
@@ -23,7 +25,7 @@ class TestMigrations(unittest.TestCase):

 # Figure out the missing actions.
 doc_actions = []
-for table_id in sorted(current_schema.viewkeys() | migrated_schema.viewkeys()):
+for table_id in sorted(six.viewkeys(current_schema) | six.viewkeys(migrated_schema)):
 if table_id not in migrated_schema:
 doc_actions.append(actions.AddTable(table_id, current_schema[table_id].values()))
 elif table_id not in current_schema:
@@ -31,7 +33,7 @@ class TestMigrations(unittest.TestCase):
 else:
 current_cols = current_schema[table_id]
 migrated_cols = migrated_schema[table_id]
-for col_id in sorted(current_cols.viewkeys() | migrated_cols.viewkeys()):
+for col_id in sorted(six.viewkeys(current_cols) | six.viewkeys(migrated_cols)):
 if col_id not in migrated_cols:
 doc_actions.append(actions.AddColumn(table_id, col_id, current_cols[col_id]))
 elif col_id not in current_cols:
@@ -39,7 +41,7 @@ class TestMigrations(unittest.TestCase):
 else:
 current_info = current_cols[col_id]
 migrated_info = migrated_cols[col_id]
-delta = {k: v for k, v in current_info.iteritems() if v != migrated_info.get(k)}
+delta = {k: v for k, v in six.iteritems(current_info) if v != migrated_info.get(k)}
 if delta:
 doc_actions.append(actions.ModifyColumn(table_id, col_id, delta))
@@ -21,7 +21,7 @@ class TestMoment(unittest.TestCase):
 [datetime(1979, 10, 28, 6, 0, 0), 309938400000, "EST", 300, 1, 0],
 # - 2037 -
 [datetime(2037, 3, 8, 6, 59, 59), 2120108399000, "EST", 300, 1, 59],
-[datetime(2037, 03, 8, 7, 0, 0), 2120108400000, "EDT", 240, 3, 0],
+[datetime(2037, 3, 8, 7, 0, 0), 2120108400000, "EDT", 240, 3, 0],
 [datetime(2037, 11, 1, 5, 59, 59), 2140667999000, "EDT", 240, 1, 59]
 ]
 new_york_errors = [
@@ -255,7 +255,7 @@ class TestMoment(unittest.TestCase):

 def test_dt_to_ds(self):
 # Verify that dt_to_ts works for both naive and aware datetime objects.
-value_dt = datetime(2015, 03, 14, 0, 0) # In UTC
+value_dt = datetime(2015, 3, 14, 0, 0) # In UTC
 value_sec = 1426291200
 tzla = moment.get_zone('America/Los_Angeles')
 def format_utc(ts):
@@ -287,7 +287,7 @@ class TestMoment(unittest.TestCase):
 self.assertEqual(value_dt_aware.strftime(fmt), '2015-02-13 20:00:00 EST')

 def test_date_to_ts(self):
-d = date(2015, 03, 14)
+d = date(2015, 3, 14)
 tzla = moment.get_zone('America/Los_Angeles')
 def format_utc(ts):
 return moment.ts_to_dt(ts, moment.get_zone('UTC')).strftime(fmt)
@@ -7,6 +7,7 @@ import objtypes
 import testsamples
 import testutil
 import test_engine
+from objtypes import RecordStub

 log = logger.Logger(__name__, logger.INFO)

@@ -18,84 +19,84 @@ class TestRecordFunc(test_engine.EngineTestCase):

 def test_record_self(self):
 self.load_sample(testsamples.sample_students)
-self.add_column("Schools", "Foo", formula='repr(RECORD(rec))')
+self.add_column("Schools", "Foo", formula='RECORD(rec)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': Address[11], 'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': Address[12], 'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': Address[13], 'id': 3, 'name': 'Yale'}"],
-[4, "{'address': Address[14], 'id': 4, 'name': 'Yale'}"],
+[1, {'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'}],
+[2, {'address': RecordStub('Address', 12), 'id': 2, 'name': 'Columbia'}],
+[3, {'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'}],
+[4, {'address': RecordStub('Address', 14), 'id': 4, 'name': 'Yale'}],
 ])

 # A change to data is reflected
 self.update_record("Schools", 3, name="UConn")
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': Address[11], 'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': Address[12], 'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': Address[13], 'id': 3, 'name': 'UConn'}"],
-[4, "{'address': Address[14], 'id': 4, 'name': 'Yale'}"],
+[1, {'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'}],
+[2, {'address': RecordStub('Address', 12), 'id': 2, 'name': 'Columbia'}],
+[3, {'address': RecordStub('Address', 13), 'id': 3, 'name': 'UConn'}],
+[4, {'address': RecordStub('Address', 14), 'id': 4, 'name': 'Yale'}],
 ])

 # A column addition is reflected
 self.add_column("Schools", "Bar", formula='len($name)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': Address[11], 'Bar': 8, 'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': Address[12], 'Bar': 8, 'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': Address[13], 'Bar': 5, 'id': 3, 'name': 'UConn'}"],
-[4, "{'address': Address[14], 'Bar': 4, 'id': 4, 'name': 'Yale'}"],
+[1, {'address': RecordStub('Address', 11), 'Bar': 8, 'id': 1, 'name': 'Columbia'}],
+[2, {'address': RecordStub('Address', 12), 'Bar': 8, 'id': 2, 'name': 'Columbia'}],
+[3, {'address': RecordStub('Address', 13), 'Bar': 5, 'id': 3, 'name': 'UConn'}],
+[4, {'address': RecordStub('Address', 14), 'Bar': 4, 'id': 4, 'name': 'Yale'}],
 ])

 def test_reference(self):
 self.load_sample(testsamples.sample_students)
-self.add_column("Schools", "Foo", formula='repr(RECORD($address))')
+self.add_column("Schools", "Foo", formula='RECORD($address)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'city': 'New York', 'id': 11}"],
-[2, "{'city': 'Colombia', 'id': 12}"],
-[3, "{'city': 'New Haven', 'id': 13}"],
-[4, "{'city': 'West Haven', 'id': 14}"],
+[1, {'city': 'New York', 'id': 11}],
+[2, {'city': 'Colombia', 'id': 12}],
+[3, {'city': 'New Haven', 'id': 13}],
+[4, {'city': 'West Haven', 'id': 14}],
 ])

 # A change to referenced data is still reflected; try a different kind of change here
 self.apply_user_action(["RenameColumn", "Address", "city", "ciudad"])
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'ciudad': 'New York', 'id': 11}"],
-[2, "{'ciudad': 'Colombia', 'id': 12}"],
-[3, "{'ciudad': 'New Haven', 'id': 13}"],
-[4, "{'ciudad': 'West Haven', 'id': 14}"],
+[1, {'ciudad': 'New York', 'id': 11}],
+[2, {'ciudad': 'Colombia', 'id': 12}],
+[3, {'ciudad': 'New Haven', 'id': 13}],
+[4, {'ciudad': 'West Haven', 'id': 14}],
 ])

 def test_record_expand_refs(self):
 self.load_sample(testsamples.sample_students)
-self.add_column("Schools", "Foo", formula='repr(RECORD(rec, expand_refs=1))')
+self.add_column("Schools", "Foo", formula='RECORD(rec, expand_refs=1)')
 self.add_column("Address", "student", type="Ref:Students")
 self.update_record("Address", 12, student=6)
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': {'city': 'New York', 'id': 11, 'student': Students[0]}," +
-" 'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': {'city': 'Colombia', 'id': 12, 'student': Students[6]}," +
-" 'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': {'city': 'New Haven', 'id': 13, 'student': Students[0]}," +
-" 'id': 3, 'name': 'Yale'}"],
-[4, "{'address': {'city': 'West Haven', 'id': 14, 'student': Students[0]}," +
-" 'id': 4, 'name': 'Yale'}"],
+[1, {'address': {'city': 'New York', 'id': 11, 'student': RecordStub("Students", 0)},
+'id': 1, 'name': 'Columbia'}],
+[2, {'address': {'city': 'Colombia', 'id': 12, 'student': RecordStub("Students", 6)},
+'id': 2, 'name': 'Columbia'}],
+[3, {'address': {'city': 'New Haven', 'id': 13, 'student': RecordStub("Students", 0)},
+'id': 3, 'name': 'Yale'}],
+[4, {'address': {'city': 'West Haven', 'id': 14, 'student': RecordStub("Students", 0)},
+'id': 4, 'name': 'Yale'}],
 ])

-self.modify_column("Schools", "Foo", formula='repr(RECORD(rec, expand_refs=2))')
+self.modify_column("Schools", "Foo", formula='RECORD(rec, expand_refs=2)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': {'city': 'New York', 'id': 11, 'student': None}," +
-" 'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': {'city': 'Colombia', 'id': 12, " +
-"'student': {'firstName': 'Gerald', 'schoolName': 'Yale', 'lastName': 'Ford', " +
-"'schoolCities': 'New Haven:West Haven', 'schoolIds': '3:4', 'id': 6}}," +
-" 'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': {'city': 'New Haven', 'id': 13, 'student': None}," +
-" 'id': 3, 'name': 'Yale'}"],
-[4, "{'address': {'city': 'West Haven', 'id': 14, 'student': None}," +
-" 'id': 4, 'name': 'Yale'}"],
+[1, {'address': {'city': 'New York', 'id': 11, 'student': None},
+'id': 1, 'name': 'Columbia'}],
+[2, {'address': {'city': 'Colombia', 'id': 12,
+'student': {'firstName': 'Gerald', 'schoolName': 'Yale', 'lastName': 'Ford',
+'schoolCities': 'New Haven:West Haven', 'schoolIds': '3:4', 'id': 6}},
+'id': 2, 'name': 'Columbia'}],
+[3, {'address': {'city': 'New Haven', 'id': 13, 'student': None},
+'id': 3, 'name': 'Yale'}],
+[4, {'address': {'city': 'West Haven', 'id': 14, 'student': None},
+'id': 4, 'name': 'Yale'}],
 ])

 def test_record_date_options(self):
 self.load_sample(testsamples.sample_students)
-self.add_column("Schools", "Foo", formula='repr(RECORD(rec, expand_refs=1))')
+self.add_column("Schools", "Foo", formula='RECORD(rec, expand_refs=1)')
 self.add_column("Address", "DT", type='DateTime')
 self.add_column("Address", "D", type='Date', formula="$DT and $DT.date()")
 self.update_records("Address", ['id', 'DT'], [
@@ -106,61 +107,57 @@ class TestRecordFunc(test_engine.EngineTestCase):
 d1 = datetime.datetime(2020, 9, 13, 8, 26, 40, tzinfo=moment.tzinfo('America/New_York'))
 d2 = datetime.datetime(2017, 7, 13, 22, 40, tzinfo=moment.tzinfo('America/New_York'))
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': {'city': 'New York', 'DT': %s, 'id': 11, 'D': %s}, " %
-(repr(d1), repr(d1.date())) +
-"'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': {'city': 'Colombia', 'DT': None, 'id': 12, 'D': None}, " +
-"'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': {'city': 'New Haven', 'DT': %s, 'id': 13, 'D': %s}, " %
-(repr(d2), repr(d2.date())) +
-"'id': 3, 'name': 'Yale'}"],
-[4, "{'address': {'city': 'West Haven', 'DT': None, 'id': 14, 'D': None}, " +
-"'id': 4, 'name': 'Yale'}"],
+[1, {'address': {'city': 'New York', 'DT': d1, 'id': 11, 'D': d1.date()},
+'id': 1, 'name': 'Columbia'}],
+[2, {'address': {'city': 'Colombia', 'DT': None, 'id': 12, 'D': None},
+'id': 2, 'name': 'Columbia'}],
+[3, {'address': {'city': 'New Haven', 'DT': d2, 'id': 13, 'D': d2.date()},
+'id': 3, 'name': 'Yale'}],
+[4, {'address': {'city': 'West Haven', 'DT': None, 'id': 14, 'D': None},
+'id': 4, 'name': 'Yale'}],
 ])

 self.modify_column("Schools", "Foo",
-formula='repr(RECORD(rec, expand_refs=1, dates_as_iso=True))')
+formula='RECORD(rec, expand_refs=1, dates_as_iso=True)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'address': {'city': 'New York', 'DT': '%s', 'id': 11, 'D': '%s'}, " %
-(d1.isoformat(), d1.date().isoformat()) +
-"'id': 1, 'name': 'Columbia'}"],
-[2, "{'address': {'city': 'Colombia', 'DT': None, 'id': 12, 'D': None}, " +
-"'id': 2, 'name': 'Columbia'}"],
-[3, "{'address': {'city': 'New Haven', 'DT': '%s', 'id': 13, 'D': '%s'}, " %
-(d2.isoformat(), d2.date().isoformat()) +
-"'id': 3, 'name': 'Yale'}"],
-[4, "{'address': {'city': 'West Haven', 'DT': None, 'id': 14, 'D': None}, " +
-"'id': 4, 'name': 'Yale'}"],
+[1, {'address': {'city': 'New York', 'DT': d1.isoformat(), 'id': 11, 'D': d1.date().isoformat()},
+'id': 1, 'name': 'Columbia'}],
+[2, {'address': {'city': 'Colombia', 'DT': None, 'id': 12, 'D': None},
+'id': 2, 'name': 'Columbia'}],
+[3, {'address': {'city': 'New Haven', 'DT': d2.isoformat(), 'id': 13, 'D': d2.date().isoformat()},
+'id': 3, 'name': 'Yale'}],
+[4, {'address': {'city': 'West Haven', 'DT': None, 'id': 14, 'D': None},
+'id': 4, 'name': 'Yale'}],
 ])

 def test_record_set(self):
 self.load_sample(testsamples.sample_students)
 self.add_column("Students", "schools", formula='Schools.lookupRecords(name=$schoolName)')
-self.add_column("Students", "Foo", formula='repr(RECORD($schools))')
+self.add_column("Students", "Foo", formula='RECORD($schools)')
 self.assertPartialData("Students", ["id", "Foo"], [
-[1, "[{'address': Address[11], 'id': 1, 'name': 'Columbia'}," +
-" {'address': Address[12], 'id': 2, 'name': 'Columbia'}]"],
-[2, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}," +
-" {'address': Address[14], 'id': 4, 'name': 'Yale'}]"],
-[3, "[{'address': Address[11], 'id': 1, 'name': 'Columbia'}," +
-" {'address': Address[12], 'id': 2, 'name': 'Columbia'}]"],
-[4, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}," +
-" {'address': Address[14], 'id': 4, 'name': 'Yale'}]"],
-[5, "[]"],
-[6, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}," +
-" {'address': Address[14], 'id': 4, 'name': 'Yale'}]"],
+[1, [{'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'},
+{'address': RecordStub('Address', 12), 'id': 2, 'name': 'Columbia'}]],
+[2, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'},
+{'address': RecordStub('Address', 14), 'id': 4, 'name': 'Yale'}]],
+[3, [{'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'},
+{'address': RecordStub('Address', 12), 'id': 2, 'name': 'Columbia'}]],
+[4, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'},
+{'address': RecordStub('Address', 14), 'id': 4, 'name': 'Yale'}]],
+[5, []],
+[6, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'},
+{'address': RecordStub('Address', 14), 'id': 4, 'name': 'Yale'}]],
 ])

 # Try a field with filtered lookupRecords result, as an iterable.
 self.modify_column("Students", "Foo",
-formula='repr(RECORD(s for s in $schools if s.address.city.startswith("New")))')
+formula='RECORD(s for s in $schools if s.address.city.startswith("New"))')
 self.assertPartialData("Students", ["id", "Foo"], [
-[1, "[{'address': Address[11], 'id': 1, 'name': 'Columbia'}]"],
-[2, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}]"],
-[3, "[{'address': Address[11], 'id': 1, 'name': 'Columbia'}]"],
-[4, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}]"],
-[5, "[]"],
-[6, "[{'address': Address[13], 'id': 3, 'name': 'Yale'}]"],
+[1, [{'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'}]],
+[2, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'}]],
+[3, [{'address': RecordStub('Address', 11), 'id': 1, 'name': 'Columbia'}]],
+[4, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'}]],
+[5, []],
+[6, [{'address': RecordStub('Address', 13), 'id': 3, 'name': 'Yale'}]],
 ])

 def test_record_bad_calls(self):
@@ -172,25 +169,25 @@ class TestRecordFunc(test_engine.EngineTestCase):
 [3, objtypes.RaisedException(ValueError())],
 [4, objtypes.RaisedException(ValueError())],
 ])
-self.modify_column("Schools", "Foo", formula='repr(RECORD([rec] if $id == 2 else $id))')
+self.modify_column("Schools", "Foo", formula='repr(sorted(RECORD(rec if $id == 2 else $id).items()))')
 self.assertPartialData("Schools", ["id", "Foo"], [
 [1, objtypes.RaisedException(ValueError())],
-[2, "[{'address': Address[12], 'id': 2, 'name': 'Columbia'}]"],
+[2, "[('address', Address[12]), ('id', 2), ('name', 'Columbia')]"],
 [3, objtypes.RaisedException(ValueError())],
 [4, objtypes.RaisedException(ValueError())],
 ])
-self.assertEqual(self.engine.get_formula_error('Schools', 'Foo', 1).error.message,
+self.assertEqual(str(self.engine.get_formula_error('Schools', 'Foo', 1).error),
 'RECORD() requires a Record or an iterable of Records')

 def test_record_error_cells(self):
 self.load_sample(testsamples.sample_students)
-self.add_column("Schools", "Foo", formula='repr(RECORD($address))')
-self.add_column("Address", "Bar", formula='$id/($id%2)')
+self.add_column("Schools", "Foo", formula='RECORD($address)')
+self.add_column("Address", "Bar", formula='$id//($id%2)')
 self.assertPartialData("Schools", ["id", "Foo"], [
-[1, "{'city': 'New York', 'Bar': 11, 'id': 11}"],
-[2, "{'city': 'Colombia', 'Bar': None, 'id': 12, " +
-"'_error_': {'Bar': 'ZeroDivisionError: integer division or modulo by zero'}}"],
-[3, "{'city': 'New Haven', 'Bar': 13, 'id': 13}"],
-[4, "{'city': 'West Haven', 'Bar': None, 'id': 14, " +
-"'_error_': {'Bar': 'ZeroDivisionError: integer division or modulo by zero'}}"],
+[1, {'city': 'New York', 'Bar': 11, 'id': 11}],
+[2, {'city': 'Colombia', 'Bar': None, 'id': 12,
+'_error_': {'Bar': 'ZeroDivisionError: integer division or modulo by zero'}}],
+[3, {'city': 'New Haven', 'Bar': 13, 'id': 13}],
+[4, {'city': 'West Haven', 'Bar': None, 'id': 14,
+'_error_': {'Bar': 'ZeroDivisionError: integer division or modulo by zero'}}],
 ])
@@ -1,7 +1,8 @@
 import relabeling

 from sortedcontainers import SortedListWithKey
-from itertools import izip
+from six.moves import zip as izip, xrange

 import unittest
 import sys

@@ -264,7 +265,7 @@ class TestRelabeling(unittest.TestCase):
 slist.insert_items([(i, float('-inf')), (-i, float('inf'))])

 self.assertEqual(slist.get_values(),
-rev_range(2000) + [v for v,k in initial] + range(0, -2000, -1))
+rev_range(2000) + [v for v,k in initial] + list(xrange(0, -2000, -1)))
 #print slist.num_update_events, slist.num_updated_keys
 self.assertLess(slist.avg_updated_keys(), 3)
 self.assertLess(slist.num_update_events, 80)
@@ -276,7 +277,7 @@ class TestRelabeling(unittest.TestCase):
 slist.insert_items([(i, ins_item.key)])

 # Check the end result
-self.assertEqual(slist.get_values(), ['a', 'b'] + range(1000) + ['c', 'd'])
+self.assertEqual(slist.get_values(), ['a', 'b'] + list(xrange(1000)) + ['c', 'd'])
 self.assertAlmostEqual(slist.avg_updated_keys(), 3.5, delta=1)
 self.assertLess(slist.num_update_events, 40)

@@ -299,7 +300,7 @@ class TestRelabeling(unittest.TestCase):
 ins_item = slist.find_value('c')
 for i in xrange(1000):
 slist.insert_items([(i, ins_item.key)], prepare_inserts=r.prepare_inserts_dumb)
-self.assertEqual(slist.get_values(), ['a', 'b'] + range(1000) + ['c', 'd'])
+self.assertEqual(slist.get_values(), ['a', 'b'] + list(xrange(1000)) + ['c', 'd'])
 self.assertGreater(slist.avg_updated_keys(), 8)

 def test_renumber_right_dumb(self):
@@ -334,7 +335,7 @@ class TestRelabeling(unittest.TestCase):
 # aL1, al0, al1, a, ar1, ar0, aR1, ...
 # aL1, al0, aL2, al1, al2, a, ar2, ar1, aR2, ar0, aR1, ...
 def left_half(val):
-half = range(2*N - 1)
+half = list(xrange(2*N - 1))
 half[0::2] = ['%sL%d' % (val, i) for i in xrange(1, N + 1)]
 half[1::2] = ['%sl%d' % (val, i) for i in xrange(0, N - 1)]
 half[-1] = '%sl%d' % (val, N - 1)
@@ -355,7 +356,7 @@ class TestRelabeling(unittest.TestCase):


 def rev_range(n):
-return list(reversed(range(n)))
+return list(reversed(list(xrange(n))))

 if __name__ == "__main__":
 unittest.main()
@@ -31,7 +31,7 @@ class TestSummary2(test_engine.EngineTestCase):
 self.apply_user_action(["CreateViewSection", 1, 0, "record", []])

 # Check that we cannot add a non-formula column.
-with self.assertRaisesRegexp(ValueError, r'non-formula column'):
+with self.assertRaisesRegex(ValueError, r'non-formula column'):
 self.apply_user_action(["AddColumn", "GristSummary_7_Address", "average",
 {"type": "Text", "isFormula": False}])

@@ -171,7 +171,7 @@ class TestSummary2(test_engine.EngineTestCase):

 # Check that we cannot rename a summary group-by column. (Perhaps it's better to raise an
 # exception, but currently we translate the invalid request to a no-op.)
-with self.assertRaisesRegexp(ValueError, r'Cannot modify .* group-by'):
+with self.assertRaisesRegex(ValueError, r'Cannot modify .* group-by'):
 self.apply_user_action(["RenameColumn", "GristSummary_7_Address", "state", "s"])

 # Verify all data. We'll repeat this after renamings to make sure there are no errors.
@@ -353,46 +353,46 @@
 ])

 # (1) no adding/removing/renaming non-formula columns.
-with self.assertRaisesRegexp(ValueError, r'non-formula column'):
+with self.assertRaisesRegex(ValueError, r'non-formula column'):
 self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo",
 {"type": "Numeric", "isFormula": False}])

-with self.assertRaisesRegexp(ValueError, r'group-by column'):
+with self.assertRaisesRegex(ValueError, r'group-by column'):
 self.apply_user_action(["RemoveColumn", "GristSummary_7_Address", "state"])

-with self.assertRaisesRegexp(ValueError, r'Cannot modify .* group-by'):
+with self.assertRaisesRegex(ValueError, r'Cannot modify .* group-by'):
 self.apply_user_action(["RenameColumn", "GristSummary_7_Address", "state", "st"])

 # (2) no converting between formula/non-formula
-with self.assertRaisesRegexp(ValueError, r'Cannot change .* formula and data'):
+with self.assertRaisesRegex(ValueError, r'Cannot change .* formula and data'):
 self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "amount",
 {"isFormula": False}])

-with self.assertRaisesRegexp(ValueError, r'Cannot change .* formula and data'):
+with self.assertRaisesRegex(ValueError, r'Cannot change .* formula and data'):
 self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "state",
 {"isFormula": True}])

 # (3) no editing values in non-formula columns
-with self.assertRaisesRegexp(ValueError, r'Cannot enter data .* group-by'):
+with self.assertRaisesRegex(ValueError, r'Cannot enter data .* group-by'):
 self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 6, {"state": "ny"}])

 # (4) no removing rows (this is questionable b/c empty rows might be OK to remove)
-with self.assertRaisesRegexp(ValueError, r'Cannot remove record .* summary'):
+with self.assertRaisesRegex(ValueError, r'Cannot remove record .* summary'):
 self.apply_user_action(["RemoveRecord", "GristSummary_7_Address", 6])

 # (5) no renaming summary tables.
-with self.assertRaisesRegexp(ValueError, r'cannot rename .* summary'):
+with self.assertRaisesRegex(ValueError, r'cannot rename .* summary'):
 self.apply_user_action(["RenameTable", "GristSummary_7_Address", "GristSummary_hello"])

 # Check that we can add an empty column, then set a formula for it.
 self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo", {}])
 self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "foo", {"formula": "1+1"}])
-with self.assertRaisesRegexp(ValueError, "Can't save .* to formula"):
+with self.assertRaisesRegex(ValueError, "Can't save .* to formula"):
 self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 1, {"foo": "hello"}])

 # But we cannot add an empty column, then add a value to it.
 self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo2", {}])
-with self.assertRaisesRegexp(ValueError, r'Cannot change .* between formula and data'):
+with self.assertRaisesRegex(ValueError, r'Cannot change .* between formula and data'):
 self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 1, {"foo2": "hello"}])

 self.assertTableData('GristSummary_7_Address', cols="all", data=[
@@ -1,3 +1,5 @@
+import six
+
 import actions
 import schema
 import table_data_set
@@ -37,7 +39,7 @@ class TestTableDataSet(unittest.TestCase):
 if a.table_id not in self._table_data_set.all_tables:
 self._table_data_set.apply_doc_action(a)

-for a in sample["SCHEMA"].itervalues():
+for a in six.itervalues(sample["SCHEMA"]):
 self._table_data_set.BulkAddRecord(*a)

 # Create AddTable actions for each table described in the metadata.
@@ -61,11 +63,11 @@ class TestTableDataSet(unittest.TestCase):
 })

 # Sort the columns in the schema according to the parentPos field from the column records.
-for action in add_tables.itervalues():
+for action in six.itervalues(add_tables):
 action.columns.sort(key=lambda r: r["parentPos"])
 self._table_data_set.AddTable(*action)

-for a in sample["DATA"].itervalues():
+for a in six.itervalues(sample["DATA"]):
 self._table_data_set.ReplaceTableData(*a)


@@ -92,11 +94,11 @@ class TestTableDataSet(unittest.TestCase):
 if "USE_SAMPLE" in data:
 expected_data = self.samples[data.pop("USE_SAMPLE")]["DATA"].copy()
 expected_data.update({t: testutil.table_data_from_rows(t, tdata[0], tdata[1:])
-for (t, tdata) in data.iteritems()})
+for (t, tdata) in six.iteritems(data)})
 self._verify_data(expected_data)
 else:
 raise ValueError("Unrecognized step %s in test script" % step)
-except Exception, e:
+except Exception as e:
 new_args0 = "LINE %s: %s" % (line, e.args[0])
 e.args = (new_args0,) + e.args[1:]
 raise
@@ -117,7 +119,7 @@ class TestTableDataSet(unittest.TestCase):

 def _verify_data(self, expected_data, ignore_formulas=False):
 observed_data = {t: self._prep_data(*data)
-for t, data in self._table_data_set.all_tables.iteritems()
+for t, data in six.iteritems(self._table_data_set.all_tables)
 if not t.startswith("_grist_")}
 if ignore_formulas:
 observed_data = self._strip_formulas(observed_data)
@@ -125,7 +127,7 @@ class TestTableDataSet(unittest.TestCase):

 if observed_data != expected_data:
 lines = []
-for table in sorted(observed_data.viewkeys() | expected_data.viewkeys()):
+for table in sorted(six.viewkeys(observed_data) | six.viewkeys(expected_data)):
 if table not in expected_data:
 lines.append("*** Table %s observed but not expected\n" % table)
 elif table not in observed_data:
@@ -141,11 +143,11 @@ class TestTableDataSet(unittest.TestCase):
 self.fail("\n" + "".join(lines))

 def _strip_formulas(self, all_data):
-return {t: self._strip_formulas_table(*data) for t, data in all_data.iteritems()}
+return {t: self._strip_formulas_table(*data) for t, data in six.iteritems(all_data)}

 def _strip_formulas_table(self, table_id, row_ids, columns):
 return actions.TableData(table_id, row_ids, {
-col_id: col for col_id, col in columns.iteritems()
+col_id: col for col_id, col in six.iteritems(columns)
 if not self._table_data_set.get_col_info(table_id, col_id)["isFormula"]
 })

@@ -155,7 +157,7 @@ class TestTableDataSet(unittest.TestCase):
 return [v for r, v in sorted(zip(row_ids, col))]

 sorted_data = actions.TableData(table_id, sorted(row_ids),
-{c: sort(col) for c, col in columns.iteritems()})
+{c: sort(col) for c, col in six.iteritems(columns)})
 return actions.encode_objects(testutil.replace_nans(sorted_data))

 @classmethod
@@ -9,7 +9,7 @@ class TestTextBuilder(unittest.TestCase):
 def test_validate_patch(self):
 text = "To be or not to be"
 patch = make_patch(text, 3, 8, "SEE OR")
-self.assertEquals(textbuilder.validate_patch(text, patch), None)
+self.assertEqual(textbuilder.validate_patch(text, patch), None)
 with self.assertRaises(ValueError):
 textbuilder.validate_patch('X' + text, patch)

@@ -19,15 +19,15 @@ class TestTextBuilder(unittest.TestCase):
 patches = make_regexp_patches(t1.get_text(), re.compile(r'be|to', re.I),
 lambda m: (m.group() + m.group()).upper())
 t2 = textbuilder.Replacer(t1, patches)
-self.assertEquals(t2.get_text(), "TOTO BEBE or not\n TOTO BEBE?\n")
-self.assertEquals(t2.map_back_patch(make_patch(t2.get_text(), 0, 4, "xxx")),
+self.assertEqual(t2.get_text(), "TOTO BEBE or not\n TOTO BEBE?\n")
+self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 0, 4, "xxx")),
 (t1.get_text(), value, Patch(0, 2, "To", "xxx")))
-self.assertEquals(t2.map_back_patch(make_patch(t2.get_text(), 5, 9, "xxx")),
+self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 5, 9, "xxx")),
 (t1.get_text(), value, Patch(3, 5, "be", "xxx")))
-self.assertEquals(t2.map_back_patch(make_patch(t2.get_text(), 18, 23, "xxx")),
+self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 18, 23, "xxx")),
 (t1.get_text(), value, Patch(14, 17, " to", "xxx")))
 # Match the entire second line
-self.assertEquals(t2.map_back_patch(make_patch(t2.get_text(), 17, 29, "xxx")),
+self.assertEqual(t2.map_back_patch(make_patch(t2.get_text(), 17, 29, "xxx")),
 (t1.get_text(), value, Patch(13, 21, " to be?", "xxx")))

 def test_combiner(self):
@@ -26,7 +26,7 @@ class TestUndo(test_engine.EngineTestCase):
 # Now undo just the first action. The list of undo DocActions for it does not mention the
 # newly added column, and fails to clean it up. This would leave the doc in an inconsistent
 # state, and we should not allow it.
-with self.assertRaisesRegexp(AssertionError,
+with self.assertRaisesRegex(AssertionError,
 re.compile(r"Internal schema inconsistent.*'NewCol'", re.S)):
 self.apply_undo_actions(out_actions1.undo)

@@ -40,7 +40,7 @@ class TestUndo(test_engine.EngineTestCase):
 # In practice it's harmless: properly calculated fields get restored correct, and the private
 # metadata fields get brought up-to-date when used via Record interface, which is what we do
 # using this assertEqual().
-self.assertEqual([[r.id, r.tableId, map(int, r.columns)]
+self.assertEqual([[r.id, r.tableId, list(map(int, r.columns))]
 for r in self.engine.docmodel.tables.table.filter_records()], [
 [1, "Students", [1,2,4,5,6]],
 [2, "Schools", [10,12]],
@@ -73,7 +73,7 @@ class TestUndo(test_engine.EngineTestCase):

 # The undo failed, and data should look as before the undo.
 self.engine.assert_schema_consistent()
-self.assertEqual([[r.id, r.tableId, map(int, r.columns)]
+self.assertEqual([[r.id, r.tableId, list(map(int, r.columns))]
 for r in self.engine.docmodel.tables.table.filter_records()], [
 [1, "Students", [1,2,4,5,6]],
 [2, "Schools", [10,12]],
@@ -689,7 +689,7 @@ class TestUserActions(test_engine.EngineTestCase):

 # Simple failure: bad action (last argument should be a dict). It shouldn't cause any actions
 # in the first place, just raise an exception about the argument being an int.
-with self.assertRaisesRegexp(AttributeError, r"'int'"):
+with self.assertRaisesRegex(AttributeError, r"'int'"):
 self.apply_user_action(['AddColumn', 'Address', "A", 17])

 # Do some successful actions, just to make sure we know what they look like.
@@ -699,14 +699,14 @@ class TestUserActions(test_engine.EngineTestCase):
 )])

 # More complicated: here some actions should succeed, but get reverted when a later one fails.
-with self.assertRaisesRegexp(AttributeError, r"'int'"):
+with self.assertRaisesRegex(AttributeError, r"'int'"):
 self.engine.apply_user_actions([useractions.from_repr(ua) for ua in (
 ['UpdateRecord', 'Address', 11, {"city": "New York3"}],
 ['AddColumn', 'Address', "C", {"isFormula": True}],
 ['AddColumn', 'Address', "D", 17]
 )])

-with self.assertRaisesRegexp(Exception, r"non-existent record #77"):
+with self.assertRaisesRegex(Exception, r"non-existent record #77"):
 self.engine.apply_user_actions([useractions.from_repr(ua) for ua in (
 ['UpdateRecord', 'Address', 11, {"city": "New York4"}],
 ['UpdateRecord', 'Address', 77, {"city": "Chicago"}],
@ -454,16 +454,16 @@
|
|||||||
// Check that Update Record on _grist_Tables_column properly triggers schema-change actions
|
// Check that Update Record on _grist_Tables_column properly triggers schema-change actions
|
||||||
"USER_ACTIONS": [
|
"USER_ACTIONS": [
|
||||||
["UpdateRecord", "_grist_Tables_column", 3,
|
["UpdateRecord", "_grist_Tables_column", 3,
|
||||||
{"formula": "str.upper(rec.firstName) + ' ' + rec.lastName"}],
|
{"formula": "rec.firstName.upper() + ' ' + rec.lastName"}],
|
||||||
["UpdateRecord", "_grist_Tables_column", 6,
|
["UpdateRecord", "_grist_Tables_column", 6,
|
||||||
{"colId" : "shortSchool"}]
|
{"colId" : "shortSchool"}]
|
||||||
],
|
],
|
||||||
"ACTIONS": {
|
"ACTIONS": {
|
||||||
"stored": [
|
"stored": [
|
||||||
["ModifyColumn", "Students", "fullName",
|
["ModifyColumn", "Students", "fullName",
|
||||||
{"formula": "str.upper(rec.firstName) + ' ' + rec.lastName"}],
|
{"formula": "rec.firstName.upper() + ' ' + rec.lastName"}],
|
||||||
["UpdateRecord", "_grist_Tables_column", 3,
|
["UpdateRecord", "_grist_Tables_column", 3,
|
||||||
{"formula": "str.upper(rec.firstName) + ' ' + rec.lastName"}],
|
{"formula": "rec.firstName.upper() + ' ' + rec.lastName"}],
|
||||||
["RenameColumn", "Students", "schoolShort", "shortSchool"],
|
["RenameColumn", "Students", "schoolShort", "shortSchool"],
|
||||||
["UpdateRecord", "_grist_Tables_column", 6, {"colId": "shortSchool"}],
|
["UpdateRecord", "_grist_Tables_column", 6, {"colId": "shortSchool"}],
|
||||||
["BulkUpdateRecord", "Students", [1, 2, 3, 4, 5, 6, 8],
|
["BulkUpdateRecord", "Students", [1, 2, 3, 4, 5, 6, 8],
|
||||||
@ -1921,14 +1921,14 @@
|
|||||||
// Check formula error handling
|
// Check formula error handling
|
||||||
"USER_ACTIONS" : [
|
"USER_ACTIONS" : [
|
||||||
["ModifyColumn", "Students", "fullName", {"formula" : "!#@%&T#$UDSAIKVFsdhifzsk" }],
|
["ModifyColumn", "Students", "fullName", {"formula" : "!#@%&T#$UDSAIKVFsdhifzsk" }],
|
||||||
["ModifyColumn", "Students", "schoolRegion", {"formula" : "5*len($firstName) / $fullNameLen" }]
|
["ModifyColumn", "Students", "schoolRegion", {"formula" : "5*len($firstName) // $fullNameLen" }]
|
||||||
],
|
],
|
||||||
"ACTIONS" : {
|
"ACTIONS" : {
|
||||||
"stored" : [
|
"stored" : [
|
||||||
["ModifyColumn", "Students", "fullName", {"formula": "!#@%&T#$UDSAIKVFsdhifzsk"}],
|
["ModifyColumn", "Students", "fullName", {"formula": "!#@%&T#$UDSAIKVFsdhifzsk"}],
|
||||||
["UpdateRecord", "_grist_Tables_column", 3, {"formula": "!#@%&T#$UDSAIKVFsdhifzsk"}],
|
["UpdateRecord", "_grist_Tables_column", 3, {"formula": "!#@%&T#$UDSAIKVFsdhifzsk"}],
|
||||||
["ModifyColumn", "Students", "schoolRegion", {"formula": "5*len($firstName) / $fullNameLen"}],
|
["ModifyColumn", "Students", "schoolRegion", {"formula": "5*len($firstName) // $fullNameLen"}],
|
||||||
["UpdateRecord", "_grist_Tables_column", 9, {"formula": "5*len($firstName) / $fullNameLen"}],
|
["UpdateRecord", "_grist_Tables_column", 9, {"formula": "5*len($firstName) // $fullNameLen"}],
|
||||||
["BulkUpdateRecord", "Students", [1, 2, 3, 4, 5, 6, 8],
|
["BulkUpdateRecord", "Students", [1, 2, 3, 4, 5, 6, 8],
|
||||||
{"fullName" : [["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"],
|
{"fullName" : [["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"],
|
||||||
["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"]]
|
["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"], ["E","SyntaxError"]]
|
||||||
@@ -2719,15 +2719,15 @@
    ["APPLY", {
      "USER_ACTIONS": [
        // Basic tests
-       ["EvalCode", "print 'cats'", null],
-       ["EvalCode", "2 * 3 - 1 / 7 + 4", null],
+       ["EvalCode", "print('cats')", null],
+       ["EvalCode", "2 * 3 - 1 // 7 + 4", null],
        // Exception
        ["EvalCode", "raise Exception('everything broke')", null],
        // Incomplete structure
        ["EvalCode", "for x in range(1, 100):", null],
        // Re-evaluation
-       ["EvalCode", "print 'cats'", 1],
-       ["EvalCode", "print 'dogs'", 1],
+       ["EvalCode", "print('cats')", 1],
+       ["EvalCode", "print('dogs')", 1],
        // Function definition
        ["EvalCode", "def foo(x):\n\treturn x * 10\n", null],
        ["EvalCode", "foo(10)", null]
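
`print 'cats'` is Python-2-only statement syntax; `print('cats')` is a function call on Python 3 and, with a single argument, also parses on Python 2 (the parentheses read as grouping). Standalone:

    from __future__ import print_function  # optional; makes print a function on Python 2 too

    print('cats')    # prints "cats" on both versions
    # print 'cats'   # SyntaxError on Python 3
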
@@ -2735,15 +2735,15 @@
    "ACTIONS" : {
      "stored" : [
        ["AddRecord", "_grist_REPL_Hist", 1,
-        {"code": "print 'cats'", "errorText": "", "outputText": "cats\n"}],
+        {"code": "print('cats')", "errorText": "", "outputText": "cats\n"}],
        ["AddRecord", "_grist_REPL_Hist", 2,
-        {"code": "2 * 3 - 1 / 7 + 4", "errorText": "", "outputText": "10\n"}],
+        {"code": "2 * 3 - 1 // 7 + 4", "errorText": "", "outputText": "10\n"}],
        ["AddRecord", "_grist_REPL_Hist", 3,
         {"code": "raise Exception('everything broke')", "errorText": "Traceback (most recent call last):\n File \"<input>\", line 1, in <module>\nException: everything broke\n", "outputText": ""}],
        ["UpdateRecord", "_grist_REPL_Hist", 1,
-        {"code": "print 'cats'", "errorText": "", "outputText": "cats\n"}],
+        {"code": "print('cats')", "errorText": "", "outputText": "cats\n"}],
        ["UpdateRecord", "_grist_REPL_Hist", 1,
-        {"code": "print 'dogs'", "errorText": "", "outputText": "dogs\n"}],
+        {"code": "print('dogs')", "errorText": "", "outputText": "dogs\n"}],
        ["AddRecord", "_grist_REPL_Hist", 4,
         {"code": "def foo(x):\n\treturn x * 10\n", "errorText": "", "outputText": ""}],
        ["AddRecord", "_grist_REPL_Hist", 5,
@@ -2755,9 +2755,9 @@
        ["RemoveRecord", "_grist_REPL_Hist", 2],
        ["RemoveRecord", "_grist_REPL_Hist", 3],
        ["UpdateRecord", "_grist_REPL_Hist", 1,
-        {"code": "print 'cats'", "errorText": "", "outputText": "cats\n"}],
+        {"code": "print('cats')", "errorText": "", "outputText": "cats\n"}],
        ["UpdateRecord", "_grist_REPL_Hist", 1,
-        {"code": "print 'cats'", "errorText": "", "outputText": "cats\n"}],
+        {"code": "print('cats')", "errorText": "", "outputText": "cats\n"}],
        ["RemoveRecord", "_grist_REPL_Hist", 4],
        ["RemoveRecord", "_grist_REPL_Hist", 5]
      ],

@@ -3,6 +3,8 @@ import math
 import os
 import re
 
+import six
+
 import actions
 import logger
 
@@ -103,7 +105,7 @@ def parse_test_sample(obj, samples={}):
   }
 
   data = {t: table_data_from_rows(t, data[0], data[1:])
-          for t, data in obj["DATA"].iteritems()}
+          for t, data in six.iteritems(obj["DATA"])}
   return {"SCHEMA": schema, "DATA": data}
 
 
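
`six.iteritems(d)` dispatches to `d.iteritems()` on Python 2 and `iter(d.items())` on Python 3, preserving lazy iteration over items on both. The same pattern in isolation (the dict contents are illustrative):

    import six

    d = {'Students': 1, 'Schools': 2}
    for key, value in six.iteritems(d):  # no intermediate list on either version
      print(key, value)
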

@@ -11,6 +11,8 @@ import bisect
 import re
 from collections import namedtuple
 
+import six
+
 Patch = namedtuple('Patch', ('start', 'end', 'old_text', 'new_text'))
 
 line_start_re = re.compile(r'^', re.M)
@@ -176,4 +178,4 @@ class Combiner(Builder):
     offset = self._offsets[start_index - 1]
     part = self._parts[start_index - 1]
     in_patch = Patch(patch.start - offset, patch.end - offset, patch.old_text, patch.new_text)
-    return None if isinstance(part, basestring) else part.map_back_patch(in_patch)
+    return None if isinstance(part, six.string_types) else part.map_back_patch(in_patch)
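
`six.string_types` is `(basestring,)` on Python 2 and `(str,)` on Python 3, so the isinstance check keeps its old meaning without naming `basestring`, which Python 3 removed. A standalone sketch (`is_textual` is just an illustrative name):

    import six

    def is_textual(value):
      # True for str and unicode on Python 2, for str on Python 3
      return isinstance(value, six.string_types)

    assert is_textual('plain') and not is_textual(42)
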

@@ -14,6 +14,8 @@ value previously set, since the "right" dataset is "single" values), m.lookup_le
 that value, and m.lookup_right(value) returns a `set` of keys that map to the value.
 """
 
+import six
+
 # Special sentinel value which can never be legitimately stored in TwoWayMap, to easily tell the
 # difference between a present and absent value.
 _NIL = object()
@@ -47,6 +49,8 @@ class TwoWayMap(object):
   def __nonzero__(self):
     return bool(self._fwd)
 
+  __bool__ = __nonzero__
+
   def lookup_left(self, left, default=None):
     """ Returns the value(s) on the right corresponding to the given value on the left. """
     return self._fwd.get(left, default)
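
Truth testing calls `__nonzero__` on Python 2 but `__bool__` on Python 3; aliasing one to the other makes `bool(m)` and `if m:` behave identically on both. The same trick on a toy class (illustrative only):

    class Bag(object):
      def __init__(self):
        self._items = []

      def __nonzero__(self):     # consulted by Python 2 truth testing
        return bool(self._items)

      __bool__ = __nonzero__     # Python 3 looks for __bool__ instead

    assert not Bag()
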
@@ -65,11 +69,11 @@ class TwoWayMap(object):
 
   def left_all(self):
     """ Returns an iterable over all values on the left."""
-    return self._fwd.iterkeys()
+    return six.iterkeys(self._fwd)
 
   def right_all(self):
     """ Returns an iterable over all values on the right."""
-    return self._bwd.iterkeys()
+    return six.iterkeys(self._bwd)
 
   def insert(self, left, right):
     """ Insert the (left, right) value pair. """
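
Like `six.iteritems`, `six.iterkeys(d)` picks `d.iterkeys()` on Python 2 and `iter(d.keys())` on Python 3. Since iterating a dict directly yields its keys on both versions, `iter(self._fwd)` would be an equivalent spelling here:

    import six

    d = {'x': 1, 'y': 2}
    assert sorted(six.iterkeys(d)) == sorted(iter(d)) == ['x', 'y']
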

@@ -4,6 +4,8 @@ import re
 import json
 import sys
 
+import six
+
 import acl
 from acl_formula import parse_acl_formula_json
 import actions
@@ -45,7 +47,7 @@ _modify_col_schema_props = {'type', 'formula', 'isFormula'}
 # A few generic helpers.
 def select_keys(dict_obj, keys):
   """Return copy of dict_obj containing only the given keys."""
-  return {k: v for k, v in dict_obj.iteritems() if k in keys}
+  return {k: v for k, v in six.iteritems(dict_obj) if k in keys}
 
 def has_value(dict_obj, key, value):
   """Returns True if dict_obj contains key, and its value is value."""
@@ -78,7 +80,7 @@ def useraction(method):
   """
   Decorator for a method, which creates an action class with the same name and arguments.
   """
-  code = method.func_code
+  code = method.__code__
   name = method.__name__
   cls = namedtuple(name, code.co_varnames[1:code.co_argcount])
   setattr(_current_module, name, cls)
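
`method.func_code` exists only on Python 2; the `__code__` attribute is available on Python 2.6+ and Python 3, so the decorator's inspection of `co_varnames` and `co_argcount` is unchanged. Standalone (`add` is an illustrative function):

    def add(self, x, y):
      return x + y

    code = add.__code__   # works on Python 2.6+ and Python 3
    assert code.co_varnames[1:code.co_argcount] == ('x', 'y')  # args after 'self'
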
@@ -148,7 +150,7 @@ class UserActions(object):
     # Map of methods implementing particular (action_name, table_id) combinations. It mirrors
     # global _action_method_overrides, but with methods *bound* to this UserActions instance.
     self._overrides = {key: method.__get__(self, UserActions)
-                       for key, method in _action_method_overrides.iteritems()}
+                       for key, method in six.iteritems(_action_method_overrides)}
 
   def _do_doc_action(self, action):
     if hasattr(action, 'simplify'):
@@ -169,7 +171,7 @@ class UserActions(object):
     for i, row_id in enumerate(row_ids):
       rec = table.get_record(row_id)
       yield ((i, rec) if col_values is None else
-             (i, rec, {k: v[i] for k, v in col_values.iteritems()}))
+             (i, rec, {k: v[i] for k, v in six.iteritems(col_values)}))
 
   def _collect_back_references(self, table_recs):
     """
@@ -273,13 +275,13 @@ class UserActions(object):
   @useraction
   def AddRecord(self, table_id, row_id, column_values):
     return self.BulkAddRecord(
-      table_id, [row_id], {key: [val] for key, val in column_values.iteritems()}
+      table_id, [row_id], {key: [val] for key, val in six.iteritems(column_values)}
     )[0]
 
   @useraction
   def BulkAddRecord(self, table_id, row_ids, column_values):
     column_values = actions.decode_bulk_values(column_values)
-    for col_id, values in column_values.iteritems():
+    for col_id, values in six.iteritems(column_values):
       self._ensure_column_accepts_data(table_id, col_id, values)
     method = self._overrides.get(('BulkAddRecord', table_id), self.doBulkAddOrReplace)
     return method(table_id, row_ids, column_values)
@@ -339,7 +341,7 @@ class UserActions(object):
   def _addACLRules(self, table_id, row_ids, col_values):
     # Automatically populate aclFormulaParsed value by parsing aclFormula.
     if 'aclFormula' in col_values:
-      col_values['aclFormulaParsed'] = map(parse_acl_formula_json, col_values['aclFormula'])
+      col_values['aclFormulaParsed'] = [parse_acl_formula_json(v) for v in col_values['aclFormula']]
     return self.doBulkAddOrReplace(table_id, row_ids, col_values)
 
   #----------------------------------------
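
On Python 3, `map()` returns a one-shot iterator rather than a list. Because the parsed formulas are stored as column values, the list comprehension guarantees a real list on both versions. The general pattern, in isolation:

    values = ['1', '2', '3']
    as_ints = [int(v) for v in values]  # a list on Python 2 and 3
    # map(int, values) is a list on Python 2 but a lazy iterator on Python 3
    assert as_ints == [1, 2, 3]
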
@@ -371,7 +373,7 @@ class UserActions(object):
   @useraction
   def UpdateRecord(self, table_id, row_id, columns):
     self.BulkUpdateRecord(table_id, [row_id],
-                          {key: [col] for key, col in columns.iteritems()})
+                          {key: [col] for key, col in six.iteritems(columns)})
 
   @useraction
   def BulkUpdateRecord(self, table_id, row_ids, columns):
@@ -380,7 +382,7 @@ class UserActions(object):
     # Handle special tables, updates to which imply metadata actions.
 
     # Check that the update is valid.
-    for col_id, values in columns.iteritems():
+    for col_id, values in six.iteritems(columns):
       self._ensure_column_accepts_data(table_id, col_id, values)
 
     # Additionally check that we are not trying to modify group-by values in a summary column
@@ -413,7 +415,7 @@ class UserActions(object):
 
   @override_action('BulkUpdateRecord', '_grist_Tables')
   def _updateTableRecords(self, table_id, row_ids, col_values):
-    avoid_tableid_set = set(self._engine.tables.viewkeys())
+    avoid_tableid_set = set(self._engine.tables)
     update_pairs = []
     for i, rec, values in self._bulk_action_iter(table_id, row_ids, col_values):
       update_pairs.append((rec, values))
@@ -451,9 +453,9 @@ class UserActions(object):
     if table_renames:
       # Build up a dictionary mapping col_ref of each affected formula to the new formula text.
       formula_updates = self._prepare_formula_renames(
-        {(old, None): new for (old, new) in table_renames.iteritems()})
+        {(old, None): new for (old, new) in six.iteritems(table_renames)})
       # Add the changes to the dict of col_updates. sort for reproducible order.
-      for col_rec, new_formula in sorted(formula_updates.iteritems()):
+      for col_rec, new_formula in sorted(six.iteritems(formula_updates)):
         col_updates.setdefault(col_rec, {})['formula'] = new_formula
 
     # If a table changes to onDemand, any empty columns (formula columns with no set formula)
@@ -464,7 +466,7 @@ class UserActions(object):
       for col in empty_cols:
         col_updates.setdefault(col, {}).update(isFormula=False, type='Text')
 
-    for col, values in col_updates.iteritems():
+    for col, values in six.iteritems(col_updates):
       if 'type' in values:
         self.doModifyColumn(col.tableId, col.colId, {'type': 'Int'})
 
@@ -482,7 +484,7 @@ class UserActions(object):
     # Internal functions are used to prevent unintended additional changes from occurring.
     # Specifically, this prevents widgetOptions and displayCol from being cleared as a side
     # effect of the column type change.
-    for col, values in col_updates.iteritems():
+    for col, values in six.iteritems(col_updates):
       self.doModifyColumn(col.tableId, col.colId, values)
     self.doBulkUpdateFromPairs('_grist_Tables_column', col_updates.items())
     make_acl_updates()
@@ -516,7 +518,7 @@ class UserActions(object):
 
     # Collect all renamings that we are about to apply.
     renames = {(c.parentId.tableId, c.colId): values['colId']
-               for c, values in col_updates.iteritems()
+               for c, values in six.iteritems(col_updates)
                if has_diff_value(values, 'colId', c.colId)}
 
     if renames:
@@ -524,7 +526,7 @@ class UserActions(object):
       formula_updates = self._prepare_formula_renames(renames)
 
       # For any affected columns, include the formula into the update.
-      for col_rec, new_formula in sorted(formula_updates.iteritems()):
+      for col_rec, new_formula in sorted(six.iteritems(formula_updates)):
         col_updates.setdefault(col_rec, {}).setdefault('formula', new_formula)
 
     update_pairs = col_updates.items()
@@ -534,7 +536,7 @@ class UserActions(object):
       if col.summarySourceCol:
         underlying = col_updates.get(col.summarySourceCol, {})
         if not all(value == getattr(col, key) or has_value(underlying, key, value)
-                   for key, value in values.iteritems()):
+                   for key, value in six.iteritems(values)):
           raise ValueError("Cannot modify summary group-by column '%s'" % col.colId)
 
     make_acl_updates = acl.prepare_acl_col_renames(self._docmodel, self, renames)
@@ -576,7 +578,7 @@ class UserActions(object):
   def _updateACLRules(self, table_id, row_ids, col_values):
     # Automatically populate aclFormulaParsed value by parsing aclFormula.
     if 'aclFormula' in col_values:
-      col_values['aclFormulaParsed'] = map(parse_acl_formula_json, col_values['aclFormula'])
+      col_values['aclFormulaParsed'] = [parse_acl_formula_json(v) for v in col_values['aclFormula']]
     return self.doBulkUpdateRecord(table_id, row_ids, col_values)
 
   def _prepare_formula_renames(self, renames):
@@ -605,7 +607,7 @@ class UserActions(object):
     # Apply the collected patches to each affected formula, converting to unicode to apply the
     # patches and back to byte string for how we maintain string values.
     result = {}
-    for col_rec, patches in patches_map.iteritems():
+    for col_rec, patches in six.iteritems(patches_map):
       formula = col_rec.formula.decode('utf8')
       replacer = textbuilder.Replacer(textbuilder.Text(formula), patches)
       result[col_rec] = replacer.get_text().encode('utf8')
@@ -1066,7 +1068,7 @@ class UserActions(object):
     # metadata record. We implement the former interface by forwarding to the latter.
     col = self._docmodel.get_column_rec(table_id, col_id)
 
-    update_values = {k: v for k, v in col_info.iteritems() if k in _modifiable_col_fields}
+    update_values = {k: v for k, v in six.iteritems(col_info) if k in _modifiable_col_fields}
     if '_position' in col_info:
       update_values['parentPos'] = col_info['_position']
     self._docmodel.update([col], **update_values)
@@ -1093,7 +1095,7 @@ class UserActions(object):
     old_col_info = schema.col_to_dict(self._engine.schema[table_id].columns[col_id],
                                       include_id=False)
 
-    col_info = {k: v for k, v in col_info.iteritems() if old_col_info.get(k, v) != v}
+    col_info = {k: v for k, v in six.iteritems(col_info) if old_col_info.get(k, v) != v}
     if not col_info:
       log.info("useractions.ModifyColumn is a noop")
       return
@@ -1182,7 +1184,7 @@ class UserActions(object):
 
     # Get the values from the columns and check which have changed.
     all_row_ids = list(table.row_ids)
-    all_src_values = map(src_column.raw_get, all_row_ids)
+    all_src_values = [src_column.raw_get(r) for r in all_row_ids]
 
     dst_column = table.get_column(dst_col_id)
     changed_rows, changed_values = [], []
@@ -1230,7 +1232,7 @@ class UserActions(object):
     Add the given table with columns without creating views.
     """
     # If needed, transform table_id into a valid identifier, and add a suffix to make it unique.
-    table_id = identifiers.pick_table_ident(table_id, avoid=self._engine.tables.viewkeys())
+    table_id = identifiers.pick_table_ident(table_id, avoid=six.viewkeys(self._engine.tables))
 
     # Sanitize and de-duplicate column identifiers.
     col_ids = [c['id'] for c in columns]
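
Python 2's `dict.viewkeys()` has no direct Python 3 spelling (there, `dict.keys()` already returns a view). Two equivalent replacements appear above: `set(d)`, since iterating a dict yields its keys on both versions, and `six.viewkeys(d)` where a live key view is wanted, as in the `pick_table_ident` call. Standalone:

    import six

    d = {'Table1': 1, 'Table2': 2}
    assert set(d) == {'Table1', 'Table2'}  # keys as a plain set
    view = six.viewkeys(d)                 # d.viewkeys() on Py2, d.keys() on Py3
    d['Table3'] = 3
    assert 'Table3' in view                # a view reflects later changes
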

@@ -161,7 +161,7 @@ class Text(BaseColumnType):
 
   @classmethod
   def is_right_type(cls, value):
-    return isinstance(value, (basestring, NoneType))
+    return isinstance(value, (six.string_types, NoneType))
 
   @classmethod
   def typeConvert(cls, value):
@@ -203,11 +203,11 @@ class Bool(BaseColumnType):
     # recognize. Everything else will result in alttext.
     if not value:
       return False
-    if isinstance(value, (float, int, long)):
+    if isinstance(value, (float, six.integer_types)):
       return True
     if isinstance(value, AltText):
       value = str(value)
-    if isinstance(value, basestring):
+    if isinstance(value, six.string_types):
       if value.lower() in ("false", "no", "0"):
         return False
       if value.lower() in ("true", "yes", "1"):
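
`six.integer_types` is `(int, long)` on Python 2 and `(int,)` on Python 3. `isinstance` accepts nested tuples of types, so `(float, six.integer_types)` is a valid classinfo argument as written. The explicit `not isinstance(value, bool)` guards used elsewhere in this file remain necessary because `bool` subclasses `int`:

    import six

    def is_number(value):  # illustrative helper, not from this commit
      return isinstance(value, (float, six.integer_types)) and not isinstance(value, bool)

    assert is_number(3) and is_number(2.5) and not is_number(True)
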
@@ -235,7 +235,7 @@ class Int(BaseColumnType):
 
   @classmethod
   def is_right_type(cls, value):
-    return value is None or (isinstance(value, (int, long)) and not isinstance(value, bool) and
+    return value is None or (isinstance(value, six.integer_types) and not isinstance(value, bool) and
                              objtypes.is_int_short(value))
 
 
@@ -252,7 +252,7 @@ class Numeric(BaseColumnType):
     # TODO: Python distinguishes ints from floats, while JS only has floats. A value that can be
     # interpreted as an int will upon being entered have type 'float', but after database reload
     # will have type 'int'.
-    return isinstance(value, (float, int, long, NoneType)) and not isinstance(value, bool)
+    return isinstance(value, (float, six.integer_types, NoneType)) and not isinstance(value, bool)
 
 
 class Date(Numeric):
@@ -267,9 +267,9 @@ class Date(Numeric):
       return moment.dt_to_ts(value)
     elif isinstance(value, datetime.date):
       return moment.date_to_ts(value)
-    elif isinstance(value, (float, int, long)):
+    elif isinstance(value, (float, six.integer_types)):
       return float(value)
-    elif isinstance(value, basestring):
+    elif isinstance(value, six.string_types):
       # We also accept a date in ISO format (YYYY-MM-DD), the time portion is optional and ignored
       return moment.parse_iso_date(value)
     else:
@@ -277,7 +277,7 @@ class Date(Numeric):
 
   @classmethod
   def is_right_type(cls, value):
-    return isinstance(value, (float, int, long, NoneType))
+    return isinstance(value, (float, six.integer_types, NoneType))
 
   @classmethod
   def typeConvert(cls, value, date_format, timezone='UTC'): # pylint: disable=arguments-differ
@@ -306,9 +306,9 @@ class DateTime(Date):
       return moment.dt_to_ts(value, self.timezone)
     elif isinstance(value, datetime.date):
       return moment.date_to_ts(value, self.timezone)
-    elif isinstance(value, (float, int, long)):
+    elif isinstance(value, (float, six.integer_types)):
       return float(value)
-    elif isinstance(value, basestring):
+    elif isinstance(value, six.string_types):
       # We also accept a datetime in ISO format (YYYY-MM-DD[T]HH:mm:ss)
       return moment.parse_iso(value, self.timezone)
     else:
@@ -330,7 +330,7 @@ class ChoiceList(BaseColumnType):
   def do_convert(self, value):
     if not value:
       return None
-    elif isinstance(value, basestring):
+    elif isinstance(value, six.string_types):
       # If it's a string that looks like JSON, try to parse it as such.
       if value.startswith('['):
         try:
@@ -345,11 +345,11 @@ class ChoiceList(BaseColumnType):
   @classmethod
   def is_right_type(cls, value):
     return value is None or (isinstance(value, (tuple, list)) and
-                             all(isinstance(item, basestring) for item in value))
+                             all(isinstance(item, six.string_types) for item in value))
 
   @classmethod
   def typeConvert(cls, value):
-    if isinstance(value, basestring) and not value.startswith('['):
+    if isinstance(value, six.string_types) and not value.startswith('['):
       # Try to parse as CSV. If this doesn't work, we'll still try usual conversions later.
       try:
         tags = next(csv.reader([value]))
@@ -383,7 +383,7 @@ class PositionNumber(BaseColumnType):
   @classmethod
   def is_right_type(cls, value):
     # Same as Numeric, but does not support None.
-    return isinstance(value, (float, int, long)) and not isinstance(value, bool)
+    return isinstance(value, (float, six.integer_types)) and not isinstance(value, bool)
 
 
 class ManualSortPos(PositionNumber):
@@ -411,7 +411,7 @@ class Id(BaseColumnType):
 
   @classmethod
   def is_right_type(cls, value):
-    return (isinstance(value, (int, long)) and not isinstance(value, bool) and
+    return (isinstance(value, six.integer_types) and not isinstance(value, bool) and
             objtypes.is_int_short(value))
 
 