(core) Remaining Python 3 compatibility changes

Summary: Biggest change is turning everything to unicode

Test Plan: The tests

Reviewers: dsagal, paulfitz

Reviewed By: dsagal

Differential Revision: https://phab.getgrist.com/D2875
This commit is contained in:
Alex Hall 2021-06-24 14:23:33 +02:00
parent 1af99e9567
commit 305b133c59
30 changed files with 276 additions and 177 deletions

View File

@ -639,7 +639,7 @@ async function handleSandboxError<T>(tableId: string, colNames: string[], p: Pro
if (match) {
throw new ApiError(`Invalid row id ${match[1]}`, 400);
}
match = e.message.match(/\[Sandbox\] KeyError '(.*?)'/);
match = e.message.match(/\[Sandbox\] KeyError u?'(.*?)'/);
if (match) {
if (match[1] === tableId) {
throw new ApiError(`Table not found "${tableId}"`, 404);

View File

@ -78,7 +78,7 @@ export class NSandbox implements ISandbox {
public readonly childProc: ChildProcess;
private _logTimes: boolean;
private _exportedFunctions: {[name: string]: SandboxMethod};
private _marshaller = new marshal.Marshaller({stringToBuffer: true, version: 2});
private _marshaller = new marshal.Marshaller({stringToBuffer: false, version: 2});
private _unmarshaller = new marshal.Unmarshaller({ bufferToString: false });
// Members used for reading from the sandbox process.

View File

@ -109,8 +109,7 @@ def prepare_acl_col_renames(docmodel, useractions, col_renames_dict):
# Go through again checking if anything in ACL formulas is affected by the rename.
for rule_rec in docmodel.aclRules.all:
if rule_rec.aclFormula:
# Positions are obtained from unicode version of formulas, so that's what we must patch
formula = rule_rec.aclFormula.decode('utf8')
formula = rule_rec.aclFormula
patches = []
for entity in parse_acl_grist_entities(rule_rec.aclFormula):
@ -129,7 +128,7 @@ def prepare_acl_col_renames(docmodel, useractions, col_renames_dict):
patches.append(patch)
replacer = textbuilder.Replacer(textbuilder.Text(formula), patches)
txt = replacer.get_text().encode('utf8')
txt = replacer.get_text()
rule_updates.append((rule_rec, {'aclFormula': txt,
'aclFormulaParsed': parse_acl_formula_json(txt)}))

View File

@ -5,6 +5,8 @@ import tokenize
from collections import namedtuple
import asttokens
import six
def parse_acl_formula(acl_formula):
"""
@ -26,10 +28,12 @@ def parse_acl_formula(acl_formula):
Attr node, attr_name
Comment node, comment
"""
if isinstance(acl_formula, six.binary_type):
acl_formula = acl_formula.decode('utf8')
try:
tree = ast.parse(acl_formula, mode='eval')
result = _TreeConverter().visit(tree)
for part in tokenize.generate_tokens(io.StringIO(acl_formula.decode('utf-8')).readline):
for part in tokenize.generate_tokens(io.StringIO(acl_formula).readline):
if part[0] == tokenize.COMMENT and part[1].startswith('#'):
result = ['Comment', result, part[1][1:].strip()]
break

View File

@ -116,7 +116,7 @@ def action_from_repr(doc_action):
try:
return decode_objects(action_type(*doc_action[1:]))
except TypeError as e:
raise TypeError("%s: %s" % (doc_action[0], e.message))
raise TypeError("%s: %s" % (doc_action[0], str(e)))
def convert_recursive_helper(converter, data):

View File

@ -70,6 +70,9 @@ class AutocompleteContext(object):
return self._context
def process_result(self, result):
# 'for' suggests the autocompletion 'for ' in python 3
result = result.rstrip()
# Callables are returned by rlcompleter with a trailing "(".
if result.endswith('('):
funcname = result[0:-1]

View File

@ -118,9 +118,10 @@ def _create_syntax_error_code(builder, input_text, err):
output_offset = output_ln.line_to_offset(err.lineno, err.offset - 1 if err.offset else 0)
input_offset = builder.map_back_offset(output_offset)
line, col = input_ln.offset_to_line(input_offset)
return "%s\nraise %s('%s on line %d col %d')" % (
message = '%s on line %d col %d' % (err.args[0], line, col + 1)
return "%s\nraise %s(%r)" % (
textbuilder.line_start_re.sub('# ', input_text.rstrip()),
type(err).__name__, err.args[0], line, col + 1)
type(err).__name__, message)
#----------------------------------------------------------------------

View File

@ -268,11 +268,43 @@ class DateTimeColumn(NumericColumn):
def sample_value(self):
return _sample_datetime
class MixedTypesKey(object):
  """
  Wrapper used as a sort key for values of heterogeneous types.

  Mimics Python 2 ordering, where any two values may be compared: when the
  wrapped values cannot be ordered directly (a TypeError under Python 3), we
  fall back to ordering them by the names of their types.
  """

  __slots__ = ("value",)

  def __init__(self, value):
    self.value = value

  def __repr__(self):
    return "MixedTypesKey(%r)" % (self.value,)

  def __eq__(self, other):
    return self.value == other.value

  def __lt__(self, other):
    a, b = self.value, other.value
    try:
      return a < b
    except TypeError:
      # Incomparable types: order deterministically by type name instead.
      return type(a).__name__ < type(b).__name__
if six.PY2:
  # On Python 2 values of different types are already mutually comparable, so
  # the wrapper adds only overhead; replace it with an identity function.
  def MixedTypesKey(x):
    return x
class PositionColumn(NumericColumn):
def __init__(self, table, col_id, col_info):
super(PositionColumn, self).__init__(table, col_id, col_info)
# This is a list of row_ids, ordered by the position.
self._sorted_rows = SortedListWithKey(key=self.raw_get)
self._sorted_rows = SortedListWithKey(key=lambda x: MixedTypesKey(self.raw_get(x)))
def set(self, row_id, value):
self._sorted_rows.discard(row_id)
@ -282,7 +314,8 @@ class PositionColumn(NumericColumn):
def copy_from_column(self, other_column):
super(PositionColumn, self).copy_from_column(other_column)
self._sorted_rows = SortedListWithKey(other_column._sorted_rows[:], key=self.raw_get)
self._sorted_rows = SortedListWithKey(other_column._sorted_rows[:],
key=lambda x: MixedTypesKey(self.raw_get(x)))
def prepare_new_values(self, values, ignore_data=False, action_summary=None):
# This does the work of adjusting positions and relabeling existing rows with new position
@ -290,9 +323,11 @@ class PositionColumn(NumericColumn):
# used for updating a position for an existing row: we'll find a new value for it; later when
# this value is set, the old position will be removed and the new one added.
if ignore_data:
rows = SortedListWithKey([], key=self.raw_get)
rows = []
else:
rows = self._sorted_rows
# prepare_inserts expects floats as keys, not MixedTypesKeys
rows = SortedListWithKey(rows, key=self.raw_get)
adjustments, new_values = relabeling.prepare_inserts(rows, values)
return new_values, [(self._sorted_rows[i], pos) for (i, pos) in adjustments]

View File

@ -77,9 +77,9 @@ class Graph(object):
"""
Print out the graph to stdout, for debugging.
"""
print "Dependency graph (%d edges):" % len(self._all_edges)
print("Dependency graph (%d edges):" % len(self._all_edges))
for edge in sorted(self._all_edges):
print " %s" % (edge,)
print(" %s" % (edge,))
def add_edge(self, out_node, in_node, relation):
"""

View File

@ -34,12 +34,13 @@ import table as table_module
import useractions
import column
import repl
import urllib_patch # noqa imported for side effect
log = logger.Logger(__name__, logger.INFO)
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
sys.setdefaultencoding('utf8') # noqa
class OrderError(Exception):
@ -1125,8 +1126,14 @@ class Engine(object):
except Exception:
log.error("Inconsistent schema after revert on failure: %s" % traceback.format_exc())
# Re-raise the original exception (simple `raise` wouldn't do if undo also fails above).
raise exc_info[0], exc_info[1], exc_info[2]
# Re-raise the original exception
# In Python 2, 'raise' raises the most recent exception,
# which may come from the try/except just above
# Python 3 keeps track of nested exceptions better
if six.PY2:
six.reraise(*exc_info)
else:
raise
# Note that recalculations and auto-removals get included after processing all useractions.
self._bring_all_up_to_date()
@ -1183,8 +1190,15 @@ class Engine(object):
self.rebuild_usercode()
except Exception:
log.error("Error rebuilding usercode after restoring schema: %s" % traceback.format_exc())
# Re-raise the original exception (simple `raise` wouldn't do if rebuild also fails above).
raise exc_info[0], exc_info[1], exc_info[2]
# Re-raise the original exception
# In Python 2, 'raise' raises the most recent exception,
# which may come from the try/except just above
# Python 3 keeps track of nested exceptions better
if six.PY2:
six.reraise(*exc_info)
else:
raise
# If any columns got deleted, destroy them to clear _back_references in other tables, and to
# force errors if anything still uses them. Also clear them from calc actions if needed.

View File

@ -93,15 +93,15 @@ def SELF_HYPERLINK(label=None, page=None, **kwargs):
we might want to create links with `SELF_HYPERLINK(LinkKey_Code=$Code)`.
>>> SELF_HYPERLINK()
'https://docs.getgrist.com/sbaltsirg/Example'
u'https://docs.getgrist.com/sbaltsirg/Example'
>>> SELF_HYPERLINK(label='doc')
'doc https://docs.getgrist.com/sbaltsirg/Example'
u'doc https://docs.getgrist.com/sbaltsirg/Example'
>>> SELF_HYPERLINK(page=2)
'https://docs.getgrist.com/sbaltsirg/Example/p/2'
u'https://docs.getgrist.com/sbaltsirg/Example/p/2'
>>> SELF_HYPERLINK(LinkKey_Code='X1234')
'https://docs.getgrist.com/sbaltsirg/Example?Code_=X1234'
u'https://docs.getgrist.com/sbaltsirg/Example?Code_=X1234'
>>> SELF_HYPERLINK(label='order', page=3, LinkKey_Code='X1234', LinkKey_Name='Bi Ngo')
'order https://docs.getgrist.com/sbaltsirg/Example/p/3?Code_=X1234&Name_=Bi+Ngo'
u'order https://docs.getgrist.com/sbaltsirg/Example/p/3?Code_=X1234&Name_=Bi+Ngo'
>>> SELF_HYPERLINK(Linky_Link='Link')
Traceback (most recent call last):
...
@ -110,6 +110,7 @@ def SELF_HYPERLINK(label=None, page=None, **kwargs):
txt = os.environ.get('DOC_URL')
if not txt:
return None
txt = six.text_type(txt)
if page:
txt += "/p/{}".format(page)
if kwargs:
@ -124,7 +125,7 @@ def SELF_HYPERLINK(label=None, page=None, **kwargs):
parts[4] = urllib_parse.urlencode(query)
txt = urllib_parse.urlunparse(parts)
if label:
txt = "{} {}".format(label, txt)
txt = u"{} {}".format(label, txt)
return txt
def VLOOKUP(table, **field_value_pairs):

View File

@ -1,16 +1,17 @@
# -*- coding: UTF-8 -*-
import datetime
import dateutil.parser
import numbers
import re
import dateutil.parser
import six
from six import unichr
from six.moves import xrange
from usertypes import AltText # pylint: disable=import-error
from .unimplemented import unimplemented
from usertypes import AltText # pylint: disable=import-error
def CHAR(table_number):
"""
@ -26,7 +27,7 @@ def CHAR(table_number):
# See http://stackoverflow.com/a/93029/328565
_control_chars = ''.join(map(unichr, range(0,32) + range(127,160)))
_control_chars = ''.join(map(unichr, list(xrange(0,32)) + list(xrange(127,160))))
_control_char_re = re.compile('[%s]' % re.escape(_control_chars))
def CLEAN(text):
@ -58,7 +59,7 @@ def CODE(string):
def CONCATENATE(string, *more_strings):
"""
u"""
Joins together any number of text strings into one string. Also available under the name
`CONCAT`. Similar to the Python expression `"".join(array_of_strings)`.
@ -70,11 +71,15 @@ def CONCATENATE(string, *more_strings):
u'abc'
>>> CONCATENATE(0, "abc")
u'0abc'
>>> CONCATENATE(2, " crème ", "brûlée".decode('utf8')) == "2 crème brûlée".decode('utf8')
True
>>> assert CONCATENATE(2, u" crème ", u"brûlée") == u'2 crème brûlée'
>>> assert CONCATENATE(2, " crème ", u"brûlée") == u'2 crème brûlée'
>>> assert CONCATENATE(2, " crème ", "brûlée") == u'2 crème brûlée'
"""
return u''.join(val if isinstance(val, unicode) else str(val).decode('utf8')
for val in (string,) + more_strings)
return u''.join(
val.decode('utf8') if isinstance(val, six.binary_type) else
six.text_type(val)
for val in (string,) + more_strings
)
def CONCAT(string, *more_strings):
@ -90,8 +95,7 @@ def CONCAT(string, *more_strings):
u'abc'
>>> CONCAT(0, "abc")
u'0abc'
>>> CONCAT(2, " crème ", "brûlée".decode('utf8')) == "2 crème brûlée".decode('utf8')
True
>>> assert CONCAT(2, u" crème ", u"brûlée") == u'2 crème brûlée'
"""
return CONCATENATE(string, *more_strings)
@ -443,48 +447,57 @@ def SUBSTITUTE(text, old_text, new_text, instance_num=None):
Same as `text.replace(old_text, new_text)` when instance_num is omitted.
>>> SUBSTITUTE("Sales Data", "Sales", "Cost")
'Cost Data'
u'Cost Data'
>>> SUBSTITUTE("Quarter 1, 2008", "1", "2", 1)
'Quarter 2, 2008'
u'Quarter 2, 2008'
>>> SUBSTITUTE("Quarter 1, 2011", "1", "2", 3)
'Quarter 1, 2012'
u'Quarter 1, 2012'
More tests:
>>> SUBSTITUTE("Hello world", "", "-")
'Hello world'
u'Hello world'
>>> SUBSTITUTE("Hello world", " ", "-")
'Hello-world'
u'Hello-world'
>>> SUBSTITUTE("Hello world", " ", 12.1)
'Hello12.1world'
u'Hello12.1world'
>>> SUBSTITUTE(u"Hello world", u" ", 12.1)
u'Hello12.1world'
>>> SUBSTITUTE("Hello world", "world", "")
'Hello '
u'Hello '
>>> SUBSTITUTE("Hello", "world", "")
'Hello'
u'Hello'
Overlapping matches are all counted when looking for instance_num.
>>> SUBSTITUTE('abababab', 'abab', 'xxxx')
'xxxxxxxx'
u'xxxxxxxx'
>>> SUBSTITUTE('abababab', 'abab', 'xxxx', 1)
'xxxxabab'
u'xxxxabab'
>>> SUBSTITUTE('abababab', 'abab', 'xxxx', 2)
'abxxxxab'
u'abxxxxab'
>>> SUBSTITUTE('abababab', 'abab', 'xxxx', 3)
'ababxxxx'
u'ababxxxx'
>>> SUBSTITUTE('abababab', 'abab', 'xxxx', 4)
'abababab'
u'abababab'
>>> SUBSTITUTE('abababab', 'abab', 'xxxx', 0)
Traceback (most recent call last):
...
ValueError: instance_num invalid
>>> SUBSTITUTE( "crème", "è", "e")
u'creme'
>>> SUBSTITUTE(u"crème", u"è", "e")
u'creme'
>>> SUBSTITUTE(u"crème", "è", "e")
u'creme'
>>> SUBSTITUTE( "crème", u"è", "e")
u'creme'
"""
text = six.text_type(text)
old_text = six.text_type(old_text)
new_text = six.text_type(new_text)
if not old_text:
return text
if not isinstance(new_text, six.string_types):
new_text = str(new_text)
if instance_num is None:
return text.replace(old_text, new_text)
@ -505,22 +518,23 @@ def T(value):
Returns value if value is text, or the empty string when value is not text.
>>> T('Text')
'Text'
u'Text'
>>> T(826)
''
u''
>>> T('826')
'826'
u'826'
>>> T(False)
''
u''
>>> T('100 points')
'100 points'
u'100 points'
>>> T(AltText('Text'))
'Text'
u'Text'
>>> T(float('nan'))
''
u''
"""
return (value if isinstance(value, basestring) else
str(value) if isinstance(value, AltText) else "")
return (value.decode('utf8') if isinstance(value, six.binary_type) else
value if isinstance(value, six.text_type) else
six.text_type(value) if isinstance(value, AltText) else u"")
@unimplemented
@ -565,8 +579,7 @@ def VALUE(text):
>>> VALUE("$1,000")
1000
>>> VALUE("16:48:00") - VALUE("12:00:00")
datetime.timedelta(0, 17280)
>>> assert VALUE("16:48:00") - VALUE("12:00:00") == datetime.timedelta(0, 17280)
>>> VALUE("01/01/2012")
datetime.datetime(2012, 1, 1, 0, 0)
>>> VALUE("")

View File

@ -36,6 +36,7 @@ def table_data_from_db(table_name, table_data_repr):
if table_data_repr is None:
return actions.TableData(table_name, [], {})
table_data_parsed = marshal.loads(table_data_repr)
table_data_parsed = {key.decode("utf8"): value for key, value in table_data_parsed.items()}
id_col = table_data_parsed.pop("id")
return actions.TableData(table_name, id_col,
actions.decode_bulk_values(table_data_parsed, _decode_db_value))
@ -44,14 +45,8 @@ def _decode_db_value(value):
# Decode database values received from SQLite's allMarshal() call. These are encoded by
# marshalling certain types and storing as BLOBs (received in Python as binary strings, as
# opposed to text which is received as unicode). See also encodeValue() in DocStorage.js
# TODO For the moment, the sandbox uses binary strings throughout (with text in utf8 encoding).
# We should switch to representing text with unicode instead. This requires care, at least in
# fixing various occurrences of str() in our code, which may fail and which return wrong type.
t = type(value)
if t == unicode:
return value.encode('utf8')
elif t == str:
if t == six.binary_type:
return objtypes.decode_object(marshal.loads(value))
else:
return value

View File

@ -73,7 +73,8 @@ def create_migrations(all_tables, metadata_only=False):
new_col_info = {c['id']: c for c in new_schema[table_id].columns}
# Use an incomplete default for unknown (i.e. deprecated) columns; some uses of the column
# would be invalid, such as adding a new record with missing values.
col_info = sorted([new_col_info.get(col_id, {'id': col_id}) for col_id in data.columns])
col_info = sorted([new_col_info.get(col_id, {'id': col_id}) for col_id in data.columns],
key=lambda c: list(six.iteritems(c)))
tdset.apply_doc_action(actions.AddTable(table_id, col_info))
# And load in the original data, interpreting the TableData object as BulkAddRecord action.
@ -177,7 +178,7 @@ def migration1(tdset):
if rows:
values = {'tableRef': [r[0] for r in rows],
'viewRef': [r[1] for r in rows]}
row_ids = range(1, len(rows) + 1)
row_ids = list(xrange(1, len(rows) + 1))
doc_actions.append(actions.ReplaceTableData('_grist_TabItems', row_ids, values))
return tdset.apply_doc_actions(doc_actions)
@ -212,14 +213,14 @@ def migration2(tdset):
return actions.BulkUpdateRecord('_grist_Tables', row_ids, values)
def create_tab_bar_action(views_to_table):
row_ids = range(1, len(views_to_table) + 1)
row_ids = list(xrange(1, len(views_to_table) + 1))
return actions.ReplaceTableData('_grist_TabBar', row_ids, {
'viewRef': sorted(views_to_table.keys())
})
def create_table_views_action(views_to_table, primary_views):
related_views = sorted(set(views_to_table.keys()) - set(primary_views.values()))
row_ids = range(1, len(related_views) + 1)
row_ids = list(xrange(1, len(related_views) + 1))
return actions.ReplaceTableData('_grist_TableViews', row_ids, {
'tableRef': [views_to_table[v] for v in related_views],
'viewRef': related_views,
@ -757,7 +758,7 @@ def migration20(tdset):
# the name of primary view's is the same as the tableId
return (view.name, -1)
views.sort(key=view_key)
row_ids = range(1, len(views) + 1)
row_ids = list(xrange(1, len(views) + 1))
return tdset.apply_doc_actions([
actions.AddTable('_grist_Pages', [
schema.make_column('viewRef', 'Ref:_grist_Views'),

View File

@ -142,8 +142,9 @@ class TzInfo(_tzinfo):
def tzname(self, dt):
"""Implementation of tzinfo.tzname interface."""
abbr = self.zone.dt_tzname(dt, self._favor_offset)
# tzname must return a string, not unicode.
return abbr.encode('utf8') if isinstance(abbr, unicode) else abbr
if six.PY2 and isinstance(abbr, six.text_type):
abbr = abbr.encode('utf8')
return abbr
def dst(self, dt):
"""Implementation of tzinfo.dst interface."""

View File

@ -47,6 +47,7 @@ DATE_TOKENS_REGEX = re.compile("("+("|".join(DATE_TOKENS))+")")
# List of separators to replace and match any standard date/time separators
SEP = r"[\s/.\-:,]*"
SEP_REGEX = re.compile(SEP)
SEP_REPLACEMENT = SEP.replace("\\", "\\\\")
# Maps date parse format to compile regex
FORMAT_CACHE = {}
@ -77,7 +78,8 @@ def parse(date_string, parse_format, zonelabel='UTC', override_current_date=None
# e.g. "MM-YY" -> "(?P<mm>\d{1,2})-(?P<yy>\d{2})"
# Note that DATE_TOKENS is ordered so that the longer letter chains are recognized first
tokens = DATE_TOKENS_REGEX.split(parse_format)
tokens = [DATE_TOKENS[t] if t in DATE_TOKENS else SEP_REGEX.sub(SEP, t) for t in tokens]
tokens = [DATE_TOKENS[t] if t in DATE_TOKENS else SEP_REGEX.sub(SEP_REPLACEMENT, t)
for t in tokens]
# Compile new token string ignoring case (for month names)
parser = re.compile(''.join(tokens), re.I)

View File

@ -163,14 +163,16 @@ def encode_object(value):
Returns ['U', repr(value)] if it fails to encode otherwise.
"""
try:
if isinstance(value, (str, unicode, float, bool)) or value is None:
if isinstance(value, (six.text_type, float, bool)) or value is None:
return value
elif isinstance(value, (long, int)):
elif isinstance(value, six.binary_type):
return value.decode('utf8')
elif isinstance(value, six.integer_types):
if not is_int_short(value):
raise UnmarshallableError("Integer too large")
return value
elif isinstance(value, AltText):
return str(value)
return six.text_type(value)
elif isinstance(value, records.Record):
return ['R', value._table.table_id, value._row_id]
elif isinstance(value, RecordStub):
@ -210,13 +212,6 @@ def decode_object(value):
"""
try:
if not isinstance(value, (list, tuple)):
if isinstance(value, unicode):
# TODO For now, the sandbox uses binary strings throughout; see TODO in main.py for more
# on this. Strings that come from JS become Python binary strings, and we will not see
# unicode here. But we may see it if unmarshalling data that comes from DB, since
# DocStorage encodes/decodes values by marshaling JS strings as unicode. For consistency,
# convert those unicode strings to binary strings too.
return value.encode('utf8')
return value
code = value[0]
args = value[1:]

View File

@ -6,9 +6,10 @@ slight changes in order to be convenient for Grist's purposes
import code
import sys
from StringIO import StringIO
from collections import namedtuple
import six
SUCCESS = 0
INCOMPLETE = 1
ERROR = 2
@ -38,7 +39,7 @@ class REPLInterpreter(code.InteractiveInterpreter):
old_stdout = sys.stdout
old_stderr = sys.stderr
user_output = StringIO()
user_output = six.StringIO()
self.error_text = ""
try:
@ -67,7 +68,10 @@ class REPLInterpreter(code.InteractiveInterpreter):
sys.stderr = old_stderr
program_output = user_output.getvalue()
user_output.close()
try:
user_output.close()
except:
pass
return EvalTuple(program_output, self.error_text, status)

View File

@ -92,7 +92,6 @@ class TestCodeBuilder(unittest.TestCase):
# Check for reasonable behaviour with non-empty text and no statements.
self.assertEqual(make_body('# comment'), '# comment\npass')
self.assertEqual(make_body('\\'), '\\\npass')
self.assertEqual(make_body('rec = 1'), "# rec = 1\n" +
"raise SyntaxError('Grist disallows assignment " +

View File

@ -1,5 +1,9 @@
import doctest
import os
import re
import six
import functions
import moment
@ -15,6 +19,12 @@ def date_tearDown(doc_test):
# pylint: disable=unused-argument
functions.date._get_global_tz = _old_date_get_global_tz
class Py23DocChecker(doctest.OutputChecker):
  """
  Doctest output checker that lets Python 2-style expected output (string
  reprs written as u'...') pass unchanged when the doctests run on Python 3.
  """

  def check_output(self, want, got, optionflags):
    if six.PY3:
      # Drop a leading u prefix from a fully single- or double-quoted
      # expected repr, e.g. u'abc' -> 'abc'.
      for pattern, repl in ((r"^u'(.*?)'$", r"'\1'"),
                            (r'^u"(.*?)"$', r'"\1"')):
        want = re.sub(pattern, repl, want)
    return doctest.OutputChecker.check_output(self, want, got, optionflags)
# This works with the unittest module to turn all the doctests in the functions' doc-comments into
# unittest test cases.
@ -26,8 +36,8 @@ def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(functions.logical))
tests.addTests(doctest.DocTestSuite(functions.math))
tests.addTests(doctest.DocTestSuite(functions.stats))
tests.addTests(doctest.DocTestSuite(functions.text))
tests.addTests(doctest.DocTestSuite(functions.text, checker=Py23DocChecker()))
tests.addTests(doctest.DocTestSuite(functions.schedule,
setUp = date_setUp, tearDown = date_tearDown))
tests.addTests(doctest.DocTestSuite(functions.lookup))
tests.addTests(doctest.DocTestSuite(functions.lookup, checker=Py23DocChecker()))
return tests

View File

@ -256,7 +256,7 @@ class TestRelabeling(unittest.TestCase):
self._do_test_renumber_ends([])
def test_renumber_endpoints2(self):
self._do_test_renumber_ends(zip("abcd", [40,50,60,70]))
self._do_test_renumber_ends(list(zip("abcd", [40,50,60,70])))
def _do_test_renumber_ends(self, initial):
# Test insertions that happen together on the left and on the right.

View File

@ -330,23 +330,23 @@ class TestRenames(test_engine.EngineTestCase):
def test_renames_with_non_ascii(self):
# Test that presence of unicode does not interfere with formula adjustments for renaming.
self.load_sample(self.sample)
self.add_column("Address", "CityUpper", formula="'Øî'+$city.upper()+'áü'")
self.add_column("Address", "CityUpper", formula=u"'Øî'+$city.upper()+'áü'")
out_actions = self.apply_user_action(["RenameColumn", "Address", "city", "ciudad"])
self.assertPartialOutActions(out_actions, { "stored": [
["RenameColumn", "Address", "city", "ciudad"],
["ModifyColumn", "People", "city", {"formula": "$addr.ciudad"}],
["ModifyColumn", "Address", "CityUpper", {"formula": "'Øî'+$ciudad.upper()+'áü'"}],
["ModifyColumn", "Address", "CityUpper", {"formula": u"'Øî'+$ciudad.upper()+'áü'"}],
["BulkUpdateRecord", "_grist_Tables_column", [21, 24, 25], {
"colId": ["ciudad", "city", "CityUpper"],
"formula": ["", "$addr.ciudad", "'Øî'+$ciudad.upper()+'áü'"],
"formula": ["", "$addr.ciudad", u"'Øî'+$ciudad.upper()+'áü'"],
}]
]})
self.assertTableData("Address", cols="all", data=[
["id", "ciudad", "CityUpper"],
[11, "New York", "ØîNEW YORKáü"],
[12, "Colombia", "ØîCOLOMBIAáü"],
[13, "New Haven", "ØîNEW HAVENáü"],
[14, "West Haven", "ØîWEST HAVENáü"],
[11, "New York", u"ØîNEW YORKáü"],
[12, "Colombia", u"ØîCOLOMBIAáü"],
[13, "New Haven", u"ØîNEW HAVENáü"],
[14, "West Haven", u"ØîWEST HAVENáü"],
])
def test_rename_updates_properties(self):

View File

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
import six
import logger
import testutil
@ -25,7 +26,7 @@ class TestTypes(test_engine.EngineTestCase):
"Types": [
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, False, False, False, False, False],
[14, True, True, True, True, True],
[15, 1509556595, 1509556595, 1509556595, 1509556595, 1509556595],
@ -61,20 +62,20 @@ class TestTypes(test_engine.EngineTestCase):
self.assertPartialOutActions(out_actions, {
"stored": [["BulkUpdateRecord", "Types", self.all_row_ids, {
"text": [None,"","1","0","8.153","1509556595","True","False","Chîcágö","New York"],
"numeric": [None, None, 1.0, 0.0, 8.153, 1509556595.0, 1.0, 0.0, "Chîcágö", "New York"],
"int": [None, None, 1, 0, 8, 1509556595, 1, 0, "Chîcágö", "New York"],
"bool": [False, False, True, False, True, True, True, False, "Chîcágö", "New York"],
"text": [None,"","1","0","8.153","1509556595","True","False",u"Chîcágö","New York"],
"numeric": [None, None, 1.0, 0.0, 8.153, 1509556595.0, 1.0, 0.0, u"Chîcágö", "New York"],
"int": [None, None, 1, 0, 8, 1509556595, 1, 0, u"Chîcágö", "New York"],
"bool": [False, False, True, False, True, True, True, False, u"Chîcágö", "New York"],
"date": [None, None, 1.0, 0.0, 8.153, 1509556595.0, 1.0, 0.0, 1548115200.0, "New York"]
}],
["UpdateRecord", "Formulas", 1, {"division": 0.0}],
],
"undo": [["BulkUpdateRecord", "Types", self.all_row_ids, {
"text": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"numeric": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"int": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"bool": ["New York", "Chîcágö", False, True, 1509556595, 8.153, False, True, "", None],
"date": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None]
"text": ["New York", u"Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"numeric": ["New York", u"Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"int": ["New York", u"Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"bool": ["New York", u"Chîcágö", False, True, 1509556595, 8.153, False, True, "", None],
"date": ["New York", u"Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None]
}],
["UpdateRecord", "Formulas", 1, {"division": 0.5}],
]
@ -90,7 +91,7 @@ class TestTypes(test_engine.EngineTestCase):
[16, "1509556595", 1509556595, 1509556595, True, 1509556595.0],
[17, "True", 1.0, 1, True, 1.0],
[18, "False", 0.0, 0, False, 0.0],
[19, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", 1548115200.0],
[19, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", 1548115200.0],
[20, "New York", "New York", "New York", "New York", "New York"]
])
@ -184,7 +185,7 @@ class TestTypes(test_engine.EngineTestCase):
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, False, "False", "False", "False", "False"],
[14, True, "True", "True", "True", "True"],
[15, 1509556595, "1509556595.0","1509556595","1509556595","1509556595.0"],
@ -283,7 +284,7 @@ class TestTypes(test_engine.EngineTestCase):
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, 0.0, False, 0.0, 0.0, 0.0],
[14, 1.0, True, 1.0, 1.0, 1.0],
[15, 1509556595, 1509556595, 1509556595, 1509556595, 1509556595],
@ -327,15 +328,13 @@ class TestTypes(test_engine.EngineTestCase):
["BulkUpdateRecord", "Types", [13, 14, 16, 19],
{"numeric": [0, 1, 8, None]}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Int"}],
["UpdateRecord", "Formulas", 1, {"division": 0}],
],
] + six.PY2 * [["UpdateRecord", "Formulas", 1, {"division": 0}]], # Only in Python 2 due to integer division,
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 16, 19],
{"numeric": [False, True, 8.153, ""]}],
["ModifyColumn", "Types", "numeric", {"type": "Numeric"}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Numeric"}],
["UpdateRecord", "Formulas", 1, {"division": 0.5}],
]
] + six.PY2 * [["UpdateRecord", "Formulas", 1, {"division": 0.5}]], # Only in Python 2 due to integer division
})
# Test Int -> Int conversion
@ -383,7 +382,7 @@ class TestTypes(test_engine.EngineTestCase):
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, 0, 0, False, 0, 0],
[14, 1, 1, True, 1, 1],
[15, 1509556595, 1509556595, 1509556595, 1509556595, 1509556595],
@ -428,15 +427,13 @@ class TestTypes(test_engine.EngineTestCase):
["BulkUpdateRecord", "Types", [15, 16, 17, 18, 19, 20],
{"numeric": [True, True, False, True, False, False]}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Bool"}],
["UpdateRecord", "Formulas", 1, {"division": 0}],
],
] + six.PY2 * [["UpdateRecord", "Formulas", 1, {"division": 0}]], # Only in Python 2 due to integer division,
"undo": [
["BulkUpdateRecord", "Types", [15, 16, 17, 18, 19, 20],
{"numeric": [1509556595.0, 8.153, 0.0, 1.0, "", None]}],
["ModifyColumn", "Types", "numeric", {"type": "Numeric"}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Numeric"}],
["UpdateRecord", "Formulas", 1, {"division": 0.5}],
]
] + six.PY2 * [["UpdateRecord", "Formulas", 1, {"division": 0.5}]], # Only in Python 2 due to integer division
})
# Test Int -> Bool conversion
@ -484,7 +481,7 @@ class TestTypes(test_engine.EngineTestCase):
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, False, False, False, False, False],
[14, True, True, True, True, True],
[15, True, True, True, 1509556595, True],
@ -585,7 +582,7 @@ class TestTypes(test_engine.EngineTestCase):
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[12, u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö", u"Chîcágö"],
[13, 0.0, 0.0, 0.0, 0.0, False],
[14, 1.0, 1.0, 1.0, 1.0, True],
[15, 1509556595, 1509556595, 1509556595, 1509556595, 1509556595],

View File

@ -0,0 +1,19 @@
# coding=utf-8
import unittest
import urllib
import six
from urllib_patch import original_quote
class TestUrllibPatch(unittest.TestCase):
  """Tests the urllib_patch module's patched quote() against the saved-off
  original implementation."""

  def test_patched_quote(self):
    # Patched quote() accepts both byte and unicode strings, including
    # non-ascii input, and percent-encodes spaces and UTF-8 bytes.
    self.assertEqual(urllib.quote( "a b"), u"a%20b")
    self.assertEqual(urllib.quote(u"a b"), u"a%20b")
    self.assertEqual(urllib.quote(u"a é"), u"a%20%C3%A9")

    # The original quote() still works for ascii input on both Pythons.
    self.assertEqual(original_quote( "a b"), u"a%20b")
    self.assertEqual(original_quote(u"a b"), u"a%20b")
    if six.PY3:  # python 2 original quote can't handle non-ascii
      self.assertEqual(original_quote(u"a é"), u"a%20%C3%A9")

View File

@ -2768,7 +2768,7 @@
["APPLY", {
"USER_ACTIONS": [
// Access to usercode before and after re-generation
["EvalCode", "list(Students.all.firstName)", null],
["EvalCode", "list(map(str, Students.all.firstName))", null],
["UpdateRecord", "Students", 1, {"firstName": "2e6"}],
["ModifyColumn", "Students", "firstName", { "type" : "Numeric" }],
["EvalCode", "list(Students.all.firstName)", 6],
@ -2778,7 +2778,7 @@
"ACTIONS": {
"stored": [
["AddRecord", "_grist_REPL_Hist", 6,
{"code": "list(Students.all.firstName)", "errorText": "", "outputText": "['Barack', 'George W', 'Bill', 'George H', 'Ronald', 'Jimmy', 'Gerald']\n"}],
{"code": "list(map(str, Students.all.firstName))", "errorText": "", "outputText": "['Barack', 'George W', 'Bill', 'George H', 'Ronald', 'Jimmy', 'Gerald']\n"}],
["UpdateRecord", "Students", 1, {"firstName": "2e6"}],
["ModifyColumn", "Students", "firstName", {"type": "Numeric"}],
@ -2808,7 +2808,7 @@
["ModifyColumn", "Students", "firstName", {"type": "Text"}],
["UpdateRecord", "_grist_Tables_column", 1, {"type": "Text"}],
["UpdateRecord", "_grist_REPL_Hist", 6, {"code": "list(Students.all.firstName)",
["UpdateRecord", "_grist_REPL_Hist", 6, {"code": "list(map(str, Students.all.firstName))",
"errorText": "", "outputText": "['Barack', 'George W', 'Bill', 'George H', 'Ronald', 'Jimmy', 'Gerald']\n"}],
["UpdateRecord", "_grist_REPL_Hist", 6, {"code": "list(Students.all.firstName)",
"errorText": "",
@ -2825,7 +2825,7 @@
["APPLY", {
"USER_ACTIONS": [
// Syntax Error
["EvalCode", "*!@&$fjjj112#(8!", null],
["EvalCode", "not correct c", null],
// Other continuations
["EvalCode", "map(filter, ", null],
["EvalCode", "[1,2,3,", null],
@ -2834,15 +2834,14 @@
["EvalCode", "sys.exit(0)", null],
// User reassignment of sys.stdout/sys.stderr
["EvalCode", "sys.stdout = None", null],
["EvalCode", "delattr(sys.stderr, 'close')", null],
["EvalCode", "setattr(sys.stdout, 'getvalue', lambda : 2)", null],
["EvalCode", "def foo():\n global stdout\n exec 'stdout = 2'\n", null],
["EvalCode", "def foo():\n global stdout\n exec('stdout = 2')\n", null],
["EvalCode", "setattr(sys.stderr, 'close', foo)", null]
],
"ACTIONS": {
"stored": [
["AddRecord", "_grist_REPL_Hist", 7,
{"code": "*!@&$fjjj112#(8!", "errorText": " File \"<input>\", line 1\n *!@&$fjjj112#(8!\n ^\nSyntaxError: invalid syntax\n", "outputText": ""}],
{"code": "not correct c", "errorText": " File \"<input>\", line 1\n not correct c\n ^\nSyntaxError: invalid syntax\n", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 8,
{"code": "import sys", "errorText": "", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 9,
@ -2850,15 +2849,13 @@
["AddRecord", "_grist_REPL_Hist", 10,
{"code": "sys.stdout = None", "errorText": "", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 11,
{"code": "delattr(sys.stderr, 'close')", "errorText": "Traceback (most recent call last):\n File \"<input>\", line 1, in <module>\nAttributeError: StringIO instance has no attribute 'close'\n", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 12,
{"code": "setattr(sys.stdout, 'getvalue', lambda : 2)", "errorText": "", "outputText": 2}],
["AddRecord", "_grist_REPL_Hist", 12,
{"code": "def foo():\n global stdout\n exec('stdout = 2')\n", "errorText": "", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 13,
{"code": "def foo():\n global stdout\n exec 'stdout = 2'\n", "errorText": "", "outputText": ""}],
["AddRecord", "_grist_REPL_Hist", 14,
{"code": "setattr(sys.stderr, 'close', foo)", "errorText": "", "outputText": ""}]
],
"direct": [true, true, true, true, true, true, true, true],
"direct": [true, true, true, true, true, true, true],
"undo": [
["RemoveRecord", "_grist_REPL_Hist", 7],
["RemoveRecord", "_grist_REPL_Hist", 8],
@ -2866,10 +2863,9 @@
["RemoveRecord", "_grist_REPL_Hist", 10],
["RemoveRecord", "_grist_REPL_Hist", 11],
["RemoveRecord", "_grist_REPL_Hist", 12],
["RemoveRecord", "_grist_REPL_Hist", 13],
["RemoveRecord", "_grist_REPL_Hist", 14]
["RemoveRecord", "_grist_REPL_Hist", 13]
],
"retValue" : [true,false,false,true,true,true,true,true,true,true ]
"retValue" : [true,false,false,true,true,true,true,true,true ]
}
}]
]

View File

@ -59,7 +59,7 @@ def parse_testscript(script_path=None):
all_lines.append(line)
full_text = "".join(all_lines)
script = byteify(json.loads(full_text))
script = json.loads(full_text)
samples = {}
test_cases = []
@ -109,16 +109,6 @@ def parse_test_sample(obj, samples={}):
return {"SCHEMA": schema, "DATA": data}
def byteify(data):
  """
  Convert all unicode strings in a parsed JSON object into utf8-encoded strings. We deal with
  utf8-encoded strings throughout the test.
  """
  # NOTE: Python 2 only — `unicode` is the Py2 builtin text type and does not
  # exist in Python 3 (this helper is removed when tests move to unicode).
  if isinstance(data, unicode):
    return data.encode('utf-8')
  # Presumably convert_recursive_helper applies byteify to every element of a
  # nested list/dict structure, leaving other scalars untouched — see actions.py.
  return actions.convert_recursive_helper(byteify, data)
def replace_nans(data):
"""
Convert all NaNs and Infinities in the data to descriptive strings, since they cannot be

View File

@ -157,7 +157,11 @@ class Combiner(Builder):
def __init__(self, parts):
self._parts = parts
self._offsets = []
text_parts = [(p if isinstance(p, basestring) else p.get_text()) for p in self._parts]
text_parts = [
(p if isinstance(p, six.text_type) else
p.decode('utf8') if isinstance(p, six.binary_type) else
p.get_text())
for p in self._parts]
self._text = ''.join(text_parts)
offset = 0

View File

@ -0,0 +1,16 @@
import urllib
import six
from six.moves import urllib_parse
original_quote = urllib_parse.quote
def patched_quote(s, safe='/'):
  """
  A quote() that tolerates unicode input (encoding it as utf8 first) and that
  always returns unicode, regardless of what the underlying quote produced.
  """
  raw = s.encode('utf8') if isinstance(s, six.text_type) else s
  quoted = original_quote(raw, safe=safe)
  return quoted.decode('utf8') if isinstance(quoted, six.binary_type) else quoted
urllib.quote = patched_quote

View File

@ -5,6 +5,7 @@ import json
import sys
import six
from six.moves import xrange
import acl
from acl_formula import parse_acl_formula_json
@ -109,7 +110,7 @@ def from_repr(user_action):
try:
return action_type(*user_action[1:])
except TypeError as e:
raise TypeError("%s: %s" % (user_action[0], e.message))
raise TypeError("%s: %s" % (user_action[0], str(e)))
def _make_clean_col_info(col_info, col_id=None):
"""
@ -332,7 +333,7 @@ class UserActions(object):
# Invalidate new records, including the omitted columns that may have default formulas,
# in order to get dynamically-computed default values.
omitted_cols = table.all_columns.viewkeys() - column_values.viewkeys()
omitted_cols = six.viewkeys(table.all_columns) - six.viewkeys(column_values)
self._engine.invalidate_records(table_id, filled_row_ids, data_cols_to_recompute=omitted_cols)
return filled_row_ids
@ -599,18 +600,16 @@ class UserActions(object):
col_rec = self._docmodel.get_column_rec(formula_table, formula_col)
# Create a patch and append to the list for this col_rec.
name = col_id or table_id
# Positions are obtained from unicode version of formulas, so that's what we must patch
formula = col_rec.formula.decode('utf8')
formula = col_rec.formula
patch = textbuilder.make_patch(formula, pos, pos + len(name), new_name)
patches_map.setdefault(col_rec, []).append(patch)
# Apply the collected patches to each affected formula, converting to unicode to apply the
# patches and back to byte string for how we maintain string values.
# Apply the collected patches to each affected formula
result = {}
for col_rec, patches in six.iteritems(patches_map):
formula = col_rec.formula.decode('utf8')
for col_rec, patches in patches_map.items():
formula = col_rec.formula
replacer = textbuilder.Replacer(textbuilder.Text(formula), patches)
result[col_rec] = replacer.get_text().encode('utf8')
result[col_rec] = replacer.get_text()
return result

View File

@ -12,7 +12,6 @@ data structure for values of the wrong type, and the memory savings aren't that
the extra complexity.
"""
import csv
import cStringIO
import datetime
import json
import six
@ -31,7 +30,7 @@ _type_defaults = {
'Attachments': None,
'Blob': None,
'Bool': False,
'Choice': '',
'Choice': u'',
'ChoiceList': None,
'Date': None,
'DateTime': None,
@ -42,7 +41,7 @@ _type_defaults = {
'PositionNumber': float('inf'),
'Ref': 0,
'RefList': None,
'Text': '',
'Text': u'',
}
def get_type_default(col_type):
@ -131,10 +130,7 @@ class BaseColumnType(object):
except Exception as e:
# If conversion failed, return a string to serve as alttext.
try:
if isinstance(value_to_convert, six.text_type):
# str() will fail for a non-ascii unicode object, which needs an explicit encoding.
return value_to_convert.encode('utf8')
return str(value_to_convert)
return six.text_type(value_to_convert)
except Exception:
# If converting to string failed, we should still produce something.
return objtypes.safe_repr(value_to_convert)
@ -157,7 +153,12 @@ class Text(BaseColumnType):
"""
@classmethod
def do_convert(cls, value):
return str(value) if value is not None else None
if isinstance(value, six.binary_type):
return value.decode('utf8')
elif value is None:
return None
else:
return six.text_type(value)
@classmethod
def is_right_type(cls, value):
@ -176,11 +177,11 @@ class Blob(BaseColumnType):
"""
@classmethod
def do_convert(cls, value):
return str(value) if value is not None else None
return value
@classmethod
def is_right_type(cls, value):
return isinstance(value, (basestring, NoneType))
return isinstance(value, (six.binary_type, NoneType))
class Any(BaseColumnType):
@ -190,7 +191,7 @@ class Any(BaseColumnType):
@classmethod
def do_convert(cls, value):
# Convert AltText values to plain text when assigning to type Any.
return str(value) if isinstance(value, AltText) else value
return six.text_type(value) if isinstance(value, AltText) else value
class Bool(BaseColumnType):
@ -206,7 +207,7 @@ class Bool(BaseColumnType):
if isinstance(value, (float, six.integer_types)):
return True
if isinstance(value, AltText):
value = str(value)
value = six.text_type(value)
if isinstance(value, six.string_types):
if value.lower() in ("false", "no", "0"):
return False
@ -334,13 +335,13 @@ class ChoiceList(BaseColumnType):
# If it's a string that looks like JSON, try to parse it as such.
if value.startswith('['):
try:
return tuple(str(item) for item in json.loads(value))
return tuple(six.text_type(item) for item in json.loads(value))
except Exception:
pass
return value
else:
# Accepts other kinds of iterables; if that doesn't work, fail the conversion too.
return tuple(str(item) for item in value)
return tuple(six.text_type(item) for item in value)
@classmethod
def is_right_type(cls, value):
@ -362,7 +363,7 @@ class ChoiceList(BaseColumnType):
def toString(cls, value):
if isinstance(value, (tuple, list)):
try:
buf = cStringIO.StringIO()
buf = six.StringIO()
csv.writer(buf).writerow(value)
return buf.getvalue().strip()
except Exception:
@ -434,7 +435,7 @@ class Reference(Id):
@classmethod
def typeConvert(cls, value, ref_table, visible_col=None): # pylint: disable=arguments-differ
if ref_table and visible_col:
return ref_table.lookupOne(**{visible_col: value}) or str(value)
return ref_table.lookupOne(**{visible_col: value}) or six.text_type(value)
else:
return value