(core) Update logging in sandbox code, and log tracebacks as single log messages.

Summary:
- Replace the custom logger module with the standard 'logging' module.
- When a log message from the sandbox includes newlines (e.g. for tracebacks),
  keep those lines together in the Node log message.

  Previously, each line became a separate message, which made tracebacks
  difficult to follow, particularly in prod where each line turns into a
  separate message object.

- Fix assorted lint errors.

Test Plan: Added a test for the log-line splitting and escaping logic.

Reviewers: georgegevoian

Reviewed By: georgegevoian

Differential Revision: https://phab.getgrist.com/D3956
Dmitry S 2023-07-18 11:20:02 -04:00
parent 7fd48364df
commit 534615dd50
46 changed files with 154 additions and 249 deletions
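To make the intended behavior concrete, here is an illustrative TypeScript sketch (not part of the commit; splitSandboxLog is a hypothetical, simplified stand-in for the real prefixer in the diffs below). It shows the new splitting rule: buffered sandbox stderr is broken only where the next "[LEVEL]"-prefixed message begins, so a multi-line traceback stays together:

    // Hypothetical, simplified version of the new splitting rule.
    function splitSandboxLog(buffered: string): string[] {
      const messages: string[] = [];
      let partial = buffered;
      let newline: number;
      // Break only where the next "[LEVEL] ..." message starts.
      while (partial && (newline = partial.indexOf("\n[")) !== -1) {
        messages.push(partial.slice(0, newline));
        partial = partial.slice(newline + 1);
      }
      if (partial.endsWith("\n")) { messages.push(partial.slice(0, -1)); }
      return messages;
    }

    const chunk =
      "[ERROR] [grist.engine] Traceback (most recent call last):\n" +
      '  File "usercode.py", line 3, in <module>\n' +
      "ZeroDivisionError: division by zero\n" +
      "[INFO] [grist.main] recovered\n";

    // Splitting at every "\n" (the old behavior) yields 4 messages here;
    // splitting at "\n[" yields 2, with the traceback intact.
    console.log(splitSandboxLog(chunk).length);  // -> 2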

View File

@@ -279,7 +279,7 @@ export class NSandbox implements ISandbox {
if (!this._streamToSandbox) {
throw new Error('expected streamToSandbox to be configured');
}
-const sandboxStderrLogger = sandboxUtil.makeLinePrefixer('Sandbox stderr: ', this._logMeta);
+const sandboxStderrLogger = sandboxUtil.makeLogLinePrefixer('Sandbox stderr: ', this._logMeta);
this.childProc.stderr!.on('data', data => {
this._lastStderr = data;
sandboxStderrLogger(data);

View File

@@ -33,16 +33,32 @@ export const EXC = false;
 * Binary data is encoded as with JSON.stringify.
 */
export function makeLinePrefixer(prefix: string, logMeta: object) {
+  return _makeLinePrefixer(prefix, logMeta, text => text.indexOf('\n'));
+}
+
+/**
+ * Same as makeLinePrefixer, but avoids splitting lines except when a line starts with '[', since
+ * the sandbox prefixes all log messages with "[LEVEL]" prefix.
+ */
+export function makeLogLinePrefixer(prefix: string, logMeta: object) {
+  return _makeLinePrefixer(prefix, logMeta, text => {
+    const newline = text.indexOf("\n[");
+    // If no next log message, split at the last newline. Any earlier newlines would be included.
+    return (newline !== -1) ? newline : text.lastIndexOf("\n");
+  });
+}
+
+function _makeLinePrefixer(prefix: string, logMeta: object, findLineEnd: (text: string) => number) {
  let partial = '';
  return (data: Uint8Array) => {
    partial += MemBuffer.arrayToString(data);
    let newline;
-    while ((newline = partial.indexOf("\n")) !== -1) {
+    while (partial && (newline = findLineEnd(partial)) !== -1) {
      const line = partial.slice(0, newline);
      partial = partial.slice(newline + 1);
      // Escape some parts of the string by serializing it to JSON (without the quotes).
      log.origLog('info', "%s%s", prefix,
-        JSON.stringify(line).slice(1, -1).replace(/\\"/g, '"').replace(/\\\\/g, '\\'),
+        JSON.stringify(line).slice(1, -1).replace(/\\(['"\\])/g, '$1').replace(/\\n/g, '\n'),
        logMeta);
    }
  };
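The escaping change in the last hunk is subtle. JSON.stringify is still used to make control characters visible, but where the old code restored only quotes and backslashes (embedded newlines never occurred, since every "\n" was a split point), the new code must also restore real newlines, while leaving other escapes such as tabs or NULs visible. An illustrative standalone sketch of the same round-trip (escapeForLog is a hypothetical name, not the committed helper):

    // Hypothetical helper mirroring the escaping in _makeLinePrefixer above.
    function escapeForLog(line: string): string {
      return JSON.stringify(line).slice(1, -1)  // escape, then drop the surrounding quotes
        .replace(/\\(['"\\])/g, '$1')           // restore quotes and backslashes
        .replace(/\\n/g, '\n');                 // restore real newlines
    }

    console.log(escapeForLog('bad "value"\n\tdetails\u0000'));
    // -> bad "value"
    //    \tdetails\u0000   (the tab and NUL remain visibly escaped)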

View File

@@ -3,13 +3,13 @@
# and to produce the ActionBundles expected by other code.
import json
+import logging
from acl_formula import parse_acl_grist_entities, parse_acl_formula_json
import action_obj
-import logger
import textbuilder
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class Permissions(object):
@@ -68,7 +68,7 @@ def prepare_acl_table_renames(docmodel, useractions, table_renames_dict):
rule_info["tableId"] = table_renames_dict[rule_info.get("tableId")]
rule_updates.append((rule_rec, {'userAttributes': json.dumps(rule_info)}))
except Exception as e:
-log.warn("Error examining aclRule: %s" % (e,))
+log.warning("Error examining aclRule: %s", e)
def do_renames():
useractions.doBulkUpdateFromPairs('_grist_ACLResources', resource_updates)
@@ -104,7 +104,7 @@ def prepare_acl_col_renames(docmodel, useractions, col_renames_dict):
rule_info["lookupColId"] = new_col_id
rule_updates.append((rule_rec, {'userAttributes': json.dumps(rule_info)}))
except Exception as e:
-log.warn("Error examining aclRule: %s" % (e,))
+log.warning("Error examining aclRule: %s", e)
# Go through again checking if anything in ACL formulas is affected by the rename.
for rule_rec in docmodel.aclRules.all:

View File

@@ -2,6 +2,7 @@ import ast
import contextlib
import itertools
import linecache
+import logging
import re
import textwrap
@@ -11,8 +12,7 @@ import six
import friendly_errors
import textbuilder
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
DOLLAR_REGEX = re.compile(r'\$(?=[a-zA-Z_][a-zA-Z_0-9]*)')

View File

@@ -1,4 +1,5 @@
import json
+import logging
import types
from collections import namedtuple
from numbers import Number
@@ -11,10 +12,9 @@ import usertypes
import relabeling
import relation
import moment
-import logger
from sortedcontainers import SortedListWithKey
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
MANUAL_SORT = 'manualSort'
MANUAL_SORT_COL_INFO = {

View File

@@ -1,11 +1,11 @@
+import logging
import six
import actions
-import logger
import schema
from objtypes import strict_equal
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class DocActions(object):
def __init__(self, engine):

View File

@@ -4,6 +4,7 @@ The data engine ties the code generated from the schema with the document data,
dependency tracking.
"""
import itertools
+import logging
import re
import rlcompleter
import sys
@@ -13,7 +14,7 @@ from collections import namedtuple, OrderedDict, defaultdict
import six
from six.moves import zip
-from six.moves.collections_abc import Hashable # pylint:disable-all
+from six.moves.collections_abc import Hashable # pylint:disable=import-error,no-name-in-module
from sortedcontainers import SortedSet
import acl
@@ -26,7 +27,6 @@ import docactions
import docmodel
from fake_std_streams import FakeStdStreams
import gencode
-import logger
import match_counter
import objtypes
from objtypes import strict_equal
@@ -40,7 +40,7 @@ import useractions
import column
import urllib_patch # noqa imported for side effect # pylint:disable=unused-import
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
if six.PY2:
reload(sys)
@@ -451,7 +451,7 @@ class Engine(object):
if n:
matched_cols = matched_cols[:n]
-log.info('Found column from values in %.3fs' % (time.time() - start_time))
+log.info('Found column from values in %.3fs', time.time() - start_time)
return [c[1] for c in matched_cols]
def assert_schema_consistent(self):
@@ -495,9 +495,9 @@
self.dump_recompute_map()
def dump_recompute_map(self):
-log.debug("Recompute map (%d nodes):" % len(self.recompute_map))
+log.debug("Recompute map (%d nodes):", len(self.recompute_map))
for node, dirty_rows in six.iteritems(self.recompute_map):
-log.debug(" Node %s: %s" % (node, dirty_rows))
+log.debug(" Node %s: %s", node, dirty_rows)
def _use_node(self, node, relation, row_ids=[]):
# This is used whenever a formula accesses any part of any record. It's hot code, and
@@ -849,7 +849,7 @@
is_first = node not in self._is_node_exception_reported
if is_first:
self._is_node_exception_reported.add(node)
-log.info(value.details)
+log.info("Formula error in %s: %s", node, value.details)
# strip out details after logging
value = objtypes.RaisedException(value.error, user_input=value.user_input)
@@ -1304,7 +1304,7 @@
# If we get an exception, we should revert all changes applied so far, to keep things
# consistent internally as well as with the clients and database outside of the sandbox
# (which won't see any changes in case of an error).
-log.info("Failed to apply useractions; reverting: %r" % (e,))
+log.info("Failed to apply useractions; reverting: %r", e)
self._undo_to_checkpoint(checkpoint)
# Check schema consistency again. If this fails, something is really wrong (we tried to go
@@ -1313,7 +1313,7 @@
if self._schema_updated:
self.assert_schema_consistent()
except Exception:
-log.error("Inconsistent schema after revert on failure: %s" % traceback.format_exc())
+log.error("Inconsistent schema after revert on failure: %s", traceback.format_exc())
# Re-raise the original exception
# In Python 2, 'raise' raises the most recent exception,
@@ -1354,7 +1354,7 @@
Applies a single user action to the document, without running any triggered updates.
A UserAction is a tuple whose first element is the name of the action.
"""
-log.debug("applying user_action %s" % (user_action,))
+log.debug("applying user_action %s", user_action)
return getattr(self.user_actions, user_action.__class__.__name__)(*user_action)
def apply_doc_action(self, doc_action):
@@ -1362,7 +1362,6 @@
Applies a doc action, which is a step of a user action. It is represented by an Action object
as defined in actions.py.
"""
-#log.warn("Engine.apply_doc_action %s" % (doc_action,))
self._gone_columns = []
action_name = doc_action.__class__.__name__
@@ -1385,7 +1384,7 @@
try:
self.rebuild_usercode()
except Exception:
-log.error("Error rebuilding usercode after restoring schema: %s" % traceback.format_exc())
+log.error("Error rebuilding usercode after restoring schema: %s", traceback.format_exc())
# Re-raise the original exception
# In Python 2, 'raise' raises the most recent exception,
@@ -1524,7 +1523,7 @@
if new_checkpoint != checkpoint:
(len_calc, len_stored, len_undo, len_ret) = checkpoint
undo_actions = self.out_actions.undo[len_undo:]
-log.info("Reverting %d doc actions" % len(undo_actions))
+log.info("Reverting %d doc actions", len(undo_actions))
self.user_actions.ApplyUndoActions([actions.get_action_repr(a) for a in undo_actions])
del self.out_actions.calc[len_calc:]
del self.out_actions.stored[len_stored:]

View File

@@ -15,7 +15,7 @@ The schema for grist data is:
"formula": <opt_string>,
}
"""
-import linecache
+import logging
import re
import imp
from collections import OrderedDict
@@ -28,8 +28,7 @@ import summary
import table
import textbuilder
from usertypes import get_type_default
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
indent_str = " "

View File

@@ -3,6 +3,7 @@
A module for creating and sanitizing identifiers
"""
import itertools
+import logging
import re
import unicodedata
from keyword import iskeyword
@@ -10,9 +11,7 @@ from string import ascii_uppercase
import six
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]+')
_invalid_ident_start_re = re.compile(r'^(?=[0-9_])')

View File

@@ -1,10 +1,10 @@
+import logging
from six.moves import zip
import column
import identifiers
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
# Prefix for transform columns created during imports.
_import_transform_col_prefix = 'gristHelper_Import_'
@@ -133,11 +133,11 @@ class ImportActions(object):
tables = self._docmodel.tables
hidden_table_rec = tables.lookupOne(tableId=hidden_table_id)
src_cols = {c.colId for c in hidden_table_rec.columns}
-log.debug("destCols:" + repr(transform_rule['destCols']))
+log.debug("destCols: %r", transform_rule['destCols'])
dest_cols = transform_rule['destCols']
-log.debug("_MakeImportTransformColumns: {}".format("gen_all" if gen_all else "optimize"))
+log.debug("_MakeImportTransformColumns: %s", "gen_all" if gen_all else "optimize")
# Calling rebuild_usercode once per added column is wasteful and can be very slow.
self._engine._should_rebuild_usercode = False

View File

@@ -14,6 +14,7 @@ from imports import import_utils
log = logging.getLogger(__name__)
+log.setLevel(logging.WARNING)
SCHEMA = [
{

View File

@@ -4,16 +4,17 @@ and returns a object formatted so that it can be used by grist for a bulk add re
"""
import logging
-import six
import openpyxl
from openpyxl.utils.datetime import from_excel
-from openpyxl.worksheet import _reader
+from openpyxl.worksheet import _reader # pylint:disable=no-name-in-module
+import six
from six.moves import zip
import parse_data
from imports import import_utils
log = logging.getLogger(__name__)
+log.setLevel(logging.WARNING)
# Some strange Excel files have values that are marked as dates but are invalid as dates.

View File

@@ -1,74 +0,0 @@
-"""
-Logging for code running in the sandbox. The output simply goes to stderr (which gets to the
-console of the Node process), but the levels allow some configuration.
-
-We don't use the `logging` module because it assumes more about the `time` module than we have.
-
-Usage:
-  import logger
-  log = logger.Logger(__name__, logger.DEBUG)   # Or logger.WARN; default is logger.INFO.
-  log.info("Hello world")
-    -> produces "[I] [foo.bar] Hello world"
-"""
-
-import sys
-
-# Level definitions
-DEBUG = 10
-INFO = 20
-WARN = 30
-ERROR = 40
-CRITICAL = 50
-
-# Level strings
-level_strings = {
-  DEBUG: 'DEBUG',
-  INFO: 'INFO',
-  WARN: 'WARN',
-  ERROR: 'ERROR',
-  CRITICAL: 'CRITICAL',
-}
-
-def log_stderr(level, name, msg):
-  sys.stderr.write("[%s] [%s] %s\n" % (level_strings.get(level, '?'), name, msg))
-  sys.stderr.flush()
-
-_log_handler = log_stderr
-
-def set_handler(log_handler):
-  """
-  Allows overriding the handler for all log messages. The handler should be a function, called as
-  log_handler(level, name, message).
-  Returns the handler which was set previously.
-  """
-  global _log_handler # pylint: disable=global-statement
-  prev = _log_handler
-  _log_handler = log_handler
-  return prev
-
-class Logger(object):
-  """
-  The object that actually provides the logging interface, specifically the methods debug, info,
-  warn, error, and critical. The constructor takes an argument for a name that gets included in
-  each message, and a minimum level, below which messages get ignored.
-  """
-  def __init__(self, name, min_level=INFO):
-    self._name = name
-    self._min_level = min_level
-
-  def _log(self, level, msg):
-    if level >= self._min_level:
-      _log_handler(level, self._name, msg)
-
-  def debug(self, msg):
-    self._log(DEBUG, msg)
-
-  def info(self, msg):
-    self._log(INFO, msg)
-
-  def warn(self, msg):
-    self._log(WARN, msg)
-
-  def error(self, msg):
-    self._log(ERROR, msg)
-
-  def critical(self, msg):
-    self._log(CRITICAL, msg)

View File

@@ -1,4 +1,5 @@
import itertools
+import logging
from abc import abstractmethod
import six
@@ -11,8 +12,7 @@ import twowaymap
import usertypes
from functions.lookup import _Contains
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def _extract(cell_value):

View File

@@ -25,11 +25,20 @@ from acl_formula import parse_acl_formula
from sandbox import get_default_sandbox
from imports.register import register_import_parsers
-import logger
-log = logger.Logger(__name__, logger.INFO)
+# Handler for logging, which flushes each message.
+class FlushingStreamHandler(logging.StreamHandler):
+  def emit(self, record):
+    super(FlushingStreamHandler, self).emit(record)
+    self.flush()
-# Configure logging module to behave similarly to logger. (It may be OK to get rid of logger.)
-logging.basicConfig(format="[%(levelname)s] [%(name)s] %(message)s")
+# Configure logging module to produce messages with log level and logger name.
+logging.basicConfig(format="[%(levelname)s] [%(name)s] %(message)s",
+                    handlers=[FlushingStreamHandler(sys.stderr)],
+                    level=logging.INFO)
+# The default level is INFO. If a different level is desired, add a call like this:
+#   log.setLevel(logging.WARNING)
+log = logging.getLogger(__name__)
def table_data_from_db(table_name, table_data_repr):
if table_data_repr is None:
@@ -57,7 +66,7 @@ def run(sandbox):
# Wrap each method so that it logs a message that it's being called.
@functools.wraps(method)
def wrapper(*args, **kwargs):
-log.debug("calling %s" % method.__name__)
+log.debug("calling %s", method.__name__)
return method(*args, **kwargs)
sandbox.register(method.__name__, wrapper)
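A side note on how these pieces fit (illustrative reasoning, not part of the diff): the format configured here guarantees that every sandbox log record starts with "[LEVEL]", which is exactly the message boundary that makeLogLinePrefixer in sandboxUtil.ts keys on, while FlushingStreamHandler ensures each record reaches the Node process promptly. A minimal sketch of that invariant:

    // Hypothetical sketch: the Python-side format "[%(levelname)s] [%(name)s] %(message)s"
    // makes each record start with "[", marking a boundary for the Node-side splitter.
    function formatRecord(levelname: string, name: string, message: string): string {
      return `[${levelname}] [${name}] ${message}`;
    }

    const line = formatRecord("WARNING", "grist.engine", "Formula error in Table1.A");
    console.log(/^\[/.test(line));  // -> true: begins a new message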

View File

@@ -1,4 +1,6 @@
+# pylint:disable=too-many-lines
import json
+import logging
import re
from collections import defaultdict
@@ -10,10 +12,9 @@ import identifiers
import schema
import summary
import table_data_set
-import logger
from column import is_visible_column
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
# PHILOSOPHY OF MIGRATIONS.
#
@@ -388,8 +389,8 @@ def migration7(tdset):
new_name = summary.encode_summary_table_name(source_table_name, groupby_col_ids)
new_name = identifiers.pick_table_ident(new_name, avoid=table_name_set)
table_name_set.add(new_name)
-log.warn("Upgrading summary table %s for %s(%s) to %s" % (
-  t.tableId, source_table_name, groupby_colrefs, new_name))
+log.warning("Upgrading summary table %s for %s(%s) to %s",
+  t.tableId, source_table_name, groupby_colrefs, new_name)
# Remove the "lookupOrAddDerived" column from the source table (which is named using the
# summary table name for its colId).
@@ -418,7 +419,7 @@
groupby_cols.add(sum_col)
source_cols.append((sum_col, src_col.id))
else:
-log.warn("Upgrading summary table %s: couldn't find column %s" % (t.tableId, col_ref))
+log.warning("Upgrading summary table %s: couldn't find column %s", t.tableId, col_ref)
# Finally, we have to remove all non-formula columns that are not groupby-columns (e.g.
# 'manualSort'), because the new approach assumes ALL non-formula columns are for groupby.
@@ -1050,8 +1051,8 @@ def migration31(tdset):
continue
new_name = identifiers.pick_table_ident(new_name, avoid=table_name_set)
table_name_set.add(new_name)
-log.warn("Upgrading summary table %s for %s(%s) to %s" % (
-  t.tableId, source_table.tableId, groupby_col_ids, new_name))
+log.warning("Upgrading summary table %s for %s(%s) to %s",
+  t.tableId, source_table.tableId, groupby_col_ids, new_name)
# Schedule a rename of the summary table.
table_renames.append((t, new_name))

View File

@@ -15,6 +15,7 @@
import six
from six.moves import zip, xrange
log = logging.getLogger(__name__)
+log.setLevel(logging.WARNING)
# Typecheck using type(value) instead of isinstance(value, some_type) makes parsing 25% faster

View File

@@ -10,10 +10,13 @@ Usage:
"""
import os
+import logging
import marshal
import sys
import traceback
+log = logging.getLogger(__name__)
class CarefulReader(object):
"""
Wrap a pipe when reading from Pyodide, to work around marshaling
@@ -40,9 +43,6 @@ class CarefulReader(object):
def __getattr__(self, attr):
return getattr(self._file, attr)
-def log(msg):
-  sys.stderr.write(str(msg) + "\n")
-  sys.stderr.flush()
class Sandbox(object):
"""
@@ -93,6 +93,7 @@
@classmethod
def use_pyodide(cls):
+# pylint: disable=import-error,no-member
import js # Get pyodide object.
external_input = CarefulReader(sys.stdin.buffer)
external_output_method = lambda data: js.sendFromSandbox(data)
@@ -146,7 +147,7 @@
ret = self._functions[fname](*args)
self._send_to_js(Sandbox.DATA, ret)
except Exception as e:
-traceback.print_exc()
+log.warn("Call error in %s: %s", fname, traceback.format_exc())
self._send_to_js(Sandbox.EXC, "%s %s" % (type(e).__name__, e))
if break_on_response:
raise Exception("Sandbox disconnected unexpectedly")

View File

@@ -1,13 +1,13 @@
from collections import namedtuple
import json
+import logging
import six
from column import is_visible_column
import sort_specs
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
ColInfo = namedtuple('ColInfo', ('colId', 'type', 'isFormula', 'formula',
'widgetOptions', 'label'))
@@ -134,7 +134,7 @@ def _update_sort_spec(sort_spec, old_table, new_table):
new_sort_spec = [col_spec for col_spec in new_sort_spec if sort_specs.col_ref(col_spec)]
return json.dumps(new_sort_spec, separators=(',', ':'))
except Exception:
-log.warn("update_summary_section: can't parse sortColRefs JSON; clearing sortColRefs")
+log.warning("update_summary_section: can't parse sortColRefs JSON; clearing sortColRefs")
return ''

View File

@@ -1,5 +1,6 @@
import collections
import itertools
+import logging
import types
import six
@@ -9,13 +10,12 @@ import column
import depend
import docmodel
import functions
-import logger
import lookup
from records import adjust_record, Record as BaseRecord, RecordSet as BaseRecordSet
import relation as relation_module # "relation" is used too much as a variable name below.
import usertypes
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def get_default_func_name(col_id):

View File

@@ -1,11 +1,11 @@
+import logging
from six.moves import zip as izip
import six
import actions
from usertypes import get_type_default
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TableDataSet(object):
"""
@@ -32,7 +32,7 @@ class TableDataSet(object):
try:
getattr(self, action.__class__.__name__)(*action)
except Exception as e:
-log.warn("ERROR applying action %s: %s" % (action, e))
+log.warning("ERROR applying action %s: %s", action, e)
raise
def apply_doc_actions(self, doc_actions):

View File

@@ -1,10 +1,10 @@
-import logger
+import logging
import testutil
import test_engine
from test_engine import Table, Column, View, Section, Field
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestColumnActions(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({

View File

@@ -1,9 +1,9 @@
import time
-import logger
+import logging
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestDefaultFormulas(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({

View File

@@ -1,10 +1,10 @@
+import logging
import actions
-import logger
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def _bulk_update(table_name, col_names, row_data):
return actions.BulkUpdateRecord(

View File

@@ -1,10 +1,10 @@
-import logger
+import logging
import testutil
import test_engine
from test_engine import Table, Column
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestUserActions(test_engine.EngineTestCase):
ref_sample = testutil.parse_test_sample({

View File

@@ -1,11 +1,11 @@
+import logging
import actions
-import logger
import testsamples
import test_engine
from test_engine import Table, Column
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestDocModel(test_engine.EngineTestCase):

View File

@@ -1,6 +1,7 @@
import difflib
import functools
import json
+import logging
import unittest
from collections import namedtuple
from pprint import pprint
@@ -10,12 +11,11 @@
import six
import actions
import column
import engine
-import logger
import useractions
import testutil
import objtypes
-log = logger.Logger(__name__, logger.DEBUG)
+log = logging.getLogger(__name__)
# These are for use in verifying metadata using assertTables/assertViews methods. E.g.
# self.assertViews([View(1, sections=[Section(1, parentKey="record", tableRef=1, fields=[
@@ -35,17 +35,14 @@ class EngineTestCase(unittest.TestCase):
Provides functionality for verifying engine actions and data, which is general enough to be
useful for other tests. It is also used by TestEngine below.
"""
-# Place to keep the original log handler (which we modify for the duration of the test).
-# We can't use cls._orig_log_handler directly because then Python it's an actual class method.
-_orig_log_handler = []
@classmethod
def setUpClass(cls):
-cls._orig_log_handler.append(logger.set_handler(testutil.limit_log_stderr(logger.WARN)))
+cls._orig_log_level = logging.root.level
+logging.root.setLevel(logging.WARNING)
@classmethod
def tearDownClass(cls):
-logger.set_handler(cls._orig_log_handler.pop())
+logging.root.setLevel(cls._orig_log_level)
def setUp(self):
@@ -247,7 +244,10 @@
if sort:
row_ids.sort(key=lambda r: sort(table.get_record(r)))
-observed_col_data = {c.col_id: [c.raw_get(r) for r in row_ids] for c in columns if c.col_id != "id"}
+observed_col_data = {
+  c.col_id: [c.raw_get(r) for r in row_ids]
+  for c in columns if c.col_id != "id"
+}
observed = actions.TableData(table_name, row_ids, observed_col_data)
self.assertEqualDocData({table_name: observed}, {table_name: expected},
col_names=col_names)
@@ -353,7 +353,7 @@
def apply_user_action(self, user_action_repr, is_undo=False, user=None):
if not is_undo:
-log.debug("Applying user action %r" % (user_action_repr,))
+log.debug("Applying user action %r", user_action_repr)
if self._undo_state_tracker is not None:
doc_state = self.getFullEngineData()
@@ -384,7 +384,7 @@ def test_undo(test_method):
self._undo_state_tracker = []
test_method(self)
for (expected_engine_data, undo_actions) in reversed(self._undo_state_tracker):
-log.debug("Applying undo actions %r" % (undo_actions,))
+log.debug("Applying undo actions %r", undo_actions)
self.apply_undo_actions(undo_actions)
self.assertEqualDocData(self.getFullEngineData(), expected_engine_data)
return wrapped

View File

@@ -1,8 +1,8 @@
# pylint: disable=line-too-long
-import logger
+import logging
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestImportActions(test_engine.EngineTestCase):

View File

@@ -1,38 +0,0 @@
-import unittest
-
-import logger
-
-class TestLogger(unittest.TestCase):
-  def _log_handler(self, level, name, msg):
-    self.messages.append((level, name, msg))
-
-  def setUp(self):
-    self.messages = []
-    self.orig_handler = logger.set_handler(self._log_handler)
-
-  def tearDown(self):
-    logger.set_handler(self.orig_handler)
-
-  def test_logger(self):
-    log = logger.Logger("foo", logger.INFO)
-    log.info("Hello Info")
-    log.debug("Hello Debug")
-    log.warn("Hello Warn")
-    self.assertEqual(self.messages, [
-      (logger.INFO, 'foo', 'Hello Info'),
-      (logger.WARN, 'foo', 'Hello Warn'),
-    ])
-
-    del self.messages[:]
-    log = logger.Logger("baz", logger.DEBUG)
-    log.debug("Hello Debug")
-    log.info("Hello Info")
-    self.assertEqual(self.messages, [
-      (logger.DEBUG, 'baz', 'Hello Debug'),
-      (logger.INFO, 'baz', 'Hello Info'),
-    ])
-
-if __name__ == "__main__":
-  unittest.main()

View File

@@ -1,11 +1,11 @@
+import logging
import actions
-import logger
import testsamples
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def _bulk_update(table_name, col_names, row_data):
return actions.BulkUpdateRecord(

View File

@@ -1,15 +1,16 @@
# pylint: disable=line-too-long
import datetime
+import logging
import actions
-import logger
import moment
import objtypes
+from objtypes import RecordStub
import testsamples
import testutil
import test_engine
-from objtypes import RecordStub
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def _bulk_update(table_name, col_names, row_data):
return actions.BulkUpdateRecord(

View File

@@ -2,11 +2,11 @@
This test replicates a bug involving a column conversion after a table rename in the presence of
a RefList. A RefList column type today only appears as a result of detaching a summary table.
"""
-import logger
+import logging
import test_engine
from test_engine import Table, Column
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestRefListRelation(test_engine.EngineTestCase):
def test_ref_list_relation(self):

View File

@@ -1,11 +1,10 @@
+import unittest
+import sys
import relabeling
from sortedcontainers import SortedListWithKey
from six.moves import zip as izip, xrange
-import unittest
-import sys
# Shortcut to keep code more concise.
r = relabeling

View File

@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
-import logger
+import logging
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestRenames(test_engine.EngineTestCase):

View File

@@ -1,11 +1,11 @@
import textwrap
import unittest
-import logger
+import logging
import six
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def _replace_col_name(data, old_name, new_name):

View File

@@ -3,16 +3,15 @@ Test of Summary tables. This has many test cases, so to keep files smaller, it's
files: test_summary.py and test_summary2.py.
"""
+import logging
import actions
-import logger
import summary
import testutil
import test_engine
+from test_engine import Table, Column, View, Section, Field
from useractions import allowed_summary_change
-from test_engine import Table, Column, View, Section, Field
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestSummary(test_engine.EngineTestCase):

View File

@@ -1,16 +1,16 @@
# pylint:disable=too-many-lines
"""
Test of Summary tables. This has many test cases, so to keep files smaller, it's split into two
files: test_summary.py and test_summary2.py.
"""
+import logging
import actions
-import logger
import test_engine
+from test_engine import Table, Column, View, Section, Field
import test_summary
import testutil
-from test_engine import Table, Column, View, Section, Field
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestSummary2(test_engine.EngineTestCase):

View File

@@ -1,13 +1,14 @@
# pylint: disable=line-too-long
"""
Test of Summary tables grouped by ChoiceList columns.
"""
+import logging
import column
-import logger
import lookup
import testutil
from test_engine import EngineTestCase, Table, Column, test_undo
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestSummaryChoiceList(EngineTestCase):

View File

@@ -1,11 +1,11 @@
"""
Some more test cases for summary tables, involving UNDO.
"""
-import logger
+import logging
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestSummaryUndo(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({

View File

@@ -1,10 +1,10 @@
-import logger
+import logging
import testutil
import test_engine
from test_engine import Table, Column, View, Section, Field
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestTableActions(test_engine.EngineTestCase):

View File

@@ -1,9 +1,9 @@
import copy
+import logging
import time
import six
-import logger
import objtypes
import testutil
import test_engine
@@ -11,7 +11,7 @@ from schema import RecalcWhen
# pylint: disable=line-too-long
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
def column_error(table, column, user_input):
return objtypes.RaisedException(

View File

@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
+import logging
import six
-import logger
import testutil
import test_engine
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestTypes(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({

View File

@@ -1,6 +1,7 @@
# pylint:disable=too-many-lines
import json
import types
-import logger
+import logging
import useractions
import testutil
@@ -8,7 +9,7 @@ import test_engine
from test_engine import Table, Column, View, Section, Field
from schema import RecalcWhen
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
class TestUserActions(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({

View File

@@ -6,18 +6,6 @@ import re
import six
import actions
-import logger
-
-def limit_log_stderr(min_level):
-  """
-  Returns a log handler suitable for logger.set_handler(), which logs using log_stderr but only
-  messages at the given level or higher.
-  """
-  def handler(level, name, msg):
-    if level >= min_level:
-      logger.log_stderr(level, name, msg)
-  return handler
def table_data_from_rows(table_id, col_names, rows):
"""

View File

@@ -2,6 +2,7 @@
from collections import namedtuple, Counter, OrderedDict
import re
import json
+import logging
import sys
from contextlib import contextmanager
@@ -27,8 +28,7 @@ import treeview
from table import get_validation_func_name
-import logger
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
_current_module = sys.modules[__name__]

View File

@@ -15,6 +15,7 @@ the extra complexity.
import csv
import datetime
import json
+import logging
import math
import six
@@ -22,9 +23,9 @@ from six import integer_types
import objtypes
from objtypes import AltText, is_int_short
import moment
-import logger
from records import Record, RecordSet
-log = logger.Logger(__name__, logger.INFO)
+log = logging.getLogger(__name__)
NoneType = type(None)