Mirror of https://github.com/gristlabs/grist-core.git (synced 2024-10-27 20:44:07 +00:00)
(core) Add LogMethods helper and use it for more JSON data in logs. Reduce unhelpful logging.
Summary:
- Sharing, Client, DocClients, and HostedStorageManager log messages now all include the available metadata.
- In HostedStorageManager, log numSteps and maxStepTimeMs, in case that helps debug the SQLITE_BUSY problem.
- Replace some action-bundle logging with a JSON version aggregating some info.
- Skip logging the detailed list of actions in production.

Test Plan: Tested manually by eyeballing log output in a dev environment.

Reviewers: paulfitz

Reviewed By: paulfitz

Differential Revision: https://phab.getgrist.com/D3086
Commit: f2f4fe0eca (parent: 8eeeae7fbf)
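For orientation before the diff: the heart of the change is the new LogMethods helper, which standardizes per-component logging with structured metadata. A minimal usage sketch (the session type and metadata fields here are illustrative assumptions, not code from the commit; the real class is added in app/server/lib/LogMethods.ts below):

    import {LogMethods} from 'app/server/lib/LogMethods';

    // Hypothetical stand-in for OptDocSession, to keep the sketch self-contained.
    interface Session { clientId?: string; docId?: string; }

    class ExampleComponent {
      // One LogMethods per component: a fixed message prefix, plus a callback that
      // turns the per-call info argument into structured log metadata.
      private _log = new LogMethods('ExampleComponent ', (s: Session|null) =>
        s ? {clientId: s.clientId, docId: s.docId} : {});

      public doWork(session: Session) {
        // Each message is emitted with the session's metadata attached as JSON fields.
        this._log.info(session, "doWork starting");
        this._log.debug(session, "processed %d items", 42);
      }
    }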
app/common/DocActions.ts

@@ -79,6 +79,10 @@ export function isRenameTable(act: DocAction): act is RenameTable { return act[0
 const SCHEMA_ACTIONS = new Set(['AddTable', 'RemoveTable', 'RenameTable', 'AddColumn',
                                 'RemoveColumn', 'RenameColumn', 'ModifyColumn']);
 
+// Maps each data action to whether it's a bulk action.
+const DATA_ACTIONS = new Set(['AddRecord', 'RemoveRecord', 'UpdateRecord', 'BulkAddRecord',
+                              'BulkRemoveRecord', 'BulkUpdateRecord', 'ReplaceTableData', 'TableData']);
+
 /**
  * Determines whether a given action is a schema action or not.
  */
@@ -87,6 +91,13 @@ export function isSchemaAction(action: DocAction):
   return SCHEMA_ACTIONS.has(action[0]);
 }
 
+export function isDataAction(action: DocAction):
+    action is AddRecord | RemoveRecord | UpdateRecord |
+              BulkAddRecord | BulkRemoveRecord | BulkUpdateRecord |
+              ReplaceTableData | TableDataAction {
+  return DATA_ACTIONS.has(action[0]);
+}
+
 /**
  * Returns the tableId from the action.
  */
@@ -149,6 +160,12 @@ export function getSelectionDesc(action: UserAction, optExcludeVals: boolean): s
   return `table ${table}, row${s} ${rows}; ${columns.join(", ")}`;
 }
 
+export function getNumRows(action: DocAction): number {
+  return !isDataAction(action) ? 0
+    : Array.isArray(action[2]) ? action[2].length
+    : 1;
+}
+
 // Convert from TableColValues (used by DocStorage and external APIs) to TableDataAction (used
 // mainly by the sandbox).
 export function toTableDataAction(tableId: string, colValues: TableColValues): TableDataAction {
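To illustrate the new DocActions helpers (a standalone sketch with made-up actions, not part of the commit):

    import {DocAction, getNumRows, isDataAction} from 'app/common/DocActions';

    // Bulk data actions carry an array of rowIds in position 2, so they count as many rows.
    const bulk: DocAction = ['BulkAddRecord', 'Table1', [1, 2, 3], {Name: ['a', 'b', 'c']}];
    isDataAction(bulk);    // true
    getNumRows(bulk);      // 3

    // Schema actions are not data actions, so they count as zero rows.
    const rename: DocAction = ['RenameTable', 'Table1', 'Table2'];
    getNumRows(rename);    // 0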
app/server/lib/ActiveDoc.ts

@@ -63,6 +63,7 @@ import {makeForkIds} from 'app/server/lib/idUtils';
 import {GRIST_DOC_SQL, GRIST_DOC_WITH_TABLE1_SQL} from 'app/server/lib/initialDocSql';
 import {ISandbox} from 'app/server/lib/ISandbox';
 import * as log from 'app/server/lib/log';
+import {LogMethods} from "app/server/lib/LogMethods";
 import {shortDesc} from 'app/server/lib/shortDesc';
 import {TableMetadataLoader} from 'app/server/lib/TableMetadataLoader';
 import {fetchURL, FileUploadInfo, globalUploadSet, UploadInfo} from 'app/server/lib/uploads';
@@ -140,6 +141,7 @@ export class ActiveDoc extends EventEmitter {
   // result).
   protected _modificationLock: Mutex = new Mutex();
 
+  private _log = new LogMethods('ActiveDoc ', (s: OptDocSession) => this.getLogMeta(s));
   private _triggers: DocTriggers;
   private _dataEngine: Promise<ISandbox>|undefined;
   private _activeDocImport: ActiveDocImport;
@@ -194,7 +196,7 @@ export class ActiveDoc extends EventEmitter {
       // TODO: cache engine requirement for doc in home db so we can retain this parallelism
       // when offering a choice of data engines.
       if (!supportsEngineChoices()) {
-        this._getEngine().catch(e => this.logError({client: null}, `engine for ${docName} failed to launch: ${e}`));
+        this._getEngine().catch(e => this._log.error({client: null}, `engine for ${docName} failed to launch: ${e}`));
       }
 
       this._activeDocImport = new ActiveDocImport(this);
@@ -215,16 +217,10 @@ export class ActiveDoc extends EventEmitter {
     return this._granularAccess.getUserOverride(docSession);
   }
 
-  // Helpers to log a message along with metadata about the request.
-  public logDebug(s: OptDocSession, msg: string, ...args: any[]) { this._log('debug', s, msg, ...args); }
-  public logInfo(s: OptDocSession, msg: string, ...args: any[]) { this._log('info', s, msg, ...args); }
-  public logWarn(s: OptDocSession, msg: string, ...args: any[]) { this._log('warn', s, msg, ...args); }
-  public logError(s: OptDocSession, msg: string, ...args: any[]) { this._log('error', s, msg, ...args); }
-
   // Constructs metadata for logging, given a Client or an OptDocSession.
-  public getLogMeta(docSession: OptDocSession, docMethod?: string): log.ILogMeta {
+  public getLogMeta(docSession: OptDocSession|null, docMethod?: string): log.ILogMeta {
     return {
-      ...getLogMetaFromDocSession(docSession),
+      ...(docSession ? getLogMetaFromDocSession(docSession) : {}),
       docId: this._docName,
       ...(docMethod ? {docMethod} : {}),
     };
@@ -307,7 +303,7 @@ export class ActiveDoc extends EventEmitter {
 
     // If we had a shutdown scheduled, unschedule it.
     if (this._inactivityTimer.isEnabled()) {
-      this.logInfo(docSession, "will stay open");
+      this._log.info(docSession, "will stay open");
       this._inactivityTimer.disable();
     }
     return docSession;
@@ -319,10 +315,10 @@ export class ActiveDoc extends EventEmitter {
    */
   public async shutdown(removeThisActiveDoc: boolean = true): Promise<void> {
     const docSession = makeExceptionalDocSession('system');
-    this.logDebug(docSession, "shutdown starting");
+    this._log.debug(docSession, "shutdown starting");
     this._inactivityTimer.disable();
     if (this.docClients.clientCount() > 0) {
-      this.logWarn(docSession, `Doc being closed with ${this.docClients.clientCount()} clients left`);
+      this._log.warn(docSession, `Doc being closed with ${this.docClients.clientCount()} clients left`);
       await this.docClients.broadcastDocMessage(null, 'docShutdown', null);
       this.docClients.removeAllClients();
     }
@@ -361,9 +357,9 @@ export class ActiveDoc extends EventEmitter {
       } catch (err) {
         // Initialization errors do not matter at this point.
       }
-      this.logDebug(docSession, "shutdown complete");
+      this._log.debug(docSession, "shutdown complete");
     } catch (err) {
-      this.logError(docSession, "failed to shutdown some resources", err);
+      this._log.error(docSession, "failed to shutdown some resources", err);
     }
   }
 
@@ -373,7 +369,7 @@ export class ActiveDoc extends EventEmitter {
    */
   @ActiveDoc.keepDocOpen
   public async createEmptyDocWithDataEngine(docSession: OptDocSession): Promise<ActiveDoc> {
-    this.logDebug(docSession, "createEmptyDocWithDataEngine");
+    this._log.debug(docSession, "createEmptyDocWithDataEngine");
     await this._docManager.storageManager.prepareToCreateDoc(this.docName);
     await this.docStorage.createFile();
     await this._rawPyCall('load_empty');
@@ -417,7 +413,7 @@ export class ActiveDoc extends EventEmitter {
     skipInitialTable?: boolean,  // If set, and document is new, "Table1" will not be added.
   }): Promise<ActiveDoc> {
     const startTime = Date.now();
-    this.logDebug(docSession, "loadDoc");
+    this._log.debug(docSession, "loadDoc");
     try {
       const isNew: boolean = options?.forceNew || await this._docManager.storageManager.prepareLocalDoc(this.docName);
       if (isNew) {
@@ -585,7 +581,7 @@ export class ActiveDoc extends EventEmitter {
 
     // If no more clients, schedule a shutdown.
     if (this.docClients.clientCount() === 0) {
-      this.logInfo(docSession, "will self-close in %d ms", this._inactivityTimer.getDelay());
+      this._log.info(docSession, "will self-close in %d ms", this._inactivityTimer.getDelay());
       this._inactivityTimer.enable();
     }
   }
@@ -646,7 +642,7 @@ export class ActiveDoc extends EventEmitter {
     // and serve the attachment.
     const data = await this.docStorage.getFileData(fileIdent);
     if (!data) { throw new ApiError("Invalid attachment identifier", 404); }
-    this.logInfo(docSession, "getAttachment: %s -> %s bytes", fileIdent, data.length);
+    this._log.info(docSession, "getAttachment: %s -> %s bytes", fileIdent, data.length);
     return data;
   }
 
@@ -654,7 +650,7 @@ export class ActiveDoc extends EventEmitter {
    * Fetches the meta tables to return to the client when first opening a document.
    */
   public async fetchMetaTables(docSession: OptDocSession) {
-    this.logInfo(docSession, "fetchMetaTables");
+    this._log.info(docSession, "fetchMetaTables");
     if (!this.docData) { throw new Error("No doc data"); }
     // Get metadata from local cache rather than data engine, so that we can
     // still get it even if data engine is busy calculating.
@@ -749,7 +745,7 @@ export class ActiveDoc extends EventEmitter {
     const wantFull = waitForFormulas || query.tableId.startsWith('_grist_') ||
       this._granularAccess.getReadPermission(tableAccess) === 'mixed';
     const onDemand = this._onDemandActions.isOnDemand(query.tableId);
-    this.logInfo(docSession, "fetchQuery %s %s", JSON.stringify(query),
+    this._log.info(docSession, "fetchQuery %s %s", JSON.stringify(query),
                  onDemand ? "(onDemand)" : "(regular)");
     let data: TableDataAction;
     if (onDemand) {
@@ -773,7 +769,7 @@ export class ActiveDoc extends EventEmitter {
       data = cloneDeep(data!);  // Clone since underlying fetch may be cached and shared.
       await this._granularAccess.filterData(docSession, data);
     }
-    this.logInfo(docSession, "fetchQuery -> %d rows, cols: %s",
+    this._log.info(docSession, "fetchQuery -> %d rows, cols: %s",
                  data![2].length, Object.keys(data![3]).join(", "));
     return data!;
   }
@@ -784,7 +780,7 @@ export class ActiveDoc extends EventEmitter {
    * @returns {Promise} Promise for a string representing the generated table schema.
    */
   public async fetchTableSchema(docSession: DocSession): Promise<string> {
-    this.logInfo(docSession, "fetchTableSchema(%s)", docSession);
+    this._log.info(docSession, "fetchTableSchema(%s)", docSession);
     // Permit code view if user can read everything, or can download/copy (perhaps
     // via an exceptional permission for sample documents)
     if (!(await this._granularAccess.canReadEverything(docSession) ||
@@ -800,7 +796,7 @@ export class ActiveDoc extends EventEmitter {
    * docActions that affect this query's results.
    */
   public async useQuerySet(docSession: OptDocSession, query: ServerQuery): Promise<QueryResult> {
-    this.logInfo(docSession, "useQuerySet(%s, %s)", docSession, query);
+    this._log.info(docSession, "useQuerySet(%s, %s)", docSession, query);
     // TODO implement subscribing to the query.
     // - Convert tableId+colIds to TableData/ColData references
     // - Return a unique identifier for unsubscribing
@@ -817,7 +813,7 @@ export class ActiveDoc extends EventEmitter {
    * docActions relevant only to this query.
    */
   public async disposeQuerySet(docSession: DocSession, querySubId: number): Promise<void> {
-    this.logInfo(docSession, "disposeQuerySet(%s, %s)", docSession, querySubId);
+    this._log.info(docSession, "disposeQuerySet(%s, %s)", docSession, querySubId);
     // TODO To-be-implemented
   }
 
@@ -834,7 +830,7 @@ export class ActiveDoc extends EventEmitter {
                                   optTableId?: string): Promise<number[]> {
     // This could leak information about private tables, so check for permission.
     if (!await this._granularAccess.canScanData(docSession)) { return []; }
-    this.logInfo(docSession, "findColFromValues(%s, %s, %s)", docSession, values, n);
+    this._log.info(docSession, "findColFromValues(%s, %s, %s)", docSession, values, n);
     await this.waitForInitialization();
     return this._pyCall('find_col_from_values', values, n, optTableId);
   }
@@ -883,7 +879,7 @@ export class ActiveDoc extends EventEmitter {
   public async getFormulaError(docSession: DocSession, tableId: string, colId: string,
                                rowId: number): Promise<CellValue> {
     if (!await this._granularAccess.hasTableAccess(docSession, tableId)) { return null; }
-    this.logInfo(docSession, "getFormulaError(%s, %s, %s, %s)",
+    this._log.info(docSession, "getFormulaError(%s, %s, %s, %s)",
                  docSession, tableId, colId, rowId);
     await this.waitForInitialization();
     return this._pyCall('get_formula_error', tableId, colId, rowId);
@@ -978,7 +974,7 @@ export class ActiveDoc extends EventEmitter {
   }
 
   public async renameDocTo(docSession: OptDocSession, newName: string): Promise<void> {
-    this.logDebug(docSession, 'renameDoc', newName);
+    this._log.debug(docSession, 'renameDoc', newName);
     await this.docStorage.renameDocTo(newName);
     this._docName = newName;
   }
@@ -1314,7 +1310,7 @@ export class ActiveDoc extends EventEmitter {
     const versionCol = docInfo.schemaVersion;
     const docSchemaVersion = (versionCol && versionCol.length === 1 ? versionCol[0] : 0) as number;
     if (docSchemaVersion < schemaVersion) {
-      this.logInfo(docSession, "Doc needs migration from v%s to v%s", docSchemaVersion, schemaVersion);
+      this._log.info(docSession, "Doc needs migration from v%s to v%s", docSchemaVersion, schemaVersion);
       await this._beforeMigration(docSession, 'schema', docSchemaVersion, schemaVersion);
       let success: boolean = false;
       try {
@@ -1330,7 +1326,7 @@ export class ActiveDoc extends EventEmitter {
       // migration action, but that requires merging and still may not be safe. For now, doing
       // nothing seems best, as long as we follow the recommendations in migrations.py (never
       // remove/modify/rename metadata tables or columns, or change their meaning).
-      this.logWarn(docSession, "Doc is newer (v%s) than this version of Grist (v%s); " +
+      this._log.warn(docSession, "Doc is newer (v%s) than this version of Grist (v%s); " +
         "proceeding with fingers crossed", docSchemaVersion, schemaVersion);
     }
 
@@ -1350,10 +1346,10 @@ export class ActiveDoc extends EventEmitter {
     const onDemandMap = zipObject(tablesParsed.tableId as string[], tablesParsed.onDemand);
     const onDemandNames = remove(tableNames, (t) => onDemandMap[t]);
 
-    this.logInfo(docSession, "Loading %s normal tables, skipping %s on-demand tables",
+    this._log.debug(docSession, "Loading %s normal tables, skipping %s on-demand tables",
                  tableNames.length, onDemandNames.length);
-    this.logDebug(docSession, "Normal tables: %s", tableNames.join(", "));
-    this.logDebug(docSession, "On-demand tables: %s", onDemandNames.join(", "));
+    this._log.debug(docSession, "Normal tables: %s", tableNames.join(", "));
+    this._log.debug(docSession, "On-demand tables: %s", onDemandNames.join(", "));
 
     return [tableNames, onDemandNames];
   }
@@ -1380,7 +1376,7 @@ export class ActiveDoc extends EventEmitter {
                                   options: ApplyUAOptions = {}): Promise<ApplyUAResult> {
 
     const client = docSession.client;
-    this.logDebug(docSession, "_applyUserActions(%s, %s)", client, shortDesc(actions));
+    this._log.debug(docSession, "_applyUserActions(%s, %s)", client, shortDesc(actions));
     this._inactivityTimer.ping();     // The doc is in active use; ping it to stay open longer.
 
     if (options?.bestEffort) {
@@ -1397,7 +1393,7 @@ export class ActiveDoc extends EventEmitter {
     const result: ApplyUAResult = await new Promise<ApplyUAResult>(
       (resolve, reject) =>
         this._sharing.addUserAction({action, docSession, resolve, reject}));
-    this.logDebug(docSession, "_applyUserActions returning %s", shortDesc(result));
+    this._log.debug(docSession, "_applyUserActions returning %s", shortDesc(result));
 
     if (result.isModification) {
       this._fetchCache.clear();  // This could be more nuanced.
@@ -1413,7 +1409,7 @@ export class ActiveDoc extends EventEmitter {
   private async _createDocFile(docSession: OptDocSession, options?: {
     skipInitialTable?: boolean,  // If set, "Table1" will not be added.
   }): Promise<void> {
-    this.logDebug(docSession, "createDoc");
+    this._log.debug(docSession, "createDoc");
     await this._docManager.storageManager.prepareToCreateDoc(this.docName);
     await this.docStorage.createFile();
     const sql = options?.skipInitialTable ? GRIST_DOC_SQL : GRIST_DOC_WITH_TABLE1_SQL;
@@ -1461,7 +1457,7 @@ export class ActiveDoc extends EventEmitter {
     const checksum = await checksumFile(fileData.absPath);
     const fileIdent = checksum + fileData.ext;
     const ret: boolean = await this.docStorage.findOrAttachFile(fileData.absPath, fileIdent);
-    this.logInfo(docSession, "addAttachment: file %s (image %sx%s) %s", fileIdent,
+    this._log.info(docSession, "addAttachment: file %s (image %sx%s) %s", fileIdent,
                  dimensions.width, dimensions.height, ret ? "attached" : "already exists");
     return ['AddRecord', '_grist_Attachments', null, {
       fileIdent,
@@ -1506,7 +1502,7 @@ export class ActiveDoc extends EventEmitter {
       // If a new migration needs this flag, more work is needed. The current approach creates
       // more memory pressure than usual since full data is present in memory at once both in node
      // and in Python; and it doesn't skip onDemand tables. This is liable to cause crashes.
-      this.logWarn(docSession, "_migrate: retrying with all tables");
+      this._log.warn(docSession, "_migrate: retrying with all tables");
       for (const tableName of tableNames) {
         if (!tableData[tableName] && !tableName.startsWith('_gristsys_')) {
           tableData[tableName] = await this.docStorage.fetchTable(tableName);
@@ -1518,10 +1514,10 @@ export class ActiveDoc extends EventEmitter {
     const processedTables = Object.keys(tableData);
     const numSchema = countIf(processedTables, t => t.startsWith("_grist_"));
     const numUser = countIf(processedTables, t => !t.startsWith("_grist_"));
-    this.logInfo(docSession, "_migrate: applying %d migration actions (processed %s schema, %s user tables)",
+    this._log.info(docSession, "_migrate: applying %d migration actions (processed %s schema, %s user tables)",
                  docActions.length, numSchema, numUser);
 
-    docActions.forEach((action, i) => this.logInfo(docSession, "_migrate: docAction %s: %s", i, shortDesc(action)));
+    docActions.forEach((action, i) => this._log.info(docSession, "_migrate: docAction %s: %s", i, shortDesc(action)));
     await this.docStorage.execTransaction(() => this.docStorage.applyStoredActions(docActions));
   }
 
@@ -1529,7 +1525,7 @@ export class ActiveDoc extends EventEmitter {
    * Load the specified tables into the data engine.
    */
   private async _loadTables(docSession: OptDocSession, tableNames: string[]) {
-    this.logDebug(docSession, "loading %s tables: %s", tableNames.length,
+    this._log.debug(docSession, "loading %s tables: %s", tableNames.length,
                   tableNames.join(", "));
     // Pass the resulting array to `map`, which allows parallel processing of the tables. Database
     // and DataEngine may still do things serially, but it allows them to be busy simultaneously.
@@ -1545,7 +1541,7 @@ export class ActiveDoc extends EventEmitter {
    * The loader can be directed to stream the tables on to the engine.
    */
   private _startLoadingTables(docSession: OptDocSession, tableNames: string[]) {
-    this.logDebug(docSession, "starting to load %s tables: %s", tableNames.length,
+    this._log.debug(docSession, "starting to load %s tables: %s", tableNames.length,
                   tableNames.join(", "));
     for (const tableId of tableNames) {
       this._tableMetadataLoader.startFetchingTable(tableId);
@@ -1597,11 +1593,11 @@ export class ActiveDoc extends EventEmitter {
       // took longer, scale it up proportionately.
       const closeTimeout = Math.max(loadMs, 1000) * Deps.ACTIVEDOC_TIMEOUT;
       this._inactivityTimer.setDelay(closeTimeout);
-      this.logDebug(docSession, `loaded in ${loadMs} ms, InactivityTimer set to ${closeTimeout} ms`);
+      this._log.debug(docSession, `loaded in ${loadMs} ms, InactivityTimer set to ${closeTimeout} ms`);
      return true;
     } catch (err) {
       if (!this._shuttingDown) {
-        this.logWarn(docSession, "_finishInitialization stopped with %s", err);
+        this._log.warn(docSession, "_finishInitialization stopped with %s", err);
       }
       this._fullyLoaded = true;
       return false;
@@ -1639,10 +1635,6 @@ export class ActiveDoc extends EventEmitter {
     }
   }
 
-  private _log(level: string, docSession: OptDocSession, msg: string, ...args: any[]) {
-    log.origLog(level, `ActiveDoc ` + msg, ...args, this.getLogMeta(docSession));
-  }
-
   /**
    * Called before a migration. Makes sure a back-up is made.
   */
@@ -1652,7 +1644,7 @@ export class ActiveDoc extends EventEmitter {
     const label = `migrate-${versionType}-last-v${currentVersion}-before-v${newVersion}`;
     this._docManager.markAsChanged(this);  // Give backup current time.
     const location = await this._docManager.makeBackup(this, label);
-    this.logInfo(docSession, "_beforeMigration: backup made with label %s at %s", label, location);
+    this._log.info(docSession, "_beforeMigration: backup made with label %s at %s", label, location);
     this.emit("backupMade", location);
   }
 
app/server/lib/Client.ts

@@ -11,6 +11,7 @@ import {Authorizer} from 'app/server/lib/Authorizer';
 import {ScopedSession} from 'app/server/lib/BrowserSession';
 import {DocSession} from 'app/server/lib/DocSession';
 import * as log from 'app/server/lib/log';
+import {LogMethods} from "app/server/lib/LogMethods";
 import {shortDesc} from 'app/server/lib/shortDesc';
 import * as crypto from 'crypto';
 import * as moment from 'moment';
@@ -65,6 +66,8 @@ export class Client {
 
   private _session: ScopedSession|null = null;
 
+  private _log = new LogMethods('Client ', (s: null) => this.getLogMeta());
+
   // Maps docFDs to DocSession objects.
   private _docFDs: Array<DocSession|null> = [];
 
@@ -163,12 +166,12 @@ export class Client {
       if (docSession && docSession.activeDoc) {
         // Note that this indirectly calls to removeDocSession(docSession.fd)
         docSession.activeDoc.closeDoc(docSession)
-          .catch((e) => { log.warn("%s: error closing docFD %d", this, fd); });
+          .catch((e) => { this._log.warn(null, "error closing docFD %d", fd); });
         count++;
       }
       this._docFDs[fd] = null;
     }
-    log.debug("%s: closeAllDocs() closed %d doc(s)", this, count);
+    this._log.debug(null, "closeAllDocs() closed %d doc(s)", count);
   }
 
   public interruptConnection() {
@@ -190,12 +193,8 @@ export class Client {
     const message: string = JSON.stringify(messageObj);
 
     // Log something useful about the message being sent.
-    if (messageObj.type) {
-      log.info("%s: sending %s: %d bytes", this, messageObj.type, message.length);
-    } else if (messageObj.error) {
-      log.warn("%s: responding to #%d ERROR %s", this, messageObj.reqId, messageObj.error);
-    } else {
-      log.info("%s: responding to #%d OK: %d bytes", this, messageObj.reqId, message.length);
+    if (messageObj.error) {
+      this._log.warn(null, "responding to #%d ERROR %s", messageObj.reqId, messageObj.error);
     }
 
     if (this._websocket) {
@@ -207,9 +206,9 @@ export class Client {
         // NOTE: if this handler is run after onClose, we could have messages end up out of order.
         // Let's check to make sure. If this can happen, we need to refactor for correct ordering.
         if (!this._websocket) {
-          log.error("%s sendMessage: UNEXPECTED ORDER OF CALLBACKS", this);
+          this._log.error(null, "sendMessage: UNEXPECTED ORDER OF CALLBACKS");
        }
-        log.warn("%s sendMessage: queuing after send error: %s", this, err.toString());
+        this._log.warn(null, "sendMessage: queuing after send error: %s", err.toString());
         this._missedMessages.push(message);
       }
     } else if (this._missedMessages.length < clientMaxMissedMessages) {
@@ -217,7 +216,7 @@ export class Client {
       this._missedMessages.push(message);
     } else {
       // Too many messages queued. Boot the client now, to make it reset when/if it reconnects.
-      log.error("%s sendMessage: too many messages queued; booting client", this);
+      this._log.error(null, "sendMessage: too many messages queued; booting client");
       if (this._destroyTimer) {
         clearTimeout(this._destroyTimer);
         this._destroyTimer = null;
@@ -244,17 +243,17 @@ export class Client {
    * indicate success or failure.
    */
   public async onMessage(message: string): Promise<void> {
-    const clientId = this.clientId;
     const request = JSON.parse(message);
     if (request.beat) {
-      const profile = this.getProfile();
       // this is a heart beat, to keep the websocket alive. No need to reply.
-      log.rawInfo('heartbeat', {clientId, counter: this._counter, url: request.url,
-                                docId: request.docId,  // caution: trusting client for docId for this purpose.
-                                email: profile?.email, userId: this.getCachedUserId()});
+      log.rawInfo('heartbeat', {
+        ...this.getLogMeta(),
+        url: request.url,
+        docId: request.docId,  // caution: trusting client for docId for this purpose.
+      });
       return;
     } else {
-      log.info("%s: onMessage", this, shortDesc(message));
+      this._log.info(null, "onMessage", shortDesc(message));
     }
     const response: any = {reqId: request.reqId};
     const method = this._methods[request.method];
@@ -274,7 +273,7 @@ export class Client {
         (typeof code === 'string' && code.startsWith('AUTH_NO'))
       );
 
-      log.warn("%s: Error %s %s", this, skipStack ? err : err.stack, code || '');
+      this._log.warn(null, "Error %s %s", skipStack ? err : err.stack, code || '');
       response.error = err.message;
       if (err.code) {
         response.errorCode = err.code;
app/server/lib/DocClients.ts

@@ -9,7 +9,7 @@ import {Authorizer} from 'app/server/lib/Authorizer';
 import {Client} from 'app/server/lib/Client';
 import {sendDocMessage} from 'app/server/lib/Comm';
 import {DocSession, OptDocSession} from 'app/server/lib/DocSession';
-import * as log from 'app/server/lib/log';
+import {LogMethods} from "app/server/lib/LogMethods";
 
 // Allow tests to impose a serial order for broadcasts if they need that for repeatability.
 export const Deps = {
@@ -18,6 +18,7 @@ export const Deps = {
 
 export class DocClients {
   private _docSessions: DocSession[] = [];
+  private _log = new LogMethods('DocClients ', (s: DocSession|null) => this.activeDoc.getLogMeta(s));
 
   constructor(
     public readonly activeDoc: ActiveDoc
@@ -36,7 +37,7 @@ export class DocClients {
   public addClient(client: Client, authorizer: Authorizer): DocSession {
     const docSession = client.addDocSession(this.activeDoc, authorizer);
     this._docSessions.push(docSession);
-    log.debug("DocClients (%s) now has %d clients; new client is %s (fd %s)", this.activeDoc.docName,
+    this._log.debug(docSession, "now %d clients; new client is %s (fd %s)",
      this._docSessions.length, client.clientId, docSession.fd);
     return docSession;
   }
@@ -46,11 +47,11 @@ export class DocClients {
   * this DocSession.
   */
  public removeClient(docSession: DocSession): void {
-    log.debug("DocClients.removeClient", docSession.client.clientId);
+    this._log.debug(docSession, "removeClient", docSession.client.clientId);
    docSession.client.removeDocSession(docSession.fd);
 
    if (arrayRemove(this._docSessions, docSession)) {
-      log.debug("DocClients (%s) now has %d clients", this.activeDoc.docName, this._docSessions.length);
+      this._log.debug(docSession, "now %d clients", this._docSessions.length);
    }
  }
 
@@ -58,7 +59,7 @@ export class DocClients {
   * Removes all active clients from this document, i.e. closes all DocSessions.
   */
  public removeAllClients(): void {
-    log.debug("DocClients.removeAllClients() removing %s docSessions", this._docSessions.length);
+    this._log.debug(null, "removeAllClients() removing %s docSessions", this._docSessions.length);
    const docSessions = this._docSessions.splice(0);
    for (const docSession of docSessions) {
      docSession.client.removeDocSession(docSession.fd);
@@ -66,7 +67,7 @@ export class DocClients {
   }
 
   public interruptAllClients() {
-    log.debug("DocClients.interruptAllClients() interrupting %s docSessions", this._docSessions.length);
+    this._log.debug(null, "interruptAllClients() interrupting %s docSessions", this._docSessions.length);
     for (const docSession of this._docSessions) {
       docSession.client.interruptConnection();
     }
@@ -116,7 +117,7 @@ export class DocClients {
       if (filteredMessageData) {
         sendDocMessage(target.client, target.fd, type, filteredMessageData, fromSelf);
       } else {
-        this.activeDoc.logDebug(target, 'skip broadcastDocMessage because it is not allowed for this client');
+        this._log.debug(target, 'skip broadcastDocMessage because it is not allowed for this client');
       }
     } catch (e) {
       if (e.code && e.code === 'NEED_RELOAD') {
@@ -129,10 +130,7 @@ export class DocClients {
     } catch (e) {
       if (e.code === 'AUTH_NO_VIEW') {
         // Skip sending data to this user, they have no view access.
-        log.rawDebug('skip broadcastDocMessage because AUTH_NO_VIEW', {
-          docId: target.authorizer.getDocId(),
-          ...target.client.getLogMeta()
-        });
+        this._log.debug(target, 'skip broadcastDocMessage because AUTH_NO_VIEW');
         // Go further and trigger a shutdown for this user, in case they are granted
         // access again later.
         sendDocMessage(target.client, target.fd, 'docShutdown', null, fromSelf);
app/server/lib/DocSession.ts

@@ -154,6 +154,14 @@ export function getDocSessionAccess(docSession: OptDocSession): Role {
   throw new Error('getDocSessionAccess could not find access information in DocSession');
 }
 
+export function getDocSessionAccessOrNull(docSession: OptDocSession): Role|null {
+  try {
+    return getDocSessionAccess(docSession);
+  } catch (err) {
+    return null;
+  }
+}
+
 /**
  * Get cached information about the document, if available. May be stale.
  */
app/server/lib/HostedStorageManager.ts

@@ -14,7 +14,7 @@ import {ChecksummedExternalStorage, DELETED_TOKEN, ExternalStorage} from 'app/se
 import {HostedMetadataManager} from 'app/server/lib/HostedMetadataManager';
 import {ICreate} from 'app/server/lib/ICreate';
 import {IDocStorageManager} from 'app/server/lib/IDocStorageManager';
-import * as log from 'app/server/lib/log';
+import {LogMethods} from "app/server/lib/LogMethods";
 import {fromCallback} from 'app/server/lib/serverUtils';
 import * as fse from 'fs-extra';
 import * as path from 'path';
@@ -113,6 +113,8 @@ export class HostedStorageManager implements IDocStorageManager {
   // Latest version ids of documents.
   private _latestVersions = new Map<string, string>();
 
+  private _log = new LogMethods('HostedStorageManager ', (docId: string|null) => ({docId}));
+
   /**
    * Initialize with the given root directory, which should be a fully-resolved path.
    * If s3Bucket is blank, S3 storage will be disabled.
@@ -140,7 +142,7 @@ export class HostedStorageManager implements IDocStorageManager {
         delayBeforeOperationMs: secondsBeforePush * 1000,
         retry: true,
         logError: (key, failureCount, err) => {
-          log.error("HostedStorageManager: error pushing %s (%d): %s", key, failureCount, err);
+          this._log.error(null, "error pushing %s (%d): %s", key, failureCount, err);
         }
       });
 
@@ -309,7 +311,7 @@ export class HostedStorageManager implements IDocStorageManager {
        await fse.remove(this._getHashFile(this.getPath(docId)));
        this.markAsChanged(docId, 'edit');
      } catch (err) {
-        log.error("HostedStorageManager: problem replacing %s: %s", docId, err);
+        this._log.error(docId, "problem replacing doc: %s", err);
        await fse.move(tmpPath, docPath, {overwrite: true});
        throw err;
      } finally {
@@ -373,7 +375,7 @@ export class HostedStorageManager implements IDocStorageManager {
   * Close the storage manager. Make sure any pending changes reach S3 first.
   */
  public async closeStorage(): Promise<void> {
-    await this._uploads.wait(() => log.info('HostedStorageManager: waiting for closeStorage to finish'));
+    await this._uploads.wait(() => this._log.info(null, 'waiting for closeStorage to finish'));
 
    // Close metadata manager.
    if (this._metadataManager) { await this._metadataManager.close(); }
@@ -415,7 +417,7 @@ export class HostedStorageManager implements IDocStorageManager {
   // pick up the pace of pushing to s3, from leisurely to urgent.
   public prepareToCloseStorage() {
     if (this._pruner) {
-      this._pruner.close().catch(e => log.error("HostedStorageManager: pruning error %s", e));
+      this._pruner.close().catch(e => this._log.error(null, "pruning error %s", e));
     }
     this._uploads.expediteOperations();
   }
@@ -436,7 +438,7 @@ export class HostedStorageManager implements IDocStorageManager {
   */
  public async flushDoc(docName: string): Promise<void> {
    while (!this.isAllSaved(docName)) {
-      log.info('HostedStorageManager: waiting for document to finish: %s', docName);
+      this._log.info(docName, 'waiting for document to finish');
      await this._uploads.expediteOperationAndWait(docName);
      await this._inventory?.flush(docName);
      if (!this.isAllSaved(docName)) {
@@ -576,7 +578,7 @@ export class HostedStorageManager implements IDocStorageManager {
          // Fine, accept the doc as existing on our file system.
          return true;
        } else {
-          log.info("Local hash does not match redis: %s vs %s", checksum, docStatus.docMD5);
+          this._log.info(docName, "Local hash does not match redis: %s vs %s", checksum, docStatus.docMD5);
          // The file that exists locally does not match S3. But S3 is the canonical version.
          // On the assumption that the local file is outdated, delete it.
          // TODO: may want to be more careful in case the local file has modifications that
@@ -647,7 +649,7 @@ export class HostedStorageManager implements IDocStorageManager {
   private async _prepareBackup(docId: string, postfix: string = 'backup'): Promise<string> {
     const docPath = this.getPath(docId);
     const tmpPath = `${docPath}-${postfix}`;
-    return backupSqliteDatabase(docPath, tmpPath, undefined, postfix);
+    return backupSqliteDatabase(docPath, tmpPath, undefined, postfix, {docId});
   }
 
   /**
@@ -775,10 +777,14 @@
  */
 export async function backupSqliteDatabase(src: string, dest: string,
                                            testProgress?: (e: BackupEvent) => void,
-                                           label?: string): Promise<string> {
-  log.debug(`backupSqliteDatabase: starting copy of ${src} (${label})`);
+                                           label?: string,
+                                           logMeta: object = {}): Promise<string> {
+  const _log = new LogMethods<null>('backupSqliteDatabase: ', () => logMeta);
+  _log.debug(null, `starting copy of ${src} (${label})`);
   let db: sqlite3.DatabaseWithBackup|null = null;
   let success: boolean = false;
+  let maxStepTimeMs: number = 0;
+  let numSteps: number = 0;
   try {
     // NOTE: fse.remove succeeds also when the file does not exist.
     await fse.remove(dest);  // Just in case some previous process terminated very badly.
@@ -808,9 +814,11 @@ export async function backupSqliteDatabase(src: string, dest: string,
       // this message at most once a second.
       // See https://www.sqlite.org/c3ref/backup_finish.html and
       // https://github.com/mapbox/node-sqlite3/pull/1116 for api details.
-      if (remaining >= 0 && backup.remaining > remaining && Date.now() - restartMsgTime > 1000) {
-        log.info(`backupSqliteDatabase: copy of ${src} (${label}) restarted`);
-        restartMsgTime = Date.now();
+      numSteps++;
+      const stepStart = Date.now();
+      if (remaining >= 0 && backup.remaining > remaining && stepStart - restartMsgTime > 1000) {
+        _log.info(null, `copy of ${src} (${label}) restarted`);
+        restartMsgTime = stepStart;
       }
       remaining = backup.remaining;
       if (testProgress) { testProgress({action: 'step', phase: 'before'}); }
@@ -819,15 +827,18 @@ export async function backupSqliteDatabase(src: string, dest: string,
        isCompleted = Boolean(await fromCallback(cb => backup.step(PAGES_TO_BACKUP_PER_STEP, cb)));
      } catch (err) {
        if (String(err) !== String(prevError) || Date.now() - errorMsgTime > 1000) {
-          log.info(`backupSqliteDatabase (${src} ${label}): ${err}`);
+          _log.info(null, `error (${src} ${label}): ${err}`);
          errorMsgTime = Date.now();
        }
        prevError = err;
        if (backup.failed) { throw new Error(`backupSqliteDatabase (${src} ${label}): internal copy failed`); }
+      } finally {
+        const stepTimeMs = Date.now() - stepStart;
+        if (stepTimeMs > maxStepTimeMs) { maxStepTimeMs = stepTimeMs; }
      }
      if (testProgress) { testProgress({action: 'step', phase: 'after'}); }
      if (isCompleted) {
-        log.info(`backupSqliteDatabase: copy of ${src} (${label}) completed successfully`);
+        _log.info(null, `copy of ${src} (${label}) completed successfully`);
        success = true;
        break;
      }
@@ -838,7 +849,7 @@ export async function backupSqliteDatabase(src: string, dest: string,
     try {
       if (db) { await fromCallback(cb => db!.close(cb)); }
     } catch (err) {
-      log.debug(`backupSqliteDatabase: problem stopping copy of ${src} (${label}): ${err}`);
+      _log.debug(null, `problem stopping copy of ${src} (${label}): ${err}`);
     }
     if (!success) {
       // Something went wrong, remove backup if it was started.
@@ -846,11 +857,11 @@ export async function backupSqliteDatabase(src: string, dest: string,
      try {
        // NOTE: fse.remove succeeds also when the file does not exist.
        await fse.remove(dest);
      } catch (err) {
-        log.debug(`backupSqliteDatabase: problem removing copy of ${src} (${label}): ${err}`);
+        _log.debug(null, `problem removing copy of ${src} (${label}): ${err}`);
      }
    }
    if (testProgress) { testProgress({action: 'close', phase: 'after'}); }
-    log.debug(`backupSqliteDatabase: stopped copy of ${src} (${label})`);
+    _log.rawLog('debug', null, `stopped copy of ${src} (${label})`, {maxStepTimeMs, numSteps});
  }
  return dest;
 }
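The numSteps/maxStepTimeMs bookkeeping added to backupSqliteDatabase follows a generic pattern: count iterations, record the slowest one (even when a step throws), and attach both aggregates to a single summary log line. A minimal standalone sketch of that pattern (hypothetical helper, not code from the commit):

    // Run async steps in sequence, recording how many ran and the slowest duration.
    async function runTimedSteps(steps: Array<() => Promise<void>>):
        Promise<{numSteps: number, maxStepTimeMs: number}> {
      let numSteps = 0;
      let maxStepTimeMs = 0;
      for (const step of steps) {
        numSteps++;
        const start = Date.now();
        try {
          await step();
        } finally {
          // Track the slowest step even if it threw, mirroring the try/finally above.
          maxStepTimeMs = Math.max(maxStepTimeMs, Date.now() - start);
        }
      }
      return {numSteps, maxStepTimeMs};
    }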
app/server/lib/LogMethods.ts (new file, 36 lines)

@@ -0,0 +1,36 @@
+import * as log from 'app/server/lib/log';
+
+export type ILogMeta = log.ILogMeta;
+
+/**
+ * Helper for logging with metadata. The created object has methods similar to those of the `log`
+ * module, but with an extra required first argument. The produced messages get metadata produced
+ * by the constructor callback applied to that argument, and the specified prefix.
+ *
+ * Usage:
+ *    _log = new LogMethods(prefix, (info) => ({...logMetadata...}))
+ *    _log.info(info, "hello %s", name);
+ *    _log.warn(info, "hello %s", name);
+ *    etc.
+ */
+export class LogMethods<Info> {
+  constructor(
+    private _prefix: string,
+    private _getMeta: (info: Info) => log.ILogMeta,
+  ) {}
+
+  public debug(info: Info, msg: string, ...args: any[]) { this.log('debug', info, msg, ...args); }
+  public info(info: Info, msg: string, ...args: any[]) { this.log('info', info, msg, ...args); }
+  public warn(info: Info, msg: string, ...args: any[]) { this.log('warn', info, msg, ...args); }
+  public error(info: Info, msg: string, ...args: any[]) { this.log('error', info, msg, ...args); }
+
+  public log(level: string, info: Info, msg: string, ...args: any[]): void {
+    log.origLog(level, this._prefix + msg, ...args, this._getMeta(info));
+  }
+
+  // Log with the given level, and include the provided log metadata in addition to that produced
+  // by _getMeta(info).
+  public rawLog(level: string, info: Info, msg: string, meta: ILogMeta): void {
+    log.origLog(level, this._prefix + msg, {...this._getMeta(info), ...meta});
+  }
+}
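A short usage note on rawLog (the values here are illustrative, not from the commit): it merges one-off fields into the metadata produced by the constructor callback, which is how the backup code above reports its aggregates in a single line:

    const _log = new LogMethods('backupSqliteDatabase: ', () => ({docId: 'sample-doc'}));
    // The extra fields are merged with {docId: ...} from the callback.
    _log.rawLog('debug', null, 'stopped copy', {maxStepTimeMs: 12, numSteps: 3});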
app/server/lib/Sharing.ts

@@ -6,10 +6,10 @@ import {
   LocalActionBundle,
   UserActionBundle
 } from 'app/common/ActionBundle';
-import {DocAction, UserAction} from 'app/common/DocActions';
+import {DocAction, getNumRows, UserAction} from 'app/common/DocActions';
 import {allToken} from 'app/common/sharing';
-import {timeFormat} from 'app/common/timeFormat';
 import * as log from 'app/server/lib/log';
+import {LogMethods} from "app/server/lib/LogMethods";
 import {shortDesc} from 'app/server/lib/shortDesc';
 import * as assert from 'assert';
 import {Mutex} from 'async-mutex';
@@ -42,6 +42,9 @@ interface UserResult {
 // Internally-used enum to distinguish if applied actions should be logged as local or shared.
 enum Branch { Local, Shared }
 
+// Don't log details of action bundles in production.
+const LOG_ACTION_BUNDLE = (process.env.NODE_ENV !== 'production');
+
 export class Sharing {
   protected _activeDoc: ActiveDoc;
   protected _actionHistory: ActionHistory;
@@ -49,6 +52,8 @@ export class Sharing {
   protected _pendingQueue: Deque<UserRequest> = new Deque();
   protected _workCoordinator: WorkCoordinator;
 
+  private _log = new LogMethods('Sharing ', (s: OptDocSession|null) => this._activeDoc.getLogMeta(s));
+
   constructor(activeDoc: ActiveDoc, actionHistory: ActionHistory, private _modificationLock: Mutex) {
     // TODO actionHistory is currently unused (we use activeDoc.actionLog).
     assert(actionHistory.isInitialized());
@@ -114,7 +119,7 @@ export class Sharing {
       const ret = await this._doApplyUserActionBundle(userRequest.action, userRequest.docSession);
       userRequest.resolve(ret);
     } catch (e) {
-      log.warn("Unable to apply action...", e);
+      this._log.warn(userRequest.docSession, "Unable to apply action...", e);
       userRequest.reject(e);
     }
   }
@@ -125,7 +130,7 @@ export class Sharing {
     try {
       await this._doApplySharedActionBundle(action);
     } catch (e) {
-      log.error("Unable to apply hub action... skipping");
+      this._log.error(null, "Unable to apply hub action... skipping");
     }
   }
 
@@ -141,7 +146,7 @@ export class Sharing {
         await this._rebaseLocalActions();
       }
     } catch (e) {
-      log.error("Unable to apply hub action... skipping");
+      this._log.error(null, "Unable to apply hub action... skipping");
     }
   }
 
@@ -155,7 +160,7 @@ export class Sharing {
       rebaseQueue.push(...actions.map((a) => getUserActionBundle(a)));
       await this._actionHistory.clearLocalActions();
     } catch (e) {
-      log.error("Can't undo local actions; sharing is off");
+      this._log.error(null, "Can't undo local actions; sharing is off");
       this._rollbackToCheckpoint();
       // TODO this.disconnect();
       // TODO errorState = true;
@@ -173,14 +178,14 @@ export class Sharing {
      try {
        await this._doApplyUserActionBundle(adjusted, null);
      } catch (e) {
-        log.warn("Unable to apply rebased action...");
+        this._log.warn(null, "Unable to apply rebased action...");
        rebaseFailures.push([action, adjusted]);
      }
    }
    if (rebaseFailures.length > 0) {
      this._createBackupAtCheckpoint();
      // TODO we should notify the user too.
-      log.error('Rebase failed to reapply some of your actions, backup of local at...');
+      this._log.error(null, 'Rebase failed to reapply some of your actions, backup of local at...');
    }
    this._releaseCheckpoint();
  }
@@ -238,7 +243,19 @@ export class Sharing {
       actionHash: null,        // Gets set below by _actionHistory.recordNext...
       parentActionHash: null,  // Gets set below by _actionHistory.recordNext...
     };
-    this._logActionBundle(`_doApplyUserActions (${Branch[branch]})`, localActionBundle);
+
+    const logMeta = {
+      actionNum,
+      linkId: info.linkId,
+      otherId: info.otherId,
+      numDocActions: localActionBundle.stored.length,
+      numRows: localActionBundle.stored.reduce((n, env) => n + getNumRows(env[1]), 0),
+      author: info.user,
+    };
+    this._log.rawLog('debug', docSession, '_doApplyUserActions', logMeta);
+    if (LOG_ACTION_BUNDLE) {
+      this._logActionBundle(`_doApplyUserActions (${Branch[branch]})`, localActionBundle);
+    }
 
     // TODO Note that the sandbox may produce actions which are not addressed to us (e.g. when we
     // have EDIT permission without VIEW). These are not sent to the browser or the database. But
@@ -361,11 +378,6 @@ export class Sharing {
   /** Log an action bundle to the debug log. */
   private _logActionBundle(prefix: string, actionBundle: ActionBundle) {
     const includeEnv = actionBundle.envelopes.map((e) => this.isOwnEnvelope(e.recipients));
-    log.debug("%s: ActionBundle #%s with #%s envelopes: %s",
-              prefix, actionBundle.actionNum, actionBundle.envelopes.length,
-              infoDesc(actionBundle.info[1]));
-    actionBundle.envelopes.forEach((env, i) =>
-      log.debug("%s: env #%s: %s", prefix, i, env.recipients.join(' ')));
     actionBundle.stored.forEach((envAction, i) =>
       log.debug("%s: stored #%s [%s%s]: %s", prefix, i, envAction[0],
                 (includeEnv[envAction[0]] ? "" : " alien"),
@@ -414,17 +426,6 @@ export function findOrAddAllEnvelope(envelopes: Envelope[]): number {
   return envelopes.length - 1;
 }
 
-/**
- * Convert actionInfo to a concise human-readable description, for debugging.
- */
-function infoDesc(info: ActionInfo): string {
-  const timestamp = timeFormat('A', new Date(info.time));
-  const desc = info.desc ? ` desc=[${info.desc}]` : '';
-  const otherId = info.otherId ? ` [otherId=${info.otherId}]` : '';
-  const linkId = info.linkId ? ` [linkId=${info.linkId}]` : '';
-  return `${timestamp} on ${info.inst} by ${info.user}${desc}${otherId}${linkId}`;
-}
-
 /**
  * Extract a UserActionBundle from a LocalActionBundle, which contains a superset of data.
  */
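The numRows field in the logMeta above reduces over the bundle's stored actions, each of which pairs an envelope index with a DocAction. A standalone sketch of the same computation (the bundle contents here are made up):

    import {DocAction, getNumRows} from 'app/common/DocActions';

    // Each stored entry is [envelopeIndex, docAction].
    const stored: Array<[number, DocAction]> = [
      [0, ['BulkAddRecord', 'Table1', [1, 2], {Name: ['a', 'b']}]],
      [0, ['UpdateRecord', 'Table1', 1, {Name: 'c'}]],
    ];
    // Total rows touched by the bundle: 2 for the bulk action + 1 for the single update.
    const numRows = stored.reduce((n, env) => n + getNumRows(env[1]), 0);  // 3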
app/server/lib/serverUtils.ts

@@ -8,7 +8,7 @@ import * as uuidv4 from 'uuid/v4';
 import {EngineCode} from 'app/common/DocumentSettings';
 import * as log from 'app/server/lib/log';
 import { OpenMode, SQLiteDB } from 'app/server/lib/SQLiteDB';
-import { getDocSessionAccess, getDocSessionUser, OptDocSession } from './DocSession';
+import { getDocSessionAccessOrNull, getDocSessionUser, OptDocSession } from './DocSession';
 
 /**
  * Promisify a node-style callback function. E.g.
@@ -142,7 +142,7 @@ export async function checkAllegedGristDoc(docSession: OptDocSession, fname: str
  */
 export function getLogMetaFromDocSession(docSession: OptDocSession) {
   const client = docSession.client;
-  const access = getDocSessionAccess(docSession);
+  const access = getDocSessionAccessOrNull(docSession);
   const user = getDocSessionUser(docSession);
   return {
     access,