(core) Exit more cleanly on unhandled errors, and handle errors writing to Clients.
Summary:
- Node strongly recommends assuming bad state and exiting promptly on unhandled exceptions and
  rejections. We now follow that recommendation; the only additions are an effort to clean up
  before exiting, and to log the error in a more standard way.
- The only case of an unhandled rejection seen in recent months was an attempt to write overly
  large JSON to a Client websocket. Ensure that's handled, and add a test case that artificially
  reproduces this scenario.

Test Plan: Added a test case for a failing write to a Client, and a test case verifying that
unhandled errors do kill the server, but with an attempt at cleanup.

Reviewers: georgegevoian

Reviewed By: georgegevoian

Differential Revision: https://phab.getgrist.com/D4124
Parent: d89e008a75
Commit: 4d9bbf6263

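For reference, the exit-on-unhandled-error pattern described in the summary boils down to roughly the following self-contained sketch. The `cleanupAndExit` helper here is hypothetical; it stands in for the `shutdown.exit(1)` call that the FlexServer change below actually makes.

```typescript
// Sketch only: log the unhandled error, attempt cleanup once, then exit rather than
// continuing in an unknown state. With Node's default --unhandled-rejections=throw,
// unhandled promise rejections are routed to this handler too (origin === 'unhandledRejection').
let unhandledErrorCount = 0;

process.on('uncaughtException', (err, origin) => {
  console.error(`UNHANDLED ERROR ${origin} (${unhandledErrorCount}):`, err);
  if (unhandledErrorCount === 0) {
    // Fire and forget; if cleanup itself fails, that failure re-enters this handler.
    void cleanupAndExit(1);
  }
  unhandledErrorCount++;
});

async function cleanupAndExit(code: number): Promise<void> {
  try {
    // ... close servers, flush logs, run any registered cleanup handlers ...
  } finally {
    process.exit(code);
  }
}
```
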
@@ -34,9 +34,6 @@ export type ClientMethod = (client: Client, ...args: any[]) => Promise<unknown>;
 // How long the client state persists after a disconnect.
 const clientRemovalTimeoutMs = 300 * 1000; // 300s = 5 minutes.
 
-// A hook for dependency injection.
-export const Deps = {clientRemovalTimeoutMs};
-
 // How much memory to allow using for large JSON responses before waiting for some to clear.
 // Max total across all clients and all JSON responses.
 const jsonResponseTotalReservation = 500 * 1024 * 1024;
@@ -45,6 +42,9 @@ const jsonResponseTotalReservation = 500 * 1024 * 1024;
 const jsonResponseReservation = 20 * 1024 * 1024;
 export const jsonMemoryPool = new MemoryPool(jsonResponseTotalReservation);
 
+// A hook for dependency injection.
+export const Deps = {clientRemovalTimeoutMs, jsonResponseReservation};
+
 /**
  * Generates and returns a random string to use as a clientId. This is better
  * than numbering clients with consecutive integers; otherwise a reconnecting
@@ -197,6 +197,19 @@ export class Client {
     }
   }
 
+  /**
+   * Sends a message to the client. If the send fails in a way that the message can't get queued
+   * (e.g. due to an unexpected exception in code), logs an error and interrupts the connection.
+   */
+  public async sendMessageOrInterrupt(messageObj: CommMessage|CommResponse|CommResponseError): Promise<void> {
+    try {
+      await this.sendMessage(messageObj);
+    } catch (e) {
+      this._log.error(null, 'sendMessage error', e);
+      this.interruptConnection();
+    }
+  }
+
   /**
    * Sends a message to the client, queuing it up on failure or if the client is disconnected.
    */
@@ -221,7 +234,7 @@ export class Client {
     // Overall, a better solution would be to stream large responses, or to have the client
     // request data piecemeal (as we'd have to for handling large data).
 
-    await jsonMemoryPool.withReserved(jsonResponseReservation, async (updateReservation) => {
+    await jsonMemoryPool.withReserved(Deps.jsonResponseReservation, async (updateReservation) => {
       if (this._destroyed) {
         // If this Client got destroyed while waiting, stop here and release the reservation.
         return;
@@ -562,7 +575,7 @@ export class Client {
         }
       }
     }
-    await this.sendMessage(response);
+    await this.sendMessageOrInterrupt(response);
   }
 
   // Fetch the user database record from profile.email, or null when profile is not set.

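The memory accounting that the Client.ts hunks above rely on (`jsonMemoryPool.withReserved` with an `updateReservation` callback) can be pictured with the simplified stand-in below. This is an illustrative sketch only, not Grist's actual `MemoryPool`: callers reserve an estimated byte count before building a large JSON response, adjust it once the real size is known, and wait when the pool is exhausted.

```typescript
// Simplified stand-in for the reservation idea (not the real MemoryPool implementation).
class SimpleMemoryPool {
  private _reservedSize = 0;
  private _waiters: Array<() => void> = [];

  constructor(private _totalSize: number) {}

  // Run fn while holding a reservation of `size` bytes; the callback may adjust the
  // reservation once the actual size is known (e.g. after JSON.stringify).
  public async withReserved<T>(size: number, fn: (updateReservation: (newSize: number) => void) => Promise<T>): Promise<T> {
    await this._acquire(size);
    let current = size;
    try {
      return await fn((newSize) => { this._adjust(newSize - current); current = newSize; });
    } finally {
      this._adjust(-current);     // Release whatever is still reserved, even on error.
    }
  }

  private async _acquire(size: number): Promise<void> {
    // Simplified wait loop: block until the reservation fits under the total.
    while (this._reservedSize + size > this._totalSize) {
      await new Promise<void>((resolve) => this._waiters.push(resolve));
    }
    this._reservedSize += size;
  }

  private _adjust(sizeDelta: number): void {
    this._reservedSize += sizeDelta;
    if (sizeDelta < 0) {
      // Memory was freed: wake waiters so they can re-check for space.
      this._waiters.splice(0).forEach((resolve) => resolve());
    }
  }
}

// Usage sketch: reserve a default chunk, then adjust once the response is serialized.
async function sendLargeJson(pool: SimpleMemoryPool, payload: unknown, send: (msg: string) => Promise<void>) {
  await pool.withReserved(20 * 1024 * 1024, async (updateReservation) => {
    const message = JSON.stringify(payload);
    updateReservation(message.length);    // Approximate bytes by string length; fine for a sketch.
    await send(message);
  });
}
```

A failure anywhere inside the callback (for example, serialization exceeding V8's limits) rejects the promise returned by `withReserved`; that is exactly the kind of rejection the new `sendMessageOrInterrupt` wrapper turns into a logged error plus an interrupted connection instead of an unhandled rejection.
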
@@ -37,7 +37,6 @@ import * as http from 'http';
 import * as https from 'https';
 import * as WebSocket from 'ws';
 
-import {CommDocEventType, CommMessage} from 'app/common/CommTypes';
 import {parseFirstUrlPart} from 'app/common/gristUrls';
 import {firstDefined, safeJsonParse} from 'app/common/gutil';
 import {UserProfile} from 'app/common/LoginSessionAPI';
@@ -270,19 +269,3 @@ export class Comm extends EventEmitter {
     return wss;
   }
 }
-
-/**
- * Sends a per-doc message to the given client.
- * @param {Object} client - The client object, as passed to all per-doc methods.
- * @param {Number} docFD - The document's file descriptor in the given client.
- * @param {String} type - The type of the message, e.g. 'docUserAction'.
- * @param {Object} messageData - The data for this type of message.
- * @param {Boolean} fromSelf - Whether `client` is the originator of this message.
- */
-export function sendDocMessage(
-  client: Client, docFD: number, type: CommDocEventType, data: unknown, fromSelf?: boolean
-) {
-  // TODO Warning disabled to preserve past behavior, but perhaps better to return the Promise?
-  // eslint-disable-next-line @typescript-eslint/no-floating-promises
-  client.sendMessage({type, docFD, data, fromSelf} as CommMessage);
-}

@@ -3,12 +3,11 @@
  * open, and what FD they are using.
  */
 
-import {CommDocEventType} from 'app/common/CommTypes';
+import {CommDocEventType, CommMessage} from 'app/common/CommTypes';
 import {arrayRemove} from 'app/common/gutil';
 import {ActiveDoc} from 'app/server/lib/ActiveDoc';
 import {Authorizer} from 'app/server/lib/Authorizer';
 import {Client} from 'app/server/lib/Client';
-import {sendDocMessage} from 'app/server/lib/Comm';
 import {DocSession, OptDocSession} from 'app/server/lib/DocSession';
 import {LogMethods} from "app/server/lib/LogMethods";
 
@@ -85,7 +84,14 @@ export class DocClients {
   public async broadcastDocMessage(client: Client|null, type: CommDocEventType, messageData: any,
                                    filterMessage?: (docSession: OptDocSession,
                                                     messageData: any) => Promise<any>): Promise<void> {
-    const send = (curr: DocSession) => this._send(curr, client, type, messageData, filterMessage);
+    const send = async (target: DocSession) => {
+      const msg = await this._prepareMessage(target, type, messageData, filterMessage);
+      if (msg) {
+        const fromSelf = (target.client === client);
+        await target.client.sendMessageOrInterrupt({...msg, docFD: target.fd, fromSelf} as CommMessage);
+      }
+    };
+
     if (Deps.BROADCAST_ORDER === 'parallel') {
       await Promise.all(this._docSessions.map(send));
     } else {
@@ -101,30 +107,30 @@ export class DocClients {
   }
 
   /**
-   * Send a message to a single client. See broadcastDocMessage for parameters.
+   * Prepares a message to a single client. See broadcastDocMessage for parameters.
    */
-  private async _send(target: DocSession, client: Client|null, type: CommDocEventType, messageData: any,
-                      filterMessage?: (docSession: OptDocSession,
-                                       messageData: any) => Promise<any>): Promise<void> {
-    const fromSelf = (target.client === client);
+  private async _prepareMessage(
+    target: DocSession, type: CommDocEventType, messageData: any,
+    filterMessage?: (docSession: OptDocSession, messageData: any) => Promise<any>
+  ): Promise<{type: CommDocEventType, data: unknown}|undefined> {
     try {
       // Make sure user still has view access.
       await target.authorizer.assertAccess('viewers');
       if (!filterMessage) {
-        sendDocMessage(target.client, target.fd, type, messageData, fromSelf);
+        return {type, data: messageData};
       } else {
         try {
           const filteredMessageData = await filterMessage(target, messageData);
           if (filteredMessageData) {
-            sendDocMessage(target.client, target.fd, type, filteredMessageData, fromSelf);
+            return {type, data: filteredMessageData};
          } else {
            this._log.debug(target, 'skip broadcastDocMessage because it is not allowed for this client');
          }
        } catch (e) {
          if (e.code && e.code === 'NEED_RELOAD') {
-            sendDocMessage(target.client, target.fd, 'docShutdown', null, fromSelf);
+            return {type: 'docShutdown', data: null};
          } else {
-            sendDocMessage(target.client, target.fd, 'docUserAction', {error: String(e)}, fromSelf);
+            return {type: 'docUserAction', data: {error: String(e)}};
          }
        }
      }
@@ -134,9 +140,10 @@ export class DocClients {
         this._log.debug(target, 'skip broadcastDocMessage because AUTH_NO_VIEW');
         // Go further and trigger a shutdown for this user, in case they are granted
         // access again later.
-        sendDocMessage(target.client, target.fd, 'docShutdown', null, fromSelf);
+        return {type: 'docShutdown', data: null};
       } else {
-        throw(e);
+        // Propagate any totally unexpected exceptions.
+        throw e;
       }
     }
   }

@@ -515,6 +515,26 @@ export class FlexServer implements GristServer {
     if (this._check('cleanup')) { return; }
     // Set up signal handlers. Note that nodemon sends SIGUSR2 to restart node.
     shutdown.cleanupOnSignals('SIGINT', 'SIGTERM', 'SIGHUP', 'SIGUSR2');
+
+    // We listen for uncaughtExceptions / unhandledRejections, but do exit when they happen. It is
+    // a strong recommendation, which seems best to follow
+    // (https://nodejs.org/docs/latest-v18.x/api/process.html#warning-using-uncaughtexception-correctly).
+    // We do try to shutdown cleanly (i.e. do any planned cleanup), which goes somewhat against
+    // the recommendation to do only synchronous work.
+
+    let counter = 0;
+
+    // Note that this event catches also 'unhandledRejection' (origin should be either
+    // 'uncaughtException' or 'unhandledRejection').
+    process.on('uncaughtException', (err, origin) => {
+      log.error(`UNHANDLED ERROR ${origin} (${counter}):`, err);
+      if (counter === 0) {
+        // Only call shutdown once. It's async and could in theory fail, in which case it would be
+        // another unhandledRejection, and would get caught and reported by this same handler.
+        void(shutdown.exit(1));
+      }
+      counter++;
+    });
   }
 
   public addTagChecker() {

@@ -4,6 +4,12 @@
 import * as t from "ts-interface-checker";
 // tslint:disable:object-literal-key-quotes
 
+export const ClientJsonMemoryLimits = t.iface([], {
+  "totalSize": t.opt("number"),
+  "jsonResponseReservation": t.opt("number"),
+  "maxReservationSize": t.opt(t.union("number", "null")),
+});
+
 export const ITestingHooks = t.iface([], {
   "getOwnPort": t.func("number"),
   "getPort": t.func("number"),
@@ -13,7 +19,7 @@ export const ITestingHooks = t.iface([], {
   "commShutdown": t.func("void"),
   "commRestart": t.func("void"),
   "commSetClientPersistence": t.func("number", t.param("ttlMs", "number")),
-  "commSetClientJsonMemoryLimit": t.func("number", t.param("newTotalSize", "number")),
+  "commSetClientJsonMemoryLimits": t.func("ClientJsonMemoryLimits", t.param("limits", "ClientJsonMemoryLimits")),
   "closeDocs": t.func("void"),
   "setDocWorkerActivation": t.func("void", t.param("workerId", "string"), t.param("active", t.union(t.lit('active'), t.lit('inactive'), t.lit('crash')))),
   "flushAuthorizerCache": t.func("void"),
@@ -23,9 +29,11 @@ export const ITestingHooks = t.iface([], {
   "setDiscourseConnectVar": t.func(t.union("string", "null"), t.param("varName", "string"), t.param("value", t.union("string", "null"))),
   "setWidgetRepositoryUrl": t.func("void", t.param("url", "string")),
   "getMemoryUsage": t.func("object"),
+  "tickleUnhandledErrors": t.func("void", t.param("errType", "string")),
 });
 
 const exportedTypeSuite: t.ITypeSuite = {
+  ClientJsonMemoryLimits,
   ITestingHooks,
 };
 export default exportedTypeSuite;

@@ -1,5 +1,11 @@
 import {UserProfile} from 'app/common/LoginSessionAPI';
 
+export interface ClientJsonMemoryLimits {
+  totalSize?: number;
+  jsonResponseReservation?: number;
+  maxReservationSize?: number|null;
+}
+
 export interface ITestingHooks {
   getOwnPort(): Promise<number>;
   getPort(): Promise<number>;
@@ -9,7 +15,7 @@ export interface ITestingHooks {
   commShutdown(): Promise<void>;
   commRestart(): Promise<void>;
   commSetClientPersistence(ttlMs: number): Promise<number>;
-  commSetClientJsonMemoryLimit(newTotalSize: number): Promise<number>;
+  commSetClientJsonMemoryLimits(limits: ClientJsonMemoryLimits): Promise<ClientJsonMemoryLimits>;
   closeDocs(): Promise<void>;
   setDocWorkerActivation(workerId: string, active: 'active'|'inactive'|'crash'): Promise<void>;
   flushAuthorizerCache(): Promise<void>;
@@ -19,4 +25,5 @@ export interface ITestingHooks {
   setDiscourseConnectVar(varName: string, value: string|null): Promise<string|null>;
   setWidgetRepositoryUrl(url: string): Promise<void>;
   getMemoryUsage(): Promise<object>;  // actually NodeJS.MemoryUsage
+  tickleUnhandledErrors(errType: string): Promise<void>;
 }

@@ -8,10 +8,11 @@ import * as Client from 'app/server/lib/Client';
 import {Comm} from 'app/server/lib/Comm';
 import log from 'app/server/lib/log';
 import {IMessage, Rpc} from 'grain-rpc';
+import {EventEmitter} from 'events';
 import {Request} from 'express';
 import * as t from 'ts-interface-checker';
 import {FlexServer} from './FlexServer';
-import {ITestingHooks} from './ITestingHooks';
+import {ClientJsonMemoryLimits, ITestingHooks} from './ITestingHooks';
 import ITestingHooksTI from './ITestingHooks-ti';
 import {connect, fromCallback} from './serverUtils';
 import {WidgetRepositoryImpl} from 'app/server/lib/WidgetRepository';
@@ -127,11 +128,42 @@ export class TestingHooks implements ITestingHooks {
     return prev;
   }
 
-  // Set the amount of memory Client.ts can use for JSON responses, in bytes.
-  // Returns the old limit.
-  public async commSetClientJsonMemoryLimit(newTotalSize: number): Promise<number> {
-    log.info("TestingHooks.commSetClientJsonMemoryLimit called with", newTotalSize);
-    return Client.jsonMemoryPool.setTotalSize(newTotalSize);
+  // Set one or more limits that Client.ts can use for JSON responses, in bytes.
+  // Returns the old limits.
+  // - totalSize limits total amount of memory Client allocates to JSON response
+  // - jsonResponseReservation sets the initial amount reserved for each response
+  // - maxReservationSize monkey-patches reservation logic to fail when reservation exceeds the
+  //   given amount, to simulate unexpected failures.
+  public async commSetClientJsonMemoryLimits(limits: ClientJsonMemoryLimits): Promise<ClientJsonMemoryLimits> {
+    log.info("TestingHooks.commSetClientJsonMemoryLimits called with", limits);
+    const previous: ClientJsonMemoryLimits = {};
+    if (limits.totalSize !== undefined) {
+      previous.totalSize = Client.jsonMemoryPool.setTotalSize(limits.totalSize);
+    }
+    if (limits.jsonResponseReservation !== undefined) {
+      previous.jsonResponseReservation = CommClientDeps.jsonResponseReservation;
+      CommClientDeps.jsonResponseReservation = limits.jsonResponseReservation;
+    }
+    if (limits.maxReservationSize !== undefined) {
+      previous.maxReservationSize = null;
+      const orig = Object.getPrototypeOf(Client.jsonMemoryPool)._updateReserved;
+      if (limits.maxReservationSize === null) {
+        (Client.jsonMemoryPool as any)._updateReserved = orig;
+      } else {
+        // Monkey-patch reservation logic to simulate unexpected failures.
+        const jsonMemoryThrowLimit = limits.maxReservationSize;
+        function updateReservedWithLimit(this: typeof Client.jsonMemoryPool, sizeDelta: number) {
+          const newSize: number = (this as any)._reservedSize + sizeDelta;
+          log.warn(`TestingHooks _updateReserved reserving ${newSize}, limit ${jsonMemoryThrowLimit}`);
+          if (newSize > jsonMemoryThrowLimit) {
+            throw new Error(`TestingHooks: hit JsonMemoryThrowLimit: ${newSize} > ${jsonMemoryThrowLimit}`);
+          }
+          return orig.call(this, sizeDelta);
+        }
+        (Client.jsonMemoryPool as any)._updateReserved = updateReservedWithLimit;
+      }
+    }
+    return previous;
   }
 
   public async closeDocs(): Promise<void> {
@@ -229,4 +261,18 @@ export class TestingHooks implements ITestingHooks {
   public async getMemoryUsage(): Promise<NodeJS.MemoryUsage> {
     return process.memoryUsage();
   }
+
+  // This is for testing the handling of unhandled exceptions and rejections.
+  public async tickleUnhandledErrors(errType: 'exception'|'rejection'|'error-event'): Promise<void> {
+    if (errType === 'exception') {
+      setTimeout(() => { throw new Error("TestingHooks: Fake exception"); }, 0);
+    } else if (errType === 'rejection') {
+      void(Promise.resolve(null).then(() => { throw new Error("TestingHooks: Fake rejection"); }));
+    } else if (errType === 'error-event') {
+      const emitter = new EventEmitter();
+      setTimeout(() => emitter.emit('error', new Error('TestingHooks: Fake error-event')), 0);
+    } else {
+      throw new Error(`Unrecognized errType ${errType}`);
+    }
+  }
 }

@@ -87,6 +87,10 @@ export class TestServerMerged extends EventEmitter implements IMochaServer {
     // immediately. It is simplest to use a diffent socket each time
     // we restart.
     const testingSocket = path.join(this.testDir, `testing-${this._starts}.socket`);
+    if (testingSocket.length >= 108) {
+      // Unix socket paths typically can't be longer than this. Who knew. Make the error obvious.
+      throw new Error(`Path of testingSocket too long: ${testingSocket.length} (${testingSocket})`);
+    }
 
     const stubCmd = '_build/stubs/app/server/server';
     const isCore = await fse.pathExists(stubCmd + '.js');

@@ -11,7 +11,7 @@ import * as tmp from 'tmp';
 import {GristWSConnection, GristWSSettings} from 'app/client/components/GristWSConnection';
 import {Comm as ClientComm} from 'app/client/components/Comm';
 import * as log from 'app/client/lib/log';
-import {Comm, sendDocMessage} from 'app/server/lib/Comm';
+import {Comm} from 'app/server/lib/Comm';
 import {Client, ClientMethod} from 'app/server/lib/Client';
 import {CommClientConnect} from 'app/common/CommTypes';
 import {delay} from 'app/common/delay';
@@ -73,8 +73,8 @@ describe('Comm', function() {
       return {x: x, y: y, name: "methodAsync"};
     },
     methodSend: async function(client, docFD) {
-      sendDocMessage(client, docFD, "fooType" as any, "foo");
-      sendDocMessage(client, docFD, "barType" as any, "bar");
+      void(client.sendMessage({docFD, type: "fooType" as any, data: "foo"}));
+      void(client.sendMessage({docFD, type: "barType" as any, data: "bar"}));
     }
   };
 

@@ -7,6 +7,7 @@ import * as log from 'app/server/lib/log';
 import {getGristConfig} from 'test/gen-server/testUtils';
 import {prepareDatabase} from 'test/server/lib/helpers/PrepareDatabase';
 import {TestServer} from 'test/server/lib/helpers/TestServer';
+import {waitForIt} from 'test/server/wait';
 import {createTestDir, EnvironmentSnapshot, setTmpLogLevel} from 'test/server/testUtils';
 import {assert} from 'chai';
 import * as cookie from 'cookie';
@@ -65,7 +66,7 @@ describe('ManyFetches', function() {
     // memory use limited in Client.ts by jsonMemoryPool.
 
     // Reduce the limit controlling memory for JSON responses from the default of 500MB to 50MB.
-    await docs.testingHooks.commSetClientJsonMemoryLimit(50 * 1024 * 1024);
+    await docs.testingHooks.commSetClientJsonMemoryLimits({totalSize: 50 * 1024 * 1024});
 
     // Create a large document where fetches would have a noticeable memory footprint.
     // 40k rows should produce ~2MB fetch response.
@@ -133,6 +134,59 @@ describe('ManyFetches', function() {
     }
   });
 
+  it('should cope gracefully when client messages fail', async function() {
+    // It used to be that sending data to the client could produce uncaught errors (in particular,
+    // for exceeding V8 JSON limits). This test case fakes errors to make sure they get handled.
+
+    // Create a document, initially empty. We'll add lots of rows later.
+    const {docId} = await createLargeDoc({rows: 0});
+
+    // If the server dies, testingHooks calls may hang. This wrapper prevents that.
+    const serverErrorPromise = docs.getExitPromise().then(() => { throw new Error("server exited"); });
+
+    // Make a connection.
+    const createConnectionFunc = await prepareGristWSConnection(docId);
+    const connectionA = createConnectionFunc();
+    const fetcherA = await connect(connectionA, docId);
+
+    // We'll expect 20k rows, taking up about 1MB. Set a lower limit for a fake exception.
+    const prev = await docs.testingHooks.commSetClientJsonMemoryLimits({
+      jsonResponseReservation: 100 * 1024,
+      maxReservationSize: 200 * 1024,
+    });
+
+    try {
+      // Adding lots of rows will produce an action that gets sent to the connected client.
+      // We've arranged for this send to fail. Promise.race helps notice if the server exits.
+      assert.equal(connectionA.established, true);
+      await Promise.race([serverErrorPromise, addRows(docId, 20_000, 20_000)]);
+
+      // Check that the send in fact failed, and the connection did get interrupted.
+      await waitForIt(() =>
+        assert.equal(connectionA.established, false, "Failed message should interrupt connection"),
+        1000, 100);
+
+      // Restore limits, so that fetch works below.
+      await docs.testingHooks.commSetClientJsonMemoryLimits(prev);
+
+      // Fetch data to make sure that the "addRows" call itself succeeded.
+      const connectionB = createConnectionFunc();
+      const fetcherB = await connect(connectionB, docId);
+      try {
+        fetcherB.startPausedFetch();
+        const data = await Promise.race([serverErrorPromise, fetcherB.completeFetch()]);
+        assert.lengthOf(data.tableData[2], 20_000);
+        assert.lengthOf(data.tableData[3].Num, 20_000);
+        assert.lengthOf(data.tableData[3].Text, 20_000);
+      } finally {
+        fetcherB.end();
+      }
+
+    } finally {
+      fetcherA.end();
+    }
+  });
+
   // Creates a document with the given number of rows, and about 50 bytes per row.
   async function createLargeDoc({rows}: {rows: number}): Promise<{docId: string}> {
     log.info("Preparing a doc of %s rows", rows);
@@ -142,7 +196,11 @@ describe('ManyFetches', function() {
       {id: 'Num', type: 'Numeric'},
       {id: 'Text', type: 'Text'}
     ]]]);
-    const chunk = 10_000;
+    await addRows(docId, rows);
+    return {docId};
+  }
+
+  async function addRows(docId: string, rows: number, chunk = 10_000): Promise<void> {
     for (let i = 0; i < rows; i += chunk) {
       const currentNumRows = Math.min(chunk, rows - i);
       await userApi.getDocAPI(docId).addRows('TestTable', {
@@ -152,7 +210,6 @@ describe('ManyFetches', function() {
         Text: Array.from(Array(currentNumRows), (_, n) => `Hello, world, again for the ${i + n}th time.`),
       });
     }
-    return {docId};
   }
 
   // Get all the info for how to create a GristWSConnection, and returns a connection-creating

test/server/lib/UnhandledErrors.ts (new file, 54 lines)
@@ -0,0 +1,54 @@
+import {prepareDatabase} from 'test/server/lib/helpers/PrepareDatabase';
+import {TestServer} from 'test/server/lib/helpers/TestServer';
+import {createTestDir, setTmpLogLevel} from 'test/server/testUtils';
+import {waitForIt} from 'test/server/wait';
+import {assert} from 'chai';
+import fetch from 'node-fetch';
+import {PassThrough} from 'stream';
+
+/**
+ * Grist sticks to the Node 18 default and recommendation to give up and exit on uncaught
+ * exceptions, unhandled promise rejections, and unhandled 'error' events. But it makes an effort
+ * to clean up before exiting, and to log the error in a better way.
+ */
+describe('UnhandledErrors', function() {
+  this.timeout(30000);
+
+  setTmpLogLevel('warn');
+
+  let testDir: string;
+
+  before(async function() {
+    testDir = await createTestDir('UnhandledErrors');
+    await prepareDatabase(testDir);
+  });
+
+  for (const errType of ['exception', 'rejection', 'error-event']) {
+    it(`should clean up on unhandled ${errType}`, async function() {
+      // Capture server log output, so that we can look to see how the server coped.
+      const output = new PassThrough();
+      const serverLogLines: string[] = [];
+      output.on('data', (data) => serverLogLines.push(data.toString()));
+
+      const server = await TestServer.startServer('home', testDir, errType, undefined, undefined, {output});
+
+      try {
+        assert.equal((await fetch(`${server.serverUrl}/status`)).status, 200);
+        serverLogLines.length = 0;
+
+        // Trigger an unhandled error, and check that the server logged it and attempted cleanup.
+        await server.testingHooks.tickleUnhandledErrors(errType);
+        await waitForIt(() => {
+          assert.isTrue(serverLogLines.some(line => new RegExp(`Fake ${errType}`).test(line)));
+          assert.isTrue(serverLogLines.some(line => /Server .* cleaning up/.test(line)));
+        }, 1000, 100);
+
+        // We expect the server to be dead now.
+        await assert.isRejected(fetch(`${server.serverUrl}/status`), /failed.*ECONNREFUSED/);
+
+      } finally {
+        await server.stop();
+      }
+    });
+  }
+});

@@ -9,6 +9,7 @@ import {exitPromise} from "app/server/lib/serverUtils";
 import log from "app/server/lib/log";
 import {delay} from "bluebird";
 import fetch from "node-fetch";
+import {Writable} from "stream";
 
 /**
  * This starts a server in a separate process.
@@ -20,10 +21,11 @@ export class TestServer {
     suitename: string,
     customEnv?: NodeJS.ProcessEnv,
     _homeUrl?: string,
+    options: {output?: Writable} = {},  // Pipe server output to the given stream
   ): Promise<TestServer> {
 
     const server = new TestServer(serverTypes, tempDirectory, suitename);
-    await server.start(_homeUrl, customEnv);
+    await server.start(_homeUrl, customEnv, options);
     return server;
   }
 
@@ -55,14 +57,18 @@ export class TestServer {
       ...process.env
     };
   }
-  public async start(_homeUrl?: string, customEnv?: NodeJS.ProcessEnv) {
+  public async start(_homeUrl?: string, customEnv?: NodeJS.ProcessEnv, options: {output?: Writable} = {}) {
     // put node logs into files with meaningful name that relate to the suite name and server type
     const fixedName = this._serverTypes.replace(/,/, '_');
     const nodeLogPath = path.join(this.rootDir, `${this._suiteName}-${fixedName}-node.log`);
     const nodeLogFd = await fse.open(nodeLogPath, 'a');
-    const serverLog = process.env.VERBOSE ? 'inherit' : nodeLogFd;
+    const serverLog = options.output ? 'pipe' : (process.env.VERBOSE ? 'inherit' : nodeLogFd);
     // use a path for socket that relates to suite name and server types
     this.testingSocket = path.join(this.rootDir, `${this._suiteName}-${fixedName}.socket`);
+    if (this.testingSocket.length >= 108) {
+      // Unix socket paths typically can't be longer than this. Who knew. Make the error obvious.
+      throw new Error(`Path of testingSocket too long: ${this.testingSocket.length} (${this.testingSocket})`);
+    }
     const env = {
       APP_HOME_URL: _homeUrl,
       GRIST_TESTING_SOCKET: this.testingSocket,
@@ -74,6 +80,11 @@
       env,
       stdio: ['inherit', serverLog, serverLog]
     });
+    if (options.output) {
+      this._server.stdout!.pipe(options.output);
+      this._server.stderr!.pipe(options.output);
+    }
+
    this._exitPromise = exitPromise(this._server);
 
     // Try to be more helpful when server exits by printing out the tail of its log.