(core) Manage memory used for websocket responses to reduce the risk of server crashes.

Summary:
- Implements MemoryPool for waiting on memory reservations.
- Uses MemoryPool to control memory used for stringifying JSON responses in Client.ts
- Limits total size of _missedMessages that may be queued for a particular client.
- Upgrades ws library, which may reduce memory usage, and allows pausing the websocket for testing.
  - The upgrade changed behavior in subtle corner cases, requiring various fixes to code and tests.

- dos.ts:
  - Includes Paul's fixes and updates to the dos.ts script for manual stress-testing.
  - Logging tweaks, to avoid excessive dumps on uncaughtError, and include timestamps.

Test Plan:
- Includes a test that measures heap size, and fails without memory management.
- Includes a unit test for MemoryPool.
- Some cleanup and additions to TestServer helper; in particular adds makeUserApi() helper used in multiple tests.
- Some fixes related to ws upgrade.

Reviewers: paulfitz

Reviewed By: paulfitz

Differential Revision: https://phab.getgrist.com/D3974
This commit is contained in:
Dmitry S
2023-08-07 10:28:17 -04:00
parent 7c114bf600
commit 526a5df157
14 changed files with 677 additions and 99 deletions

View File

@@ -310,7 +310,11 @@ describe('Comm', function() {
`Expected to see a failed send:\n${logMessages.join('\n')}`);
}
it("should receive all server messages in order when send doesn't fail", async function() {
it("should receive all server messages (small) in order when send doesn't fail", async function() {
await testSendOrdering({noFailedSend: true, useSmallMsgs: true});
});
it("should receive all server messages (large) in order when send doesn't fail", async function() {
await testSendOrdering({noFailedSend: true});
});
@@ -322,17 +326,15 @@ describe('Comm', function() {
await testSendOrdering({closeHappensFirst: true});
});
async function testSendOrdering(options: {noFailedSend?: boolean, closeHappensFirst?: boolean}) {
async function testSendOrdering(
options: {noFailedSend?: boolean, closeHappensFirst?: boolean, useSmallMsgs?: boolean}
) {
const eventsSeen: Array<'failedSend'|'close'> = [];
// Server-side Client object.
let ssClient!: Client;
const {cliComm, forwarder} = await startManagedConnection({
...assortedMethods,
// A method just to help us get a handle on the server-side Client object.
setClient: async (client) => { ssClient = client; },
});
const {cliComm, forwarder} = await startManagedConnection(assortedMethods);
// Intercept the call to _onClose to know when it occurs, since we are trying to hit a
// situation where 'close' and 'failedSend' events happen in either order.
@@ -366,6 +368,7 @@ describe('Comm', function() {
// in the clientConnect message, we are expected to reload the app. In the test, we replace
// the GristWSConnection.
cliComm.on('clientConnect', async (msg: CommClientConnect) => {
ssClient = comm!.getClient(msg.clientId);
if (msg.needReload) {
await delay(0);
cliComm.releaseDocConnection(docId);
@@ -373,11 +376,11 @@ describe('Comm', function() {
}
});
// Make the first event that gives us access to the Client object (ssClient).
await cliComm._makeRequest(null, null, "setClient", "foo", 1);
// Wait for a connect call, which we rely on to get access to the Client object (ssClient).
await waitForCondition(() => (clientConnectSpy.callCount > 0), 1000);
// Send large buffers, to fill up the socket's buffers to get it to block.
const data = "x".repeat(1.0e7);
const data = "x".repeat(options.useSmallMsgs ? 100_000 : 10_000_000);
const makeMessage = (n: number) => ({type: 'docUserAction', n, data});
let n = 0;
@@ -425,7 +428,12 @@ describe('Comm', function() {
// This test helper is used for 3 different situations. Check that we observed that
// situations we were trying to hit.
if (options.noFailedSend) {
assert.deepEqual(eventsSeen, ['close']);
if (options.useSmallMsgs) {
assert.deepEqual(eventsSeen, ['close']);
} else {
// Large messages now cause a send to fail, after filling up buffer, and close the socket.
assert.deepEqual(eventsSeen, ['close', 'close']);
}
} else if (options.closeHappensFirst) {
assert.equal(eventsSeen[0], 'close');
assert.include(eventsSeen, 'failedSend');

View File

@@ -218,7 +218,7 @@ function testDocApi() {
const ws1 = (await userApi.getOrgWorkspaces('current'))[0].id;
// Make sure kiwi isn't allowed here.
await userApi.updateOrgPermissions(ORG_NAME, {users: {[kiwiEmail]: null}});
const kiwiApi = makeUserApi(ORG_NAME, 'kiwi');
const kiwiApi = home.makeUserApi(ORG_NAME, 'kiwi');
await assert.isRejected(kiwiApi.getWorkspaceAccess(ws1), /Forbidden/);
// Add kiwi as an editor for the org.
await assert.isRejected(kiwiApi.getOrgAccess(ORG_NAME), /Forbidden/);
@@ -238,7 +238,7 @@ function testDocApi() {
const ws1 = (await userApi.getOrgWorkspaces('current'))[0].id;
await userApi.updateOrgPermissions(ORG_NAME, {users: {[kiwiEmail]: null}});
// Make sure kiwi isn't allowed here.
const kiwiApi = makeUserApi(ORG_NAME, 'kiwi');
const kiwiApi = home.makeUserApi(ORG_NAME, 'kiwi');
await assert.isRejected(kiwiApi.getWorkspaceAccess(ws1), /Forbidden/);
// Add kiwi as an editor of this workspace.
await userApi.updateWorkspacePermissions(ws1, {users: {[kiwiEmail]: 'editors'}});
@@ -257,7 +257,7 @@ function testDocApi() {
it("should allow only owners to remove a document", async () => {
const ws1 = (await userApi.getOrgWorkspaces('current'))[0].id;
const doc1 = await userApi.newDoc({name: 'testdeleteme1'}, ws1);
const kiwiApi = makeUserApi(ORG_NAME, 'kiwi');
const kiwiApi = home.makeUserApi(ORG_NAME, 'kiwi');
// Kiwi is editor of the document, so he can't delete it.
await userApi.updateDocPermissions(doc1, {users: {'kiwi@getgrist.com': 'editors'}});
@@ -273,7 +273,7 @@ function testDocApi() {
it("should allow only owners to rename a document", async () => {
const ws1 = (await userApi.getOrgWorkspaces('current'))[0].id;
const doc1 = await userApi.newDoc({name: 'testrenameme1'}, ws1);
const kiwiApi = makeUserApi(ORG_NAME, 'kiwi');
const kiwiApi = home.makeUserApi(ORG_NAME, 'kiwi');
// Kiwi is editor of the document, so he can't rename it.
await userApi.updateDocPermissions(doc1, {users: {'kiwi@getgrist.com': 'editors'}});
@@ -3039,7 +3039,7 @@ function testDocApi() {
it("limits daily API usage", async function () {
// Make a new document in a test product with a low daily limit
const api = makeUserApi('testdailyapilimit');
const api = home.makeUserApi('testdailyapilimit');
const workspaceId = await getWorkspaceId(api, 'TestDailyApiLimitWs');
const docId = await api.newDoc({name: 'TestDoc1'}, workspaceId);
const max = testDailyApiLimitFeatures.baseMaxApiUnitsPerDocumentPerDay;
@@ -3067,7 +3067,7 @@ function testDocApi() {
it("limits daily API usage and sets the correct keys in redis", async function () {
this.retries(3);
// Make a new document in a free team site, currently the only real product which limits daily API usage.
const freeTeamApi = makeUserApi('freeteam');
const freeTeamApi = home.makeUserApi('freeteam');
const workspaceId = await getWorkspaceId(freeTeamApi, 'FreeTeamWs');
const docId = await freeTeamApi.newDoc({name: 'TestDoc2'}, workspaceId);
// Rather than making 5000 requests, set high counts directly for the current and next daily and hourly keys
@@ -4329,7 +4329,7 @@ function setup(name: string, cb: () => Promise<void>) {
await cb();
// create TestDoc as an empty doc into Private workspace
userApi = api = makeUserApi(ORG_NAME);
userApi = api = home.makeUserApi(ORG_NAME);
const wid = await getWorkspaceId(api, 'Private');
docIds.TestDoc = await api.newDoc({name: 'TestDoc'}, wid);
});
@@ -4345,15 +4345,6 @@ function setup(name: string, cb: () => Promise<void>) {
});
}
function makeUserApi(org: string, user?: string) {
return new UserAPIImpl(`${home.serverUrl}/o/${org}`, {
headers: {Authorization: `Bearer api_key_for_${user || 'chimpy'}`},
fetch: fetch as any,
newFormData: () => new FormData() as any,
logger: log
});
}
async function getWorkspaceId(api: UserAPIImpl, name: string) {
const workspaces = await api.getOrgWorkspaces('current');
return workspaces.find((w) => w.name === name)!.id;

View File

@@ -0,0 +1,241 @@
import {GristWSConnection} from 'app/client/components/GristWSConnection';
import {TableFetchResult} from 'app/common/ActiveDocAPI';
import {UserAPIImpl} from 'app/common/UserAPI';
import {delay} from 'app/common/delay';
import * as log from 'app/server/lib/log';
import {getGristConfig} from 'test/gen-server/testUtils';
import {prepareDatabase} from 'test/server/lib/helpers/PrepareDatabase';
import {TestServer} from 'test/server/lib/helpers/TestServer';
import {createTestDir, EnvironmentSnapshot, setTmpLogLevel} from 'test/server/testUtils';
import {assert} from 'chai';
import * as cookie from 'cookie';
import fetch from 'node-fetch';
import WebSocket from 'ws';
describe('ManyFetches', function() {
  this.timeout(30000);

  setTmpLogLevel('warn');   // Set to 'info' to see what heap size actually is.

  let oldEnv: EnvironmentSnapshot;
  const userName = 'chimpy';
  const email = 'chimpy@getgrist.com';
  const org = 'docs';
  let home: TestServer;
  let docs: TestServer;
  let userApi: UserAPIImpl;

  beforeEach(async function() {
    oldEnv = new EnvironmentSnapshot();   // Needed for prepareDatabase, which changes process.env
    log.info("Starting servers");
    const testDir = await createTestDir("ManyFetches");
    await prepareDatabase(testDir);
    home = await TestServer.startServer('home', testDir, "home");
    docs = await TestServer.startServer('docs', testDir, "docs", {
      // The test verifies memory usage by checking heap sizes. The line below limits doc-worker
      // process so that it crashes when memory management is wrong. With fetch sizes
      // in this test, doc-worker's heap size goes from ~90M to ~330M without memory management;
      // this limit is in the middle as another way to verify that memory management helps.
      // Without this limit, there is no pressure on node to garbage-collect, so it may use more
      // memory than we expect, making the test less reliable.
      NODE_OPTIONS: '--max-old-space-size=210',
    }, home.serverUrl);
    userApi = home.makeUserApi(org, userName);
  });

  afterEach(async function() {
    // Stop all servers, and restore the environment mutated by prepareDatabase.
    await home.stop();
    await docs.stop();
    oldEnv.restore();
  });

  // Assert and log; helpful for working on the test (when setTmpLogLevel is 'info').
  function assertIsBelow(value: number, expected: number) {
    log.info("HeapMB", value, `(expected < ${expected})`);
    assert.isBelow(value, expected);
  }

  it('should limit the memory used to respond to many simultaneous fetches', async function() {
    // Here we create a large document, and fetch it in parallel 200 times, without reading
    // responses. This test relies on the fact that the server caches the fetched data, so only
    // the serialized responses to clients are responsible for large memory use. This is the
    // memory use limited in Client.ts by jsonMemoryPool.

    // Reduce the limit controlling memory for JSON responses from the default of 500MB to 50MB.
    await docs.testingHooks.commSetClientJsonMemoryLimit(50 * 1024 * 1024);

    // Create a large document where fetches would have a noticeable memory footprint.
    // 40k rows should produce ~2MB fetch response.
    const {docId} = await createLargeDoc({rows: 40_000});

    // When we get results, here's a checker that it looks reasonable.
    function checkResults(results: TableFetchResult[]) {
      assert.lengthOf(results, 100);
      for (const res of results) {
        assert.lengthOf(res.tableData[2], 40_000);
        assert.lengthOf(res.tableData[3].Num, 40_000);
        assert.lengthOf(res.tableData[3].Text, 40_000);
      }
    }

    // Prepare to make N requests. For N=100, doc-worker should need ~200M of additional memory
    // without memory management.
    const N = 100;

    // Helper to get doc-worker's heap size.
    // If the server dies, testingHooks calls may hang. This wrapper prevents that.
    const serverErrorPromise = docs.getExitPromise().then(() => { throw new Error("server exited"); });
    const getMemoryUsage = () => Promise.race([docs.testingHooks.getMemoryUsage(), serverErrorPromise]);
    const getHeapMB = async () => Math.round((await getMemoryUsage() as NodeJS.MemoryUsage).heapUsed /1024/1024);

    assertIsBelow(await getHeapMB(), 120);

    // Create all the connections, but don't make the fetches just yet.
    const createConnectionFunc = await prepareGristWSConnection(docId);
    const connectionsA = Array.from(Array(N), createConnectionFunc);
    const fetchersA = await Promise.all(connectionsA.map(c => connect(c, docId)));
    const connectionsB = Array.from(Array(N), createConnectionFunc);
    const fetchersB = await Promise.all(connectionsB.map(c => connect(c, docId)));

    try {
      assertIsBelow(await getHeapMB(), 120);

      // Start fetches without reading responses. This is a step that should push memory limits.
      fetchersA.map(f => f.startPausedFetch());

      // Give it a few seconds, enough for server to use what memory it can.
      await delay(2000);
      assertIsBelow(await getHeapMB(), 200);

      // Make N more requests. See that memory hasn't spiked.
      fetchersB.map(f => f.startPausedFetch());
      await delay(2000);
      assertIsBelow(await getHeapMB(), 200);

      // Complete the first batch of requests. This allows for the fetches to complete, and for
      // memory to get released. Also check that results look reasonable.
      checkResults(await Promise.all(fetchersA.map(f => f.completeFetch())));
      assertIsBelow(await getHeapMB(), 200);

      // Complete the outstanding requests. Memory shouldn't spike.
      checkResults(await Promise.all(fetchersB.map(f => f.completeFetch())));
      assertIsBelow(await getHeapMB(), 200);
    } finally {
      fetchersA.map(f => f.end());
      fetchersB.map(f => f.end());
    }
  });

  // Creates a document with the given number of rows, and about 50 bytes per row.
  async function createLargeDoc({rows}: {rows: number}): Promise<{docId: string}> {
    log.info("Preparing a doc of %s rows", rows);
    const ws = (await userApi.getOrgWorkspaces('current'))[0].id;
    const docId = await userApi.newDoc({name: 'testdoc'}, ws);
    await userApi.applyUserActions(docId, [['AddTable', 'TestTable', [
      {id: 'Num', type: 'Numeric'},
      {id: 'Text', type: 'Text'}
    ]]]);
    // Add rows in chunks, to avoid a single oversized request.
    const chunk = 10_000;
    for (let i = 0; i < rows; i += chunk) {
      const currentNumRows = Math.min(chunk, rows - i);
      await userApi.getDocAPI(docId).addRows('TestTable', {
        // Roughly 8 bytes per row
        Num: Array.from(Array(currentNumRows), (_, n) => (i + n) * 100),
        // Roughly 40 bytes per row
        Text: Array.from(Array(currentNumRows), (_, n) => `Hello, world, again for the ${i + n}th time.`),
      });
    }
    return {docId};
  }

  // Get all the info for how to create a GristWSConnection, and returns a connection-creating
  // function.
  async function prepareGristWSConnection(docId: string): Promise<() => GristWSConnection> {
    // Use cookies for access to stay as close as possible to regular operation.
    const resp = await fetch(`${home.serverUrl}/test/session`);
    const sid = cookie.parse(resp.headers.get('set-cookie')).grist_sid;
    if (!sid) { throw new Error('no session available'); }
    await home.testingHooks.setLoginSessionProfile(sid, {name: userName, email}, org);

    // Load the document html.
    const pageUrl = `${home.serverUrl}/o/docs/doc/${docId}`;
    const headers = {Cookie: `grist_sid=${sid}`};
    const doc = await fetch(pageUrl, {headers});
    const pageBody = await doc.text();

    // Pull out the configuration object embedded in the html.
    const gristConfig = getGristConfig(pageBody);
    const {assignmentId, getWorker, homeUrl} = gristConfig;
    if (!homeUrl) { throw new Error('no homeUrl'); }
    if (!assignmentId) { throw new Error('no assignmentId'); }
    const docWorkerUrl = getWorker && getWorker[assignmentId];
    if (!docWorkerUrl) { throw new Error('no docWorkerUrl'); }

    // Place the config object in window.gristConfig as if we were a
    // real browser client. GristWSConnection expects to find it there.
    globalThis.window = globalThis.window || {};
    (globalThis.window as any).gristConfig = gristConfig;

    // We return a function that constructs a GristWSConnection.
    return function createConnectionFunc() {
      let clientId: string = '0';
      return GristWSConnection.create(null, {
        makeWebSocket(url: string): any { return new WebSocket(url, undefined, { headers }); },
        getTimezone() { return Promise.resolve('UTC'); },
        getPageUrl() { return pageUrl; },
        getDocWorkerUrl() { return Promise.resolve(docWorkerUrl); },
        getClientId(did) { return clientId; },
        getUserSelector() { return ''; },
        updateClientId(did: string, cid: string) { clientId = cid; },
        advanceCounter(): string { return '0'; },
        log(msg, ...args) {},
        warn(msg, ...args) {},
      });
    };
  }

  // Actually connect GristWSConnection, open the doc, and return a few methods for next steps.
  async function connect(connection: GristWSConnection, docId: string) {
    // Resolve with the first message of the given event type that passes the filter.
    async function getMessage<T>(eventType: string, filter: (msg: T) => boolean): Promise<T> {
      return new Promise<T>(resolve => {
        function callback(msg: T) {
          if (filter(msg)) { connection.off(eventType, callback); resolve(msg); }
        }
        connection.on(eventType, callback);
      });
    }

    // Launch the websocket
    const connectionPromise = getMessage('connectState', (isConnected: boolean) => isConnected);
    connection.initialize(null);
    await connectionPromise;          // Wait for connection to succeed.

    const openPromise = getMessage('serverMessage', ({reqId}: {reqId?: number}) => (reqId === 0));
    connection.send(JSON.stringify({reqId: 0, method: 'openDoc', args: [docId]}));
    await openPromise;

    let fetchPromise: Promise<TableFetchResult>;
    return {
      // Pause the underlying websocket before sending the fetch, so the response queues up
      // server-side without being read; this is what creates the memory pressure being tested.
      startPausedFetch: () => {
        fetchPromise = getMessage<any>('serverMessage', ({reqId}: {reqId?: number}) => (reqId === 1));
        (connection as any)._ws.pause();
        connection.send(JSON.stringify({reqId: 1, method: 'fetchTable', args: [0, 'TestTable']}));
      },
      completeFetch: async (): Promise<TableFetchResult> => {
        (connection as any)._ws.resume();
        return (await fetchPromise as any).data;
      },
      end: () => {
        connection.dispose();
      },
    };
  }
});

View File

@@ -0,0 +1,115 @@
import {MemoryPool} from 'app/server/lib/MemoryPool';
import {delay} from 'app/common/delay';
import {isLongerThan} from 'app/common/gutil';
import {assert} from 'chai';
import * as sinon from 'sinon';
// Report whether a promise settles without any extra waiting (0ms threshold).
async function isResolved(promise: Promise<unknown>): Promise<boolean> {
  const stillPending = await isLongerThan(promise, 0);
  return !stillPending;
}
// Check the settled state of every given promise, evaluating them in parallel.
async function areResolved(...promises: Promise<unknown>[]): Promise<boolean[]> {
  const checks = promises.map((p) => isResolved(p));
  return Promise.all(checks);
}
// Snapshot a MemoryPool's accounting counters, for compact deepEqual assertions.
function poolInfo(mpool: MemoryPool): {total: number, reserved: number, available: number, awaiters: number} {
  const total = mpool.getTotalSize();
  const reserved = mpool.getReservedSize();
  const available = mpool.getAvailableSize();
  const awaiters = mpool.numWaiting();
  return {total, reserved, available, awaiters};
}
describe("MemoryPool", function() {

  afterEach(() => {
    sinon.restore();
  });

  it("should wait for enough space", async function() {
    const mpool = new MemoryPool(1000);
    const spy = sinon.spy();

    // Externally-controlled resolvers, so the test decides exactly when each task "finishes"
    // and releases its reservation.
    let r1: () => void;
    let r2: () => void;
    let r3: () => void;
    let r4: () => void;
    const w1 = new Promise<void>(r => { r1 = r; });
    const w2 = new Promise<void>(r => { r2 = r; });
    const w3 = new Promise<void>(r => { r3 = r; });
    const w4 = new Promise<void>(r => { r4 = r; });

    // Four tasks of 400 units each against a 1000-unit pool: at most two can hold
    // reservations at once.
    const p1 = mpool.withReserved(400, () => { spy(1); return w1; });
    const p2 = mpool.withReserved(400, () => { spy(2); return w2; });
    const p3 = mpool.withReserved(400, () => { spy(3); return w3; });
    const p4 = mpool.withReserved(400, () => { spy(4); return w4; });

    // Only two callbacks run initially.
    await delay(10);
    assert.deepEqual(spy.args, [[1], [2]]);

    // Others are waiting for something to finish.
    await delay(50);
    assert.deepEqual(spy.args, [[1], [2]]);

    // Once 2nd task finishes, the next one should run.
    r2!();
    await delay(10);
    assert.deepEqual(spy.args, [[1], [2], [3]]);
    await delay(50);
    assert.deepEqual(spy.args, [[1], [2], [3]]);

    // Once another task finishes, the last one should run.
    r3!();
    await delay(10);
    assert.deepEqual(spy.args, [[1], [2], [3], [4]]);

    // Let all tasks finish.
    r1!();
    r4!();
    await delay(10);
    assert.deepEqual(spy.args, [[1], [2], [3], [4]]);
    await Promise.all([p1, p2, p3, p4]);
  });

  it("should allow adjusting reservation", async function() {
    const mpool = new MemoryPool(1000);
    const res1p = mpool.waitAndReserve(600);
    const res2p = mpool.waitAndReserve(600);

    // Initially only the first reservation can happen.
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 600, available: 400, awaiters: 1});
    assert.deepEqual(await areResolved(res1p, res2p), [true, false]);

    // Once the first reservation is adjusted, the next one should go.
    const res1 = await res1p;
    res1.updateReservation(400);
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 1000, available: 0, awaiters: 0});
    assert.deepEqual(await areResolved(res1p, res2p), [true, true]);
    const res2 = await res2p;

    // Try some more complex combinations.
    const res3p = mpool.waitAndReserve(200);
    const res4p = mpool.waitAndReserve(200);
    const res5p = mpool.waitAndReserve(200);
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 1000, available: 0, awaiters: 3});
    assert.deepEqual(await areResolved(res3p, res4p, res5p), [false, false, false]);

    res1.updateReservation(100);  // 300 units freed.
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 900, available: 100, awaiters: 2});
    assert.deepEqual(await areResolved(res3p, res4p, res5p), [true, false, false]);

    res1.dispose();               // Another 100 freed.
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 1000, available: 0, awaiters: 1});
    assert.deepEqual(await areResolved(res3p, res4p, res5p), [true, true, false]);

    res2.dispose();               // Lots freed.
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 600, available: 400, awaiters: 0});
    assert.deepEqual(await areResolved(res3p, res4p, res5p), [true, true, true]);

    // Release the remaining reservations; the pool should return to fully available.
    (await res5p).dispose();
    (await res4p).dispose();
    (await res3p).dispose();
    assert.deepEqual(poolInfo(mpool), {total: 1000, reserved: 0, available: 1000, awaiters: 0});
  });
});

View File

@@ -1,11 +1,8 @@
import {UserAPIImpl} from 'app/common/UserAPI';
import {WebhookSubscription} from 'app/server/lib/DocApi';
import log from 'app/server/lib/log';
import axios from 'axios';
import * as bodyParser from 'body-parser';
import {assert} from 'chai';
import FormData from 'form-data';
import fetch from 'node-fetch';
import {tmpdir} from 'os';
import * as path from 'path';
import {createClient} from 'redis';
@@ -75,7 +72,7 @@ describe('Webhooks-Proxy', function () {
await cb();
// create TestDoc as an empty doc into Private workspace
userApi = api = makeUserApi(ORG_NAME, home.serverUrl);
userApi = api = home.makeUserApi(ORG_NAME);
const wid = await getWorkspaceId(api, 'Private');
docIds.TestDoc = await api.newDoc({name: 'TestDoc'}, wid);
});
@@ -102,7 +99,7 @@ describe('Webhooks-Proxy', function () {
});
function runServerConfigurations(additionaEnvConfiguration: object, subTestCall: Function) {
function runServerConfigurations(additionaEnvConfiguration: NodeJS.ProcessEnv, subTestCall: Function) {
additionaEnvConfiguration = {
ALLOWED_WEBHOOK_DOMAINS: `example.com,localhost:${webhooksTestPort}`,
GRIST_DATA_DIR: dataDir,
@@ -322,16 +319,6 @@ describe('Webhooks-Proxy', function () {
const ORG_NAME = 'docs-1';
function makeUserApi(org: string, homeServerUrl: string, user?: string) {
return new UserAPIImpl(`${homeServerUrl}/o/${org}`, {
headers: {Authorization: `Bearer api_key_for_${user || 'chimpy'}`},
fetch: fetch as any,
newFormData: () => new FormData() as any,
logger: log
});
}
async function getWorkspaceId(api: UserAPIImpl, name: string) {
const workspaces = await api.getOrgWorkspaces('current');
return workspaces.find((w) => w.name === name)!.id;

View File

@@ -1,24 +1,28 @@
import {connectTestingHooks, TestingHooksClient} from "app/server/lib/TestingHooks";
import {ChildProcess, execFileSync, spawn} from "child_process";
import FormData from 'form-data';
import path from "path";
import * as fse from "fs-extra";
import * as testUtils from "test/server/testUtils";
import {UserAPIImpl} from "app/common/UserAPI";
import {exitPromise} from "app/server/lib/serverUtils";
import log from "app/server/lib/log";
import {delay} from "bluebird";
import fetch from "node-fetch";
/**
* This starts a server in a separate process.
*/
export class TestServer {
public static async startServer
(serverTypes: string,
tempDirectory: string,
suitename: string,
additionalConfig?: Object,
_homeUrl?: string): Promise<TestServer> {
public static async startServer(
serverTypes: string,
tempDirectory: string,
suitename: string,
customEnv?: NodeJS.ProcessEnv,
_homeUrl?: string,
): Promise<TestServer> {
const server = new TestServer(serverTypes, tempDirectory, suitename);
// Override some env variables in server configuration to serve our test purpose:
const customEnv = {
...additionalConfig};
await server.start(_homeUrl, customEnv);
return server;
}
@@ -33,9 +37,10 @@ export class TestServer {
private readonly _defaultEnv;
constructor(private _serverTypes: string, private _tmpDir: string, private _suiteName: string) {
constructor(private _serverTypes: string, public readonly rootDir: string, private _suiteName: string) {
this._defaultEnv = {
GRIST_INST_DIR: this._tmpDir,
GRIST_INST_DIR: this.rootDir,
GRIST_DATA_DIR: path.join(this.rootDir, "data"),
GRIST_SERVERS: this._serverTypes,
// with port '0' no need to hard code a port number (we can use testing hooks to find out what
// port server is listening on).
@@ -50,14 +55,14 @@ export class TestServer {
...process.env
};
}
public async start(_homeUrl?: string, customEnv?: object) {
public async start(_homeUrl?: string, customEnv?: NodeJS.ProcessEnv) {
// put node logs into files with meaningful name that relate to the suite name and server type
const fixedName = this._serverTypes.replace(/,/, '_');
const nodeLogPath = path.join(this._tmpDir, `${this._suiteName}-${fixedName}-node.log`);
const nodeLogPath = path.join(this.rootDir, `${this._suiteName}-${fixedName}-node.log`);
const nodeLogFd = await fse.open(nodeLogPath, 'a');
const serverLog = process.env.VERBOSE ? 'inherit' : nodeLogFd;
// use a path for socket that relates to suite name and server types
this.testingSocket = path.join(this._tmpDir, `${this._suiteName}-${fixedName}.socket`);
this.testingSocket = path.join(this.rootDir, `${this._suiteName}-${fixedName}.socket`);
const env = {
APP_HOME_URL: _homeUrl,
GRIST_TESTING_SOCKET: this.testingSocket,
@@ -116,10 +121,25 @@ export class TestServer {
// wait for check
return (await fetch(`${this.serverUrl}/status/hooks`, {timeout: 1000})).ok;
} catch (err) {
log.warn("Failed to initialize server", err);
return false;
}
}
// Get access to the ChildProcess object for this server, e.g. to get its PID.
public getChildProcess(): ChildProcess { return this._server; }
// Returns the promise for the ChildProcess's signal or exit code.
public getExitPromise(): Promise<string|number> { return this._exitPromise; }
// Build a UserAPIImpl client for the given org on this server, authenticated via the
// test API-key convention ("api_key_for_<user>"); defaults to the 'chimpy' test user.
public makeUserApi(org: string, user: string = 'chimpy'): UserAPIImpl {
  const baseUrl = `${this.serverUrl}/o/${org}`;
  return new UserAPIImpl(baseUrl, {
    headers: {Authorization: `Bearer api_key_for_${user}`},
    fetch: fetch as unknown as typeof globalThis.fetch,
    newFormData: () => new FormData() as any,
    logger: log,
  });
}
private async _waitServerReady() {
// It's important to clear the timeout, because it can prevent node from exiting otherwise,

View File

@@ -12,6 +12,7 @@
import * as _ from 'underscore';
import { assert } from 'chai';
import {tmpdir} from 'os';
import * as path from 'path';
import * as fse from 'fs-extra';
import clone = require('lodash/clone');
@@ -309,6 +310,18 @@ export class EnvironmentSnapshot {
}
}
// Create (and clean) a dedicated directory for a test suite's logs and data.
// The name includes the user and mocha worker id so parallel runs don't collide.
export async function createTestDir(suiteName: string): Promise<string> {
  const rootDir = process.env.TESTDIR || tmpdir();
  const workerId = process.env.MOCHA_WORKER_ID || '0';
  const userName = process.env.USER || "nobody";
  const testDir = path.join(rootDir, `grist_test_${userName}_${suiteName}_${workerId}`);

  // Start from a clean slate: wipe any leftovers from a previous run, then recreate.
  await fse.remove(testDir);
  await fse.mkdirs(testDir);
  log.warn(`Test logs and data are at: ${testDir}/`);
  return testDir;
}
export async function getBuildFile(relativePath: string): Promise<string> {
if (await fse.pathExists(path.join('_build', relativePath))) {
return path.join('_build', relativePath);