import {FilterColValues} from 'app/common/ActiveDocAPI';
import {ApiError} from 'app/common/ApiError';
import {buildColFilter} from 'app/common/ColumnFilterFunc';
import {TableDataAction, TableDataActionSet} from 'app/common/DocActions';
import {DocData} from 'app/common/DocData';
import {DocumentSettings} from 'app/common/DocumentSettings';
import * as gristTypes from 'app/common/gristTypes';
import * as gutil from 'app/common/gutil';
import {nativeCompare} from 'app/common/gutil';
import {isTableCensored} from 'app/common/isHiddenTable';
import {buildRowFilter, getLinkingFilterFunc} from 'app/common/RowFilterFunc';
import {schema, SchemaTypes} from 'app/common/schema';
import {SortFunc} from 'app/common/SortFunc';
import {Sort} from 'app/common/SortSpec';
import {MetaRowRecord, MetaTableData} from 'app/common/TableData';
import {BaseFormatter, createFullFormatterFromDocData} from 'app/common/ValueFormatter';
import {ActiveDoc} from 'app/server/lib/ActiveDoc';
import {RequestWithLogin} from 'app/server/lib/Authorizer';
import {docSessionFromRequest} from 'app/server/lib/DocSession';
import {optIntegerParam, optJsonParam, optStringParam, stringParam} from 'app/server/lib/requestUtils';
import {ServerColumnGetters} from 'app/server/lib/ServerColumnGetters';
import * as express from 'express';
import * as _ from 'underscore';

// Helper type for Cell Accessor
type Access = (row: number) => any;

// Interface to document data used from an exporter worker thread (workerExporter.ts). Note that
// parameters and returned values are plain data that can be passed over a MessagePort.
export interface ActiveDocSource {
  getDocName(): Promise<string>;
  fetchMetaTables(): Promise<TableDataActionSet>;
  fetchTable(tableId: string): Promise<TableDataAction>;
}

// Implementation of ActiveDocSource using an ActiveDoc directly.
export class ActiveDocSourceDirect implements ActiveDocSource {
  private _req: RequestWithLogin;

  constructor(private _activeDoc: ActiveDoc, req: express.Request) {
    this._req = req as RequestWithLogin;
  }

  public async getDocName() { return this._activeDoc.docName; }
  public fetchMetaTables() { return this._activeDoc.fetchMetaTables(docSessionFromRequest(this._req)); }
  public async fetchTable(tableId: string) {
    const {tableData} = await this._activeDoc.fetchTable(docSessionFromRequest(this._req), tableId, true);
    return tableData;
  }
}
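
// Illustrative sketch, not used by this module: any object implementing ActiveDocSource can feed
// the export functions below, e.g. an in-memory stub for tests or a proxy forwarding calls over a
// MessagePort from a worker thread. Names and data below are hypothetical, and the TableDataAction
// shape shown ([kind, tableId, rowIds, colValues]) is assumed from app/common/DocActions.
//
//   const stubSource: ActiveDocSource = {
//     getDocName: async () => "SampleDoc",
//     fetchMetaTables: async () => { throw new Error("not needed for this sketch"); },
//     fetchTable: async (tableId) => ['TableData', tableId, [1, 2], {Name: ['Alice', 'Bob']}],
//   };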

// Helper interface with information about a column.
export interface ExportColumn {
  id: number;
  colId: string;
  label: string;
  type: string;
  formatter: BaseFormatter;
  parentPos: number;
  description: string;
}

/**
 * Bare data that is exported - used to convert to various formats.
 */
export interface ExportData {
  /**
   * Table name or table id.
   */
  tableName: string;
  /**
   * Document name.
   */
  docName: string;
  /**
   * Row ids (filtered and sorted).
   */
  rowIds: number[];
  /**
   * Accessors for values in each column.
   */
  access: Access[];
  /**
   * Column information (primarily used for formatting).
   */
  columns: ExportColumn[];
  /**
   * Document settings.
   */
  docSettings: DocumentSettings;
}
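
// Illustrative sketch of how ExportData is typically consumed (the real consumers live in the
// CSV/XLSX exporters): pair each row id with the per-column accessors, and use each column's
// formatter to turn raw values into display strings. formatAny is assumed to be the generic
// formatting entry point of BaseFormatter.
//
//   function toFormattedRows(data: ExportData): string[][] {
//     return data.rowIds.map(rowId =>
//       data.columns.map((col, i) => col.formatter.formatAny(data.access[i](rowId))));
//   }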

export type ExportHeader = 'colId' | 'label';

/**
 * Export parameters that identify a section, filters, and sort order.
 */
export interface ExportParameters {
  tableId: string;            // A value of '' is an instruction to export all tables.
  viewSectionId?: number;
  sortOrder?: number[];
  filters?: Filter[];
  linkingFilter?: FilterColValues;
  header?: ExportHeader;
}
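
// Illustrative example of an ExportParameters value (table and section ids are hypothetical;
// negative entries in sortOrder are assumed to mean descending order, per Sort.SortSpec):
//
//   const params: ExportParameters = {
//     tableId: 'Orders',
//     viewSectionId: 12,
//     sortOrder: [2, -3],
//     filters: [],
//     header: 'colId',
//   };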

/**
 * Options for the CSV and XLSX export functions.
 */
export interface DownloadOptions extends ExportParameters {
  filename: string;
}

/**
 * Gets export parameters from a request.
 */
export function parseExportParameters(req: express.Request): ExportParameters {
  const tableId = stringParam(req.query.tableId, 'tableId');
  const viewSectionId = optIntegerParam(req.query.viewSection, 'viewSection');
  const sortOrder = optJsonParam(req.query.activeSortSpec, []) as number[];
  const filters: Filter[] = optJsonParam(req.query.filters, []);
  const linkingFilter: FilterColValues = optJsonParam(req.query.linkingFilter, null);
  const header = optStringParam(req.query.header, 'header', {allowed: ['label', 'colId']}) as ExportHeader | undefined;

  return {
    tableId,
    viewSectionId,
    sortOrder,
    filters,
    linkingFilter,
    header,
  };
}
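
// Illustrative example (hypothetical values): a download request such as
//
//   GET /download?tableId=Orders&viewSection=12&activeSortSpec=[2,-3]&header=colId
//
// parses into {tableId: 'Orders', viewSectionId: 12, sortOrder: [2, -3], filters: [],
// linkingFilter: null, header: 'colId'}.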

// Helper for getting filtered metadata tables.
async function getMetaTables(activeDocSource: ActiveDocSource): Promise<TableDataActionSet> {
  return safe(await activeDocSource.fetchMetaTables(), "No metadata available in active document");
}

// Asserts that the value exists, and throws an ApiError otherwise.
function safe<T>(value: T, msg: string) {
  if (!value) { throw new ApiError(msg, 404); }
  return value as NonNullable<T>;
}

// Helper for getting a table from filtered metadata.
function safeTable<TableId extends keyof SchemaTypes>(metaTables: TableDataActionSet, tableId: TableId) {
  const table = safe(metaTables[tableId], `No table '${tableId}' in document`);
  const colTypes = safe(schema[tableId], `No table '${tableId}' in document schema`);
  return new MetaTableData<TableId>(tableId, table, colTypes);
}

// Helper for getting a record safely: it throws if the record is missing.
function safeRecord<TableId extends keyof SchemaTypes>(table: MetaTableData<TableId>, id: number) {
  return safe(table.getRecord(id), `No record ${id} in table ${table.tableId}`);
}

// Check that tableRef points to an uncensored table, or throw otherwise.
function checkTableAccess(tables: MetaTableData<"_grist_Tables">, tableRef: number): void {
  if (isTableCensored(tables, tableRef)) {
    throw new ApiError(`Cannot find or access table`, 404);
  }
}

/**
 * Builds export data for all raw tables in the document.
 */
export async function doExportDoc(
  activeDocSource: ActiveDocSource,
  handleTable: (data: ExportData) => Promise<void>,
): Promise<void> {
  const metaTables = await getMetaTables(activeDocSource);
  const tables = safeTable(metaTables, '_grist_Tables');
  // Select raw tables (i.e. tables that are not summary tables).
  const tableRefs = tables.filterRowIds({ summarySourceTable: 0 });
  for (const tableRef of tableRefs) {
    if (!isTableCensored(tables, tableRef)) {    // Omit censored tables
      const data = await doExportTable(activeDocSource, {metaTables, tableRef});
      await handleTable(data);
    }
  }
}
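
// Illustrative usage sketch (hypothetical helper, assuming an ActiveDoc and express request are in
// scope): export every uncensored raw table and collect the results. Real callers typically stream
// each table to an output file instead of accumulating everything in memory.
//
//   async function collectAllTables(activeDoc: ActiveDoc, req: express.Request): Promise<ExportData[]> {
//     const results: ExportData[] = [];
//     await doExportDoc(new ActiveDocSourceDirect(activeDoc, req), async (data) => { results.push(data); });
//     return results;
//   }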

/**
 * Builds export data for a table that can be used to produce files in various formats (csv, xlsx).
 */
export async function exportTable(
  activeDoc: ActiveDoc,
  tableRef: number,
  req: express.Request,
  {metaTables}: {metaTables?: TableDataActionSet} = {},
): Promise<ExportData> {
  return doExportTable(new ActiveDocSourceDirect(activeDoc, req), {metaTables, tableRef});
}

export async function doExportTable(
  activeDocSource: ActiveDocSource,
  options: {metaTables?: TableDataActionSet, tableRef?: number, tableId?: string},
) {
  const metaTables = options.metaTables || await getMetaTables(activeDocSource);
  const docData = new DocData((tableId) => { throw new Error("Unexpected DocData fetch"); }, metaTables);
  const tables = safeTable(metaTables, '_grist_Tables');
  const metaColumns = safeTable(metaTables, '_grist_Tables_column');

  let tableRef: number;
  if (options.tableRef) {
    tableRef = options.tableRef;
  } else {
    if (!options.tableId) { throw new Error('doExportTable: tableRef or tableId must be given'); }
    tableRef = tables.findRow('tableId', options.tableId);
    if (tableRef === 0) {
      throw new ApiError(`Table ${options.tableId} not found.`, 404);
    }
  }

  checkTableAccess(tables, tableRef);
  const table = safeRecord(tables, tableRef);

  // Select only columns that belong to this table.
  const tableColumns = metaColumns.filterRecords({parentId: tableRef})
    // Sort by parentPos and id, which should be the same order as in raw data.
    .sort((c1, c2) => nativeCompare(c1.parentPos, c2.parentPos) || nativeCompare(c1.id, c2.id));

  // Produce a column description matching what the user will see / expect to export.
  const columns: ExportColumn[] = tableColumns
    .filter(tc => !gristTypes.isHiddenCol(tc.colId))    // Exclude helper columns
    .map<ExportColumn>(tc => {
      // For reference columns, return the display column, and copy settings from the visible column.
      const displayCol = metaColumns.getRecord(tc.displayCol) || tc;
      return {
        id: displayCol.id,
        colId: displayCol.colId,
        label: tc.label,
        type: tc.type,
        formatter: createFullFormatterFromDocData(docData, tc.id),
        parentPos: tc.parentPos,
        description: tc.description,
      };
    });

  // Fetch the actual data.
  const tableData = await activeDocSource.fetchTable(table.tableId);
  const rowIds = tableData[2];
  const dataByColId = tableData[3];
  // Create cell accessors.
  const getters = new ServerColumnGetters(rowIds, dataByColId, columns);
  const access = columns.map(col => getters.getColGetter(col.id)!);

  let tableName = table.tableId;
  // Since table ids are not very friendly, borrow the name from the table's primary view.
  if (table.primaryViewId) {
    const viewId = table.primaryViewId;
    const views = safeTable(metaTables, '_grist_Views');
    const view = safeRecord(views, viewId);
    tableName = view.name;
  }

  const docInfo = safeRecord(safeTable(metaTables, '_grist_DocInfo'), 1);
  const docSettings = gutil.safeJsonParse(docInfo.documentSettings, {});
  const exportData: ExportData = {
    tableName,
    docName: await activeDocSource.getDocName(),
    rowIds,
    access,
    columns,
    docSettings
  };
  return exportData;
}
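
// Illustrative usage sketch (table name is hypothetical, and activeDoc/req are assumed to be in
// scope): export a single table by its string id rather than by row ref. Passing metaTables is
// only an optimization to avoid refetching metadata the caller already holds.
//
//   const data = await doExportTable(new ActiveDocSourceDirect(activeDoc, req), {tableId: 'Orders'});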

/**
 * Builds export data for a view section that can be used to produce files in various formats (csv, xlsx).
 */
export async function exportSection(
  activeDoc: ActiveDoc,
  viewSectionId: number,
  sortSpec: Sort.SortSpec | null,
  filters: Filter[] | null,
  linkingFilter: FilterColValues | null = null,
  req: express.Request,
  {metaTables}: {metaTables?: TableDataActionSet} = {},
): Promise<ExportData> {
  return doExportSection(new ActiveDocSourceDirect(activeDoc, req), viewSectionId, sortSpec,
    filters, linkingFilter, {metaTables});
}
export async function doExportSection(
  activeDocSource: ActiveDocSource,
  viewSectionId: number,
  sortSpec: Sort.SortSpec | null,
  filters: Filter[] | null,
  linkingFilter: FilterColValues | null = null,
  {metaTables}: {metaTables?: TableDataActionSet} = {},
): Promise<ExportData> {
  metaTables = metaTables || await getMetaTables(activeDocSource);
  const docData = new DocData((tableId) => { throw new Error("Unexpected DocData fetch"); }, metaTables);
  const viewSections = safeTable(metaTables, '_grist_Views_section');
  const viewSection = safeRecord(viewSections, viewSectionId);
  safe(viewSection.tableRef, `Cannot find or access table`);
  const tables = safeTable(metaTables, '_grist_Tables');
  checkTableAccess(tables, viewSection.tableRef);
  const table = safeRecord(tables, viewSection.tableRef);
  const metaColumns = safeTable(metaTables, '_grist_Tables_column');
  const columns = metaColumns.filterRecords({parentId: table.id});
  const viewSectionFields = safeTable(metaTables, '_grist_Views_section_field');
  const fields = viewSectionFields.filterRecords({parentId: viewSection.id});
  const savedFilters = safeTable(metaTables, '_grist_Filters')
    .filterRecords({viewSectionRef: viewSection.id});
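
  // Index fields and filters by colRef so the per-column lookups below are cheap.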
  const fieldsByColRef = _.indexBy(fields, 'colRef');
  const savedFiltersByColRef = _.indexBy(savedFilters, 'colRef');
  const unsavedFiltersByColRef = _.indexBy(filters ?? [], 'colRef');

  // Produce a column description matching what the user sees and expects to export.
  const viewify = (col: GristTablesColumn, field?: GristViewsSectionField): ExportColumn => {
    const displayCol = metaColumns.getRecord(field?.displayCol || col.displayCol) || col;
    return {
      id: displayCol.id,
      colId: displayCol.colId,
      label: col.label,
      type: col.type,
      formatter: createFullFormatterFromDocData(docData, col.id, field?.id),
      parentPos: col.parentPos,
      description: col.description,
    };
  };
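
  // Describe the filter for each column; an unsaved filter passed in from the client takes
  // precedence over the filter saved with the view section.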
  const buildFilters = (col: GristTablesColumn, field?: GristViewsSectionField) => {
    const filterString = unsavedFiltersByColRef[col.id]?.filter || savedFiltersByColRef[col.id]?.filter;
    const filterFunc = buildColFilter(filterString, col.type);
    return {
      filterFunc,
      id: col.id,
      colId: col.colId,
      type: col.type,
    };
  };
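
  // Filters are built for every non-hidden column of the table, not only for the columns shown
  // in the view section.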
  const columnsForFilters = columns
    .filter(column => !gristTypes.isHiddenCol(column.colId))
    .map(column => buildFilters(column, fieldsByColRef[column.id]));
  const viewColumns: ExportColumn[] = _.sortBy(fields, 'parentPos')
    .map((field) => viewify(metaColumns.getRecord(field.colRef)!, field));

  // The columns named in the sort spec need to be mapped to their display columns.
  sortSpec = sortSpec || gutil.safeJsonParse(viewSection.sortColRefs, []);
  sortSpec = sortSpec!.map((colSpec) => {
    const colRef = Sort.getColRef(colSpec);
    if (typeof colRef !== 'number') {
      // colRef might be a string for virtual tables, but we don't support them here.
      throw new Error(`Unsupported colRef type: ${typeof colRef}`);
    }
    const col = metaColumns.getRecord(colRef);
    if (!col) {
      return 0;
    }
    const effectiveColRef = viewify(col, fieldsByColRef[colRef]).id;
    return Sort.swapColRef(colSpec, effectiveColRef);
  });

  // fetch actual data
  const tableData = await activeDocSource.fetchTable(table.tableId);
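  // A TableDataAction is positional: tableData[2] holds the row ids and tableData[3] the column
  // values keyed by colId.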
  let rowIds = tableData[2];
  const dataByColId = tableData[3];

  // sort rows
  const getters = new ServerColumnGetters(rowIds, dataByColId, columns);
  const sorter = new SortFunc(getters);
  sorter.updateSpec(sortSpec);
  rowIds.sort((a, b) => sorter.compare(a, b));

  // create cell accessors
  const tableAccess = columnsForFilters.map(col => getters.getColGetter(col.id)!);

  // Create a single row filter by AND-ing the per-column filters: a row is kept only if it
  // passes every column's filter.
  const rowFilter = columnsForFilters
    .map((col, c) => buildRowFilter(tableAccess[c], col.filterFunc))
    .reduce((prevFilter, curFilter) => (id) => prevFilter(id) && curFilter(id), () => true);

  // filter row ids
  rowIds = rowIds.filter(rowFilter);

  if (linkingFilter) {
    rowIds = rowIds.filter(getLinkingFilterFunc(getters, linkingFilter));
  }

  const docInfo = safeRecord(safeTable(metaTables, '_grist_DocInfo'), 1);
  const docSettings = gutil.safeJsonParse(docInfo.documentSettings, {});

  const exportData: ExportData = {
    rowIds,
    docSettings,
    tableName: table.tableId,
    docName: await activeDocSource.getDocName(),
    access: viewColumns.map(col => getters.getColGetter(col.id)!),
    columns: viewColumns
  };
  return exportData;
}
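
// A minimal usage sketch, for illustration only (not called from this module). It assumes an
// ActiveDocSource implementation `docSource` obtained elsewhere and a real `viewSectionId`;
// passing null for the sort spec and filters uses the view's saved sort and saved filters.
//
//   const data: ExportData = await doExportSection(docSource, viewSectionId, null, null);
//   console.log(data.tableName, data.rowIds.length, data.columns.map(c => c.colId));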

type GristViewsSectionField = MetaRowRecord<'_grist_Views_section_field'>
type GristTablesColumn = MetaRowRecord<'_grist_Tables_column'>

// Type for filters passed from the client
export interface Filter { colRef: number, filter: string }