|
|
|
/**
|
|
|
|
* Module to handle the storage of Grist documents.
|
|
|
|
*
|
|
|
|
* A Grist document is stored as a SQLite database file. We keep everything in a single database
|
|
|
|
* file, including attachments, for the sake of having a single file represent a single "document"
|
|
|
|
* or "data set".
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
import * as sqlite3 from '@gristlabs/sqlite3';
|
|
|
|
import {LocalActionBundle} from 'app/common/ActionBundle';
|
|
|
|
import {DocAction, TableColValues, TableDataAction, toTableDataAction} from 'app/common/DocActions';
|
|
|
|
import * as gristTypes from 'app/common/gristTypes';
|
|
|
|
import * as marshal from 'app/common/marshal';
|
|
|
|
import * as schema from 'app/common/schema';
|
|
|
|
import {ActionHistoryImpl} from 'app/server/lib/ActionHistoryImpl';
|
|
|
|
import {ExpandedQuery} from 'app/server/lib/ExpandedQuery';
|
|
|
|
import {IDocStorageManager} from 'app/server/lib/IDocStorageManager';
|
|
|
|
import * as log from 'app/server/lib/log';
|
|
|
|
import * as assert from 'assert';
|
|
|
|
import * as bluebird from 'bluebird';
|
|
|
|
import * as fse from 'fs-extra';
|
|
|
|
import chunk = require('lodash/chunk');
|
|
|
|
import groupBy = require('lodash/groupBy');
|
|
|
|
import * as _ from 'underscore';
|
|
|
|
import * as util from 'util';
|
|
|
|
import * as uuidv4 from "uuid/v4";
|
|
|
|
import { ISQLiteDB, MigrationHooks, OpenMode, quoteIdent, ResultRow, SchemaInfo, SQLiteDB} from './SQLiteDB';
|
|
|
|
|
|
|
|
|
|
|
|
// Run with environment variable NODE_DEBUG=db (may include additional comma-separated sections)
|
|
|
|
// for verbose logging.
|
|
|
|
const debuglog = util.debuglog('db');
|
|
|
|
|
|
|
|
const maxSQLiteVariables = 500; // Actually could be 999, so this is playing it safe.
|
|
|
|
|
|
|
|
const PENDING_VALUE = [gristTypes.GristObjCode.Pending];
|
|
|
|
|
|
|
|
export class DocStorage implements ISQLiteDB {
|
|
|
|
|
|
|
|
// ======================================================================
|
|
|
|
// Static fields
|
|
|
|
// ======================================================================
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Schema for all system tables, i.e. those that are NOT known by the data engine. Regular
|
|
|
|
* metadata tables (such as _grist_DocInfo) are created via DocActions received from
|
|
|
|
* InitNewDoc useraction.
|
|
|
|
*
|
|
|
|
* The current "Storage Version" used by Grist is the length of the migrations list in its
|
|
|
|
* Schema. We use it to track changes to how data is stored on disk, and changes to the schema
|
|
|
|
* of non-data-engine tables (such as _gristsys_* tables). By contrast, "Schema Version" keeps
|
|
|
|
* track of the version of data-engine metadata. In SQLite, we use "PRAGMA user_version" to
|
|
|
|
* store the storage version number.
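   *
   * For example, "PRAGMA user_version" on a fully migrated document equals the number of
   * entries in the migrations list below.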
|
|
|
|
*/
|
|
|
|
public static docStorageSchema: SchemaInfo = {
|
|
|
|
async create(db: SQLiteDB): Promise<void> {
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_Files (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
ident TEXT UNIQUE,
|
|
|
|
data BLOB
|
|
|
|
)`);
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_Action (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
"actionNum" BLOB DEFAULT 0,
|
|
|
|
"time" BLOB DEFAULT 0,
|
|
|
|
"user" BLOB DEFAULT '',
|
|
|
|
"desc" BLOB DEFAULT '',
|
|
|
|
"otherId" BLOB DEFAULT 0,
|
|
|
|
"linkId" BLOB DEFAULT 0,
|
|
|
|
"json" BLOB DEFAULT ''
|
|
|
|
)`);
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_Action_step (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
"parentId" BLOB DEFAULT 0,
|
|
|
|
"type" BLOB DEFAULT '',
|
|
|
|
"name" BLOB DEFAULT '',
|
|
|
|
"tableId" BLOB DEFAULT '',
|
|
|
|
"colIds" BLOB DEFAULT '',
|
|
|
|
"rowIds" BLOB DEFAULT '',
|
|
|
|
"values" BLOB DEFAULT '',
|
|
|
|
"json" BLOB DEFAULT ''
|
|
|
|
)`);
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_ActionHistory (
|
|
|
|
id INTEGER PRIMARY KEY, -- Plain integer action ID ("actionRef")
|
|
|
|
actionHash TEXT UNIQUE, -- Action checksum
|
|
|
|
parentRef INTEGER, -- id of parent of this action
|
|
|
|
actionNum INTEGER, -- distance from root of tree in actions
|
|
|
|
body BLOB -- content of action
|
|
|
|
)`);
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_ActionHistoryBranch (
|
|
|
|
id INTEGER PRIMARY KEY, -- Numeric branch ID
|
|
|
|
name TEXT UNIQUE, -- Branch name
|
|
|
|
actionRef INTEGER -- Latest action on branch
|
|
|
|
)`);
|
|
|
|
for (const branchName of ['shared', 'local_sent', 'local_unsent']) {
|
|
|
|
await db.run("INSERT INTO _gristsys_ActionHistoryBranch(name) VALUES(?)",
|
|
|
|
branchName);
|
|
|
|
}
|
|
|
|
// This is a single row table (enforced by the CHECK on 'id'), containing non-shared info.
|
|
|
|
// - ownerInstanceId is the id of the instance which owns this copy of the Grist doc.
|
|
|
|
// - docId is also kept here because it should not be changeable by UserActions.
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_FileInfo (
|
|
|
|
id INTEGER PRIMARY KEY CHECK (id = 0),
|
|
|
|
docId TEXT DEFAULT '',
|
|
|
|
ownerInstanceId TEXT DEFAULT ''
|
|
|
|
)`);
|
|
|
|
await db.exec("INSERT INTO _gristsys_FileInfo (id) VALUES (0)");
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_PluginData (
|
|
|
|
id INTEGER PRIMARY KEY, -- Plain integer plugin data id
|
|
|
|
pluginId TEXT NOT NULL, -- Plugin id
|
|
|
|
key TEXT NOT NULL, -- the key
|
|
|
|
value BLOB DEFAULT '' -- the value associated with the key
|
|
|
|
);
|
|
|
|
-- Plugins have unique keys.
|
|
|
|
CREATE UNIQUE INDEX _gristsys_PluginData_unique_key on _gristsys_PluginData(pluginId, key);`);
|
|
|
|
},
|
|
|
|
migrations: [
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 1 does not require a migration. Docs at v1 (or before) may not all
|
|
|
|
// be the same, and are only made uniform by v2.
|
|
|
|
},
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 2. We change the types of all columns to BLOBs.
|
|
|
|
// This applies to all Grist tables, including metadata.
|
|
|
|
const migrationLabel = "DocStorage.docStorageSchema.migrations[v1->v2]";
|
|
|
|
const oldMaxPosDefault = String(Math.pow(2, 31) - 1);
|
|
|
|
|
|
|
|
function _upgradeTable(tableId: string) {
|
|
|
|
log.debug(`${migrationLabel}: table ${tableId}`);
|
|
|
|
// This returns rows with (at least) {name, type, dflt_value}.
|
|
|
|
return db.all(`PRAGMA table_info(${quoteIdent(tableId)})`)
|
|
|
|
.then(infoRows => {
|
|
|
|
const colListSql = infoRows.map(info => quoteIdent(info.name)).join(', ');
|
|
|
|
const colSpecSql = infoRows.map(_sqlColSpec).join(', ');
|
|
|
|
const tmpTableId = DocStorage._makeTmpTableId(tableId);
|
|
|
|
debuglog(`${migrationLabel}: ${tableId} (${colSpecSql})`);
|
|
|
|
return db.runEach(
|
|
|
|
`CREATE TABLE ${quoteIdent(tmpTableId)} (${colSpecSql})`,
|
|
|
|
`INSERT INTO ${quoteIdent(tmpTableId)} SELECT ${colListSql} FROM ${quoteIdent(tableId)}`,
|
|
|
|
`DROP TABLE ${quoteIdent(tableId)}`,
|
|
|
|
`ALTER TABLE ${quoteIdent(tmpTableId)} RENAME TO ${quoteIdent(tableId)}`
|
|
|
|
);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
function _sqlColSpec(info: ResultRow): string {
|
|
|
|
if (info.name === 'id') { return 'id INTEGER PRIMARY KEY'; }
|
|
|
|
// Fix the default for PositionNumber and ManualPos types, if set to a wrong old value.
|
|
|
|
const dfltValue = (info.type === 'REAL' && info.dflt_value === oldMaxPosDefault) ?
|
|
|
|
DocStorage._formattedDefault('PositionNumber') :
|
|
|
|
// The string "undefined" is also an invalid default; fix that too.
|
|
|
|
(info.dflt_value === 'undefined' ? 'NULL' : info.dflt_value);
|
|
|
|
|
|
|
|
return DocStorage._sqlColSpecFromDBInfo(Object.assign({}, info, {
|
|
|
|
type: 'BLOB',
|
|
|
|
dflt_value: dfltValue
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Some migration-type steps pre-date storage migrations. We can do them once for the first
|
|
|
|
// proper migration (i.e. this one, to v2), and then never worry about them for upgraded docs.
|
|
|
|
|
|
|
|
// Create table for files that wasn't always created in the past.
|
|
|
|
await db.exec(`CREATE TABLE IF NOT EXISTS _gristsys_Files (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
ident TEXT UNIQUE,
|
|
|
|
data BLOB
|
|
|
|
)`);
|
|
|
|
// Create _gristsys_Action.linkId column that wasn't always created in the past.
|
|
|
|
try {
|
|
|
|
await db.exec('ALTER TABLE _gristsys_Action ADD COLUMN linkId INTEGER');
|
|
|
|
log.debug("${migrationLabel}: Column linkId added to _gristsys_Action");
|
|
|
|
} catch (err) {
|
|
|
|
if (!(/duplicate/.test(err.message))) {
|
|
|
|
// ok if column already existed
|
|
|
|
throw err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Deal with the transition to blob types
|
|
|
|
const tblRows = await db.all("SELECT name FROM sqlite_master WHERE type='table'");
|
|
|
|
for (const tblRow of tblRows) {
|
|
|
|
// Note that _gristsys_Action tables in the past used Grist actions to create appropriate
|
|
|
|
// tables, so docs from that period would use BLOBs. For consistency, we upgrade those tables
|
|
|
|
// too.
|
|
|
|
if (tblRow.name.startsWith('_grist_') || !tblRow.name.startsWith('_') ||
|
|
|
|
tblRow.name.startsWith('_gristsys_Action')) {
|
|
|
|
await _upgradeTable(tblRow.name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
},
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 3. Convert old _gristsys_Action* tables to _gristsys_ActionHistory*.
|
|
|
|
await db.exec(`CREATE TABLE IF NOT EXISTS _gristsys_ActionHistory (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
actionHash TEXT UNIQUE,
|
|
|
|
parentRef INTEGER,
|
|
|
|
actionNum INTEGER,
|
|
|
|
body BLOB
|
|
|
|
)`);
|
|
|
|
await db.exec(`CREATE TABLE IF NOT EXISTS _gristsys_ActionHistoryBranch (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
name TEXT UNIQUE,
|
|
|
|
actionRef INTEGER
|
|
|
|
)`);
|
|
|
|
for (const branchName of ['shared', 'local_sent', 'local_unsent']) {
|
|
|
|
await db.run("INSERT OR IGNORE INTO _gristsys_ActionHistoryBranch(name) VALUES(?)",
|
|
|
|
branchName);
|
|
|
|
}
|
|
|
|
// Migrate any ActionLog information as best we can
|
|
|
|
const actions = await db.all("SELECT * FROM _gristsys_Action ORDER BY actionNum");
|
|
|
|
const steps = groupBy(await db.all("SELECT * FROM _gristsys_Action_step ORDER BY id"),
|
|
|
|
'parentId');
|
|
|
|
await db.execTransaction(async () => {
|
|
|
|
const history = new ActionHistoryImpl(db);
|
|
|
|
await history.initialize();
|
|
|
|
for (const action of actions) {
|
|
|
|
const step = steps[action.actionNum] || [];
|
|
|
|
const crudeTranslation: LocalActionBundle = {
|
|
|
|
actionNum: history.getNextHubActionNum(),
|
|
|
|
actionHash: null,
|
|
|
|
parentActionHash: null,
|
|
|
|
envelopes: [],
|
|
|
|
info: [
|
|
|
|
0,
|
|
|
|
{
|
|
|
|
time: action.time,
|
|
|
|
user: action.user,
|
|
|
|
inst: "",
|
|
|
|
desc: action.desc,
|
|
|
|
otherId: action.otherId,
|
|
|
|
linkId: action.linkId
|
|
|
|
}
|
|
|
|
],
|
|
|
|
// Take what was logged as a UserAction and treat it as a DocAction. Summarization
|
|
|
|
// currently depends on stored+undo fields to understand what changed in an ActionBundle.
|
|
|
|
// DocActions were not logged prior to this version, so we have to fudge things a little.
|
|
|
|
stored: [[0, JSON.parse(action.json) as DocAction]],
|
|
|
|
calc: [],
|
|
|
|
userActions: [JSON.parse(action.json)],
|
|
|
|
undo: step.map(row => JSON.parse(row.json))
|
|
|
|
};
|
|
|
|
await history.recordNextShared(crudeTranslation);
|
|
|
|
}
|
|
|
|
await db.run("DELETE FROM _gristsys_Action_step");
|
|
|
|
await db.run("DELETE FROM _gristsys_Action");
|
|
|
|
});
|
|
|
|
},
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 4. Maintain docId and ownerInstanceId in a single-row special table;
|
|
|
|
// for standalone sharing.
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_FileInfo (
|
|
|
|
id INTEGER PRIMARY KEY CHECK (id = 0),
|
|
|
|
docId TEXT DEFAULT '',
|
|
|
|
ownerInstanceId TEXT DEFAULT ''
|
|
|
|
)`);
|
|
|
|
await db.exec("INSERT INTO _gristsys_FileInfo (id) VALUES (0)");
|
|
|
|
},
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 5. Add a table to maintain per-plugin data, for plugins' Storage API.
|
|
|
|
await db.exec(`CREATE TABLE _gristsys_PluginData (
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
pluginId TEXT NOT NULL,
|
|
|
|
key TEXT NOT NULL,
|
|
|
|
value BLOB DEFAULT ''
|
|
|
|
);
|
|
|
|
CREATE UNIQUE INDEX IF NOT EXISTS _gristsys_PluginData_unique_key on _gristsys_PluginData(pluginId, key);`);
|
|
|
|
},
|
|
|
|
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 6. Migration to fix columns in user tables which have an incorrect
|
|
|
|
// DEFAULT for their Grist type, due to bug T462.
|
|
|
|
const migrationLabel = "DocStorage.docStorageSchema.migrations[v5->v6]";
|
|
|
|
|
|
|
|
const colRows: ResultRow[] = await db.all('SELECT t.tableId, c.colId, c.type ' +
|
|
|
|
'FROM _grist_Tables_column c JOIN _grist_Tables t ON c.parentId=t.id');
|
|
|
|
const docSchema = new Map<string, string>(); // Maps tableId.colId to grist type.
|
|
|
|
for (const {tableId, colId, type} of colRows) {
|
|
|
|
docSchema.set(`${tableId}.${colId}`, type);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fixes defaults and affected null values in a particular table.
|
|
|
|
async function _fixTable(tableId: string) {
|
|
|
|
log.debug(`${migrationLabel}: table ${tableId}`);
|
|
|
|
// This returns rows with (at least) {name, type, dflt_value}.
|
|
|
|
const infoRows: ResultRow[] = await db.all(`PRAGMA table_info(${quoteIdent(tableId)})`);
|
|
|
|
const origColSpecSql = infoRows.map(_sqlColSpec).join(', ');
|
|
|
|
|
|
|
|
// Get the column SQL for what the columns should be, and the value SQL for how to
|
|
|
|
// prepare the values to fill them in.
|
|
|
|
const fixes = infoRows.map((r) => _getInfoAndValuesSql(r, tableId));
|
|
|
|
const newColSpecSql = fixes.map(pair => pair[0]).map(_sqlColSpec).join(', ');
|
|
|
|
const valuesSql = fixes.map(pair => pair[1]).join(', ');
|
|
|
|
|
|
|
|
// Rebuild the table only if any column's SQL (e.g. DEFAULT values) have changed.
|
|
|
|
if (newColSpecSql === origColSpecSql) {
|
|
|
|
debuglog(`${migrationLabel}: ${tableId} unchanged: (${newColSpecSql})`);
|
|
|
|
} else {
|
|
|
|
debuglog(`${migrationLabel}: ${tableId} changed: (${newColSpecSql})`);
|
|
|
|
const tmpTableId = DocStorage._makeTmpTableId(tableId);
|
|
|
|
return db.runEach(
|
|
|
|
`CREATE TABLE ${quoteIdent(tmpTableId)} (${newColSpecSql})`,
|
|
|
|
`INSERT INTO ${quoteIdent(tmpTableId)} SELECT ${valuesSql} FROM ${quoteIdent(tableId)}`,
|
|
|
|
`DROP TABLE ${quoteIdent(tableId)}`,
|
|
|
|
`ALTER TABLE ${quoteIdent(tmpTableId)} RENAME TO ${quoteIdent(tableId)}`
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Look up the type for a single column, and if the default changed to non-NULL, construct
|
|
|
|
// the updated column SQL and the value SQL for how to prepare values.
|
|
|
|
function _getInfoAndValuesSql(info: ResultRow, tableId: string): [ResultRow, string] {
|
|
|
|
const qColId = quoteIdent(info.name);
|
|
|
|
const gristType = docSchema.get(`${tableId}.${info.name}`);
|
|
|
|
if (gristType) {
|
|
|
|
const dflt = DocStorage._formattedDefault(gristType);
|
|
|
|
if (info.dflt_value === 'NULL' && dflt !== 'NULL') {
|
|
|
|
return [{...info, dflt_value: dflt}, `IFNULL(${qColId}, ${dflt}) as ${qColId}`];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return [info, qColId];
|
|
|
|
}
|
|
|
|
|
|
|
|
function _sqlColSpec(info: ResultRow): string {
|
|
|
|
if (info.name === 'id') { return 'id INTEGER PRIMARY KEY'; }
|
|
|
|
return DocStorage._sqlColSpecFromDBInfo(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Go through all user tables and fix them.
|
|
|
|
const tblRows = await db.all("SELECT name FROM sqlite_master WHERE type='table'");
|
|
|
|
for (const tblRow of tblRows) {
|
|
|
|
if (!tblRow.name.startsWith('_')) {
|
|
|
|
await _fixTable(tblRow.name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
async function(db: SQLiteDB): Promise<void> {
|
|
|
|
// Storage version 7. Migration to store formulas in SQLite.
|
|
|
|
// Here, we only create empty columns for each formula column in the document. We let
|
|
|
|
// ActiveDoc, when it calculates formulas on open, detect that this migration just
|
|
|
|
// happened, and save the calculated results.
|
|
|
|
const colRows: ResultRow[] = await db.all('SELECT t.tableId, c.colId, c.type ' +
|
|
|
|
'FROM _grist_Tables_column c JOIN _grist_Tables t ON c.parentId=t.id WHERE c.isFormula');
|
|
|
|
|
|
|
|
// Go table by table.
|
|
|
|
const tableColRows = groupBy(colRows, 'tableId');
|
|
|
|
for (const tableId of Object.keys(tableColRows)) {
|
|
|
|
// There should be no columns conflicting with formula columns, but we check and skip
|
|
|
|
// them if there are.
|
|
|
|
const infoRows = await db.all(`PRAGMA table_info(${quoteIdent(tableId)})`);
|
|
|
|
const presentCols = new Set([...infoRows.map(row => row.name)]);
|
|
|
|
const newCols = tableColRows[tableId].filter(c => !presentCols.has(c.colId));
|
|
|
|
|
|
|
|
// Create all new columns.
|
|
|
|
for (const {colId, type} of newCols) {
|
|
|
|
await db.exec(`ALTER TABLE ${quoteIdent(tableId)} ` +
|
|
|
|
`ADD COLUMN ${DocStorage._columnDef(colId, type)}`);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fill them in with PENDING_VALUE. This way, on first load and Calculate, they would go
|
|
|
|
// from "Loading..." to their proper value. After the migration, they should never have
|
|
|
|
// PENDING_VALUE again.
|
|
|
|
const colListSql = newCols.map(c => `${quoteIdent(c.colId)}=?`).join(', ');
|
|
|
|
const types = newCols.map(c => c.type);
|
|
|
|
const sqlParams = DocStorage._encodeColumnsToRows(types, newCols.map(c => [PENDING_VALUE]));
|
|
|
|
await db.run(`UPDATE ${quoteIdent(tableId)} SET ${colListSql}`, sqlParams[0]);
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
]
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Decodes a database row object, returning a new object with decoded values. This is needed for
|
|
|
|
* Grist data, which is encoded.
|
|
|
|
*/
|
|
|
|
public static decodeRowValues(dbRow: ResultRow): any {
|
|
|
|
return _.mapObject(dbRow, val => DocStorage._decodeValue(val));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to distinguish which tables contain information about the metadata
|
|
|
|
   * that DocStorage needs to keep track of.
|
|
|
|
*/
|
|
|
|
private static _isMetadataTable(tableId: string): boolean {
|
|
|
|
return tableId === "_grist_Tables" || tableId === "_grist_Tables_column";
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Shortcut to get the SQL default for the given Grist type.
|
|
|
|
*/
|
|
|
|
private static _formattedDefault(colType: string): any {
|
|
|
|
return gristTypes.getDefaultForType(colType, {sqlFormatted: true});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Join array of strings by prefixing each one with sep.
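   *
   * For example (illustrative): _prefixJoin(', ', ['a', 'b']) returns ', a, b', and
   * _prefixJoin(', ', []) returns ''.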
|
|
|
|
*/
|
|
|
|
private static _prefixJoin(sep: string, array: string[]): string {
|
|
|
|
return array.length ? sep + array.join(sep) : '';
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to make a tmp table given a tableId
|
|
|
|
*
|
|
|
|
* @param {String} tableId
|
|
|
|
* @returns {String}
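   *
   * e.g. _makeTmpTableId('Table1') returns '_tmp_Table1'.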
|
|
|
|
*/
|
|
|
|
private static _makeTmpTableId(tableId: string): string {
|
|
|
|
return '_tmp_' + tableId;
|
|
|
|
}
|
|
|
|
|
|
|
|
private static _sqlColSpecFromDBInfo(info: ResultRow): string {
|
|
|
|
return `${quoteIdent(info.name)} ${info.type} DEFAULT ${info.dflt_value}`;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Converts an array of columns to an array of rows (suitable to use as sqlParams), encoding all
|
|
|
|
* values as needed, according to an array of Grist type strings (must be parallel to columns).
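   *
   * Illustrative example: given types ['Text', 'Numeric'] and valueColumns [['a', 'b'], [1, 2]],
   * the result is the row-wise transpose [['a', 1], ['b', 2]], with each value passed through
   * _encodeValue() (complex values such as lists come back as marshalled BLOBs).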
|
|
|
|
*/
|
|
|
|
private static _encodeColumnsToRows(types: string[], valueColumns: any[]): any[] {
|
|
|
|
const marshaller = new marshal.Marshaller({version: 2});
|
|
|
|
const rows = _.unzip(valueColumns);
|
|
|
|
for (const row of rows) {
|
|
|
|
for (let i = 0; i < row.length; i++) {
|
|
|
|
row[i] = DocStorage._encodeValue(marshaller, types[i], row[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return rows;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Encodes a single value for storing in SQLite. Numbers and text are stored as is, but complex
|
|
|
|
* types are marshalled and stored as BLOBs. We also marshal binary data, so that for encoded
|
|
|
|
* data, all BLOBs consistently contain marshalled data.
|
|
|
|
*
|
|
|
|
* Note that SQLite may contain tables that aren't used for Grist data (e.g. attachments), for
|
|
|
|
* which such encoding/marshalling is not used, and e.g. binary data is stored to BLOBs directly.
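   *
   * For example, a plain string or number is returned unchanged, while a list value such as
   * [1, 2, 3] is marshalled and returned as a buffer to be stored as a BLOB.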
|
|
|
|
*/
|
|
|
|
private static _encodeValue(marshaller: marshal.Marshaller, type: string, val: any): Uint8Array {
|
|
|
|
if (Array.isArray(val) || val instanceof Uint8Array || Buffer.isBuffer(val) ||
|
|
|
|
Number.isNaN(val) ||
|
|
|
|
(type === 'Bool' && (val === 0 || val === 1)) ||
|
|
|
|
(type !== 'Bool' && (val === true || val === false))) {
|
|
|
|
marshaller.marshal(val);
|
|
|
|
return marshaller.dump();
|
|
|
|
} else {
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Decodes Grist data received from SQLite; the inverse of _encodeValue().
|
|
|
|
*/
|
|
|
|
private static _decodeValue(val: any, type: string = 'Any'): any {
|
|
|
|
if (val instanceof Uint8Array || Buffer.isBuffer(val)) {
|
|
|
|
val = marshal.loads(val);
|
|
|
|
}
|
|
|
|
if (type === 'Bool' && (val === 0 || val === 1)) {
|
|
|
|
// Boolean values come in as 0/1. If the column is of type "Bool", interpret those as
|
|
|
|
// true/false (note that the data engine does this too).
|
|
|
|
return Boolean(val);
|
|
|
|
} else {
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Helper to return SQL snippet for column definition, using its colId and Grist type.
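   *
   * For example, _columnDef('Count', 'Numeric') produces something like
   * `"Count" BLOB DEFAULT 0` (the exact DEFAULT comes from _formattedDefault).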
|
|
|
|
*/
|
|
|
|
private static _columnDef(colId: string, colType: string): string {
|
|
|
|
return `${quoteIdent(colId)} BLOB DEFAULT ${DocStorage._formattedDefault(colType)}`;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// ======================================================================
|
|
|
|
// Instance fields
|
|
|
|
// ======================================================================
|
|
|
|
|
|
|
|
public docPath: string; // path to document file on disk
|
|
|
|
private _db: SQLiteDB|null; // database handle
|
|
|
|
|
|
|
|
// Maintains { tableId: { colId: gristType } } mapping for all tables, including grist metadata
|
|
|
|
// tables (obtained from auto-generated schema.js).
|
|
|
|
private _docSchema: {[tableId: string]: {[colId: string]: string}};
|
|
|
|
|
|
|
|
public constructor(public storageManager: IDocStorageManager, public docName: string) {
|
|
|
|
this.docPath = this.storageManager.getPath(docName);
|
|
|
|
this._db = null;
|
|
|
|
this._docSchema = Object.assign({}, schema.schema);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Opens an existing SQLite database and prepares it for use.
|
|
|
|
*/
|
|
|
|
public openFile(hooks: MigrationHooks = {}): Promise<void> {
|
|
|
|
    // It turns out to be important to return a bluebird promise; a lot of code outside
    // of DocStorage ultimately depends on this.
|
|
|
|
return bluebird.Promise.resolve(this._openFile(OpenMode.OPEN_EXISTING, hooks))
|
|
|
|
.then(() => this._initDB())
|
|
|
|
.then(() => this._updateMetadata());
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Creates a new SQLite database. Will throw an error if the database already exists.
|
|
|
|
* After a database is created it should be initialized by applying the InitNewDoc action.
|
|
|
|
*/
|
|
|
|
public createFile(): Promise<void> {
|
|
|
|
    // It turns out to be important to return a bluebird promise; a lot of code outside
    // of DocStorage ultimately depends on this.
|
|
|
|
return bluebird.Promise.resolve(this._openFile(OpenMode.CREATE_EXCL, {}))
|
|
|
|
.then(() => this._initDB());
|
|
|
|
// Note that we don't call _updateMetadata() as there are no metadata tables yet anyway.
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Creates a backup and calls cb() within a transaction. Returns the backup path. In case of
|
|
|
|
* failure, adds .backupPath property to the error object (and transaction is rolled back).
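   *
   * Example (illustrative): execWithBackup(db => db.exec('...')) makes a "migrate-db" backup,
   * runs the callback in a transaction, and resolves to the backup path.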
|
|
|
|
*/
|
|
|
|
public execWithBackup(cb: (db: SQLiteDB) => Promise<void>): Promise<string> {
|
|
|
|
let backupPath: string;
|
|
|
|
return this.storageManager.makeBackup(this.docName, "migrate-db")
|
|
|
|
.then((_backupPath: string) => {
|
|
|
|
backupPath = _backupPath;
|
|
|
|
log.info(`DocStorage[${this.docName}]: backup made at ${backupPath}`);
|
|
|
|
return this.execTransaction(cb);
|
|
|
|
})
|
|
|
|
.then(() => backupPath)
|
|
|
|
.catch((err: any) => {
|
|
|
|
// TODO: deal with typing for this kind of error (although nothing seems to depend
|
|
|
|
        // on it yet).
|
|
|
|
err.backupPath = backupPath;
|
|
|
|
throw err;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Initializes the database with proper settings.
|
|
|
|
*/
|
|
|
|
public _initDB(): Promise<void> {
|
|
|
|
// Set options for speed across multiple OSes/Filesystems.
|
|
|
|
    // WAL is fast and safe (it guarantees consistency across crashes), but has disadvantages,
    // including generating unwanted extra files that can be tricky to deal with when renaming,
    // etc., so the WAL options below are commented out.
|
|
|
|
// Setting synchronous to OFF is the fastest method, but is not as safe, and could lead to
|
|
|
|
// a database being corrupted if the computer it is running on crashes.
|
|
|
|
// TODO: Switch setting to FULL, but don't wait for SQLite transactions to finish before
|
|
|
|
// returning responses to the user. Instead send error messages on unexpected errors.
|
|
|
|
return this._getDB().exec(
|
|
|
|
// "PRAGMA wal_autochceckpoint = 1000;" +
|
|
|
|
// "PRAGMA page_size = 4096;" +
|
|
|
|
// "PRAGMA journal_size_limit = 0;" +
|
|
|
|
// "PRAGMA journal_mode = WAL;" +
|
|
|
|
// "PRAGMA auto_vacuum = 0;" +
|
|
|
|
// "PRAGMA synchronous = NORMAL"
|
|
|
|
"PRAGMA synchronous = OFF;"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Queries the database for Grist metadata and updates this._docSchema. It extends the auto-
|
|
|
|
   * generated mapping in app/common/schema.js to all tables, as `{tableId: {colId: gristType}}`.
|
|
|
|
*/
|
|
|
|
public _updateMetadata(): Promise<void> {
|
|
|
|
return this.all('SELECT t.tableId, c.colId, c.type ' +
|
|
|
|
'FROM _grist_Tables_column c JOIN _grist_Tables t ON c.parentId=t.id')
|
|
|
|
.then((rows: ResultRow[]) => {
|
|
|
|
const s: {[key: string]: any} = {};
|
|
|
|
for (const {tableId, colId, type} of rows) {
|
|
|
|
const table = s.hasOwnProperty(tableId) ? s[tableId] : (s[tableId] = {});
|
|
|
|
table[colId] = type;
|
|
|
|
}
|
|
|
|
// Note that schema is what's imported from app/common/schema.js
|
|
|
|
this._docSchema = Object.assign(s, schema.schema);
|
|
|
|
})
|
|
|
|
.catch(err => {
|
|
|
|
// This replicates previous logic for _updateMetadata.
|
|
|
|
if (err.message.startsWith('SQLITE_ERROR: no such table')) {
|
|
|
|
err.message = `NO_METADATA_ERROR: ${this.docName} has no metadata`;
|
|
|
|
err.cause.code = 'NO_METADATA_ERROR';
|
|
|
|
}
|
|
|
|
throw err;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Closes the SQLite database.
|
|
|
|
*/
|
|
|
|
public async shutdown(): Promise<void> {
|
|
|
|
if (!this._db) {
|
|
|
|
log.debug('DocStorage shutdown (trivial) success');
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
const db = this._getDB();
|
|
|
|
this._db = null;
|
|
|
|
await db.close();
|
|
|
|
log.debug('DocStorage shutdown success');
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Attaches the file to the document.
|
|
|
|
*
|
|
|
|
* TODO: This currently does not make the attachment available to the sandbox code. This is likely
|
|
|
|
* to be needed in the future, and a suitable API will need to be provided. Note that large blobs
|
|
|
|
* would be (very?) inefficient until node-sqlite3 adds support for incremental reading from a
|
|
|
|
* blob: https://github.com/mapbox/node-sqlite3/issues/424.
|
|
|
|
*
|
|
|
|
* @param {String} sourcePath: The path of the file containing the attachment data.
|
|
|
|
* @param {String} fileIdent: The unique identifier of the file in the database. ActiveDoc uses the
|
|
|
|
* checksum of the file's contents with the original extension.
|
|
|
|
* @returns {Promise[Boolean]} True if the file got attached; false if this ident already exists.
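   *
   * Example (illustrative): findOrAttachFile('/tmp/upload-1234', 'a1b2c3d4.png') resolves to
   * true on the first call and to false once that ident is already stored.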
|
|
|
|
*/
|
|
|
|
public findOrAttachFile(sourcePath: string, fileIdent: string): Promise<boolean> {
|
|
|
|
return this.execTransaction(db => {
|
|
|
|
// Try to insert a new record with the given ident. It'll fail UNIQUE constraint if exists.
|
|
|
|
return db.run('INSERT INTO _gristsys_Files (ident) VALUES (?)', fileIdent)
|
|
|
|
// Only if this succeeded, do the work of reading the file and inserting its data.
|
|
|
|
.then(() => fse.readFile(sourcePath))
|
|
|
|
.then(data =>
|
|
|
|
db.run('UPDATE _gristsys_Files SET data=? WHERE ident=?', data, fileIdent))
|
|
|
|
.then(() => true)
|
|
|
|
        // If the UNIQUE constraint failed, this ident must already exist, so return false.
|
|
|
|
.catch(err => {
|
|
|
|
if (/^SQLITE_CONSTRAINT: UNIQUE constraint failed/.test(err.message)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
throw err;
|
|
|
|
});
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Reads and returns the data for the given attachment.
|
|
|
|
* @param {String} fileIdent: The unique identifier of a file, as used by findOrAttachFile.
|
|
|
|
* @returns {Promise[Buffer]} The data buffer associated with fileIdent.
|
|
|
|
*/
|
|
|
|
public getFileData(fileIdent: string): Promise<Buffer> {
|
|
|
|
return this.get('SELECT data FROM _gristsys_Files WHERE ident=?', fileIdent)
|
|
|
|
.then(row => row && row.data);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Fetches the given table from the database. See fetchQuery() for return value.
|
|
|
|
*/
|
|
|
|
public fetchTable(tableId: string): Promise<Buffer> {
|
|
|
|
return this.fetchQuery({tableId, filters: {}});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Returns as a number the next row id for the given table.
|
|
|
|
*/
|
|
|
|
public async getNextRowId(tableId: string): Promise<number> {
|
|
|
|
const colData = await this.get(`SELECT MAX(id) as maxId FROM ${quoteIdent(tableId)}`);
|
|
|
|
if (!colData) {
|
|
|
|
throw new Error(`Error in DocStorage.getNextRowId: no table ${tableId}`);
|
|
|
|
}
|
|
|
|
return colData.maxId ? colData.maxId + 1 : 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Fetches all rows of the table with the given rowIds.
|
|
|
|
*/
|
|
|
|
public async fetchActionData(tableId: string, rowIds: number[], colIds?: string[]): Promise<TableDataAction> {
|
|
|
|
const colSpec = colIds ? ['id', ...colIds].map((c) => quoteIdent(c)).join(', ') : '*';
|
|
|
|
let fullValues: TableColValues|undefined;
|
|
|
|
|
|
|
|
// There is a limit to the number of arguments that may be passed in, so fetch data in chunks.
|
|
|
|
for (const rowIdChunk of chunk(rowIds, maxSQLiteVariables)) {
|
|
|
|
const sqlArg = rowIdChunk.map(() => '?').join(',');
|
|
|
|
const marshalled: Buffer = await this._getDB().allMarshal(
|
|
|
|
`SELECT ${colSpec} FROM ${quoteIdent(tableId)} WHERE id IN (${sqlArg})`, rowIdChunk);
|
|
|
|
|
|
|
|
const colValues: TableColValues = marshal.loads(marshalled);
|
|
|
|
if (!fullValues) {
|
|
|
|
fullValues = colValues;
|
|
|
|
} else {
|
|
|
|
for (const col of Object.keys(colValues)) {
|
|
|
|
fullValues[col].push(...colValues[col]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return toTableDataAction(tableId, fullValues || {id: []}); // Return empty TableColValues if rowIds was empty.
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Fetches a subset of the data specified by the given query, and returns an encoded TableData
|
|
|
|
* object, which is a marshalled dict mapping column ids (including 'id') to arrays of values.
|
|
|
|
*
|
|
|
|
* This now essentially subsumes the old fetchTable() method.
|
|
|
|
* Note that text is marshalled as unicode and blobs as binary strings (used to be binary strings
|
|
|
|
* for both before 2017-11-09). This allows blobs to be used exclusively for encoding types that
|
|
|
|
* are not easily stored as sqlite's native types.
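   *
   * Example (illustrative): fetchQuery({tableId: 'Table1', filters: {Category: ['A', 'B']}})
   * runs roughly `SELECT * FROM "Table1" WHERE "Table1"."Category" IN (?, ?)` with params
   * ['A', 'B'], and resolves to the marshalled result.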
|
|
|
|
*/
|
|
|
|
public async fetchQuery(query: ExpandedQuery): Promise<Buffer> {
|
|
|
|
// Convert query to SQL.
|
|
|
|
const params: any[] = [];
|
|
|
|
const whereParts: string[] = [];
|
|
|
|
for (const colId of Object.keys(query.filters)) {
|
|
|
|
const values = query.filters[colId];
|
|
|
|
// If values is empty, "IN ()" works in SQLite (always false), but wouldn't work in Postgres.
|
|
|
|
whereParts.push(`${quoteIdent(query.tableId)}.${quoteIdent(colId)} IN (${values.map(() => '?').join(', ')})`);
|
|
|
|
params.push(...values);
|
|
|
|
}
|
|
|
|
const whereClause = whereParts.length > 0 ? `WHERE ${whereParts.join(' AND ')}` : '';
|
|
|
|
const limitClause = (typeof query.limit === 'number') ? `LIMIT ${query.limit}` : '';
|
|
|
|
const joinClauses = query.joins ? query.joins.join(' ') : '';
|
|
|
|
const selects = query.selects ? query.selects.join(', ') : '*';
|
|
|
|
const sql = `SELECT ${selects} FROM ${quoteIdent(query.tableId)} ` +
|
|
|
|
`${joinClauses} ${whereClause} ${limitClause}`;
|
|
|
|
return this._getDB().allMarshal(sql, params);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Fetches all tables from the database, and returns a dictionary mapping table names to encoded
|
|
|
|
* TableData objects (marshalled dicts mapping column ids to arrays of values).
|
|
|
|
*/
|
|
|
|
public fetchAllTables(): Promise<{[key: string]: Buffer|null}> {
|
|
|
|
const tables: {[key: string]: Buffer|null} = {};
|
|
|
|
return bluebird.Promise.each(
|
|
|
|
this.all("SELECT name FROM sqlite_master WHERE type='table'"),
|
|
|
|
(row: ResultRow) => {
|
|
|
|
if (!row.name.startsWith('_gristsys_')) {
|
|
|
|
return this.fetchTable(row.name)
|
|
|
|
.then(data => {
|
|
|
|
tables[row.name] = data;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.return(tables);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Unmarshals and decodes data received from db.allMarshal() method (which we added to node-sqlite3).
|
|
|
|
* The data is a dictionary mapping column ids (including 'id') to arrays of values. This should
|
|
|
|
* be used for Grist data, which is encoded. For non-Grist data, use `marshal.loads()`.
|
|
|
|
*
|
|
|
|
* Note that we do NOT use this when loading data from a document, since the whole point of
|
|
|
|
* db.allMarshal() is to pass data directly to Python data engine without parsing in Node.
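   *
   * e.g. decodeMarshalledData(buf, 'Table1') might return {id: [1, 2], Name: ['Alice', 'Bob']}.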
|
|
|
|
*/
|
|
|
|
public decodeMarshalledData(marshalledData: Buffer | Uint8Array, tableId: string): TableColValues {
|
|
|
|
const columnValues: TableColValues = marshal.loads(marshalledData);
|
|
|
|
// Decode in-place to avoid unnecessary array creation.
|
|
|
|
for (const col of Object.keys(columnValues)) {
|
|
|
|
const type = this._getGristType(tableId, col);
|
|
|
|
const column = columnValues[col];
|
|
|
|
for (let i = 0; i < column.length; i++) {
|
|
|
|
column[i] = DocStorage._decodeValue(column[i], type);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return columnValues;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Applies stored actions received from data engine to the database by converting them to SQL
|
|
|
|
* statements and executing a serialized transaction.
|
|
|
|
* @param {Array[DocAction]} docActions - Array of doc actions from DataEngine.
|
|
|
|
* @returns {Promise} - An empty promise, resolved if successfully committed to db.
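   *
   * Example (illustrative): applyStoredActions([['UpdateRecord', 'Table1', 2, {Name: 'x'}]])
   * dispatches to this._process_UpdateRecord('Table1', 2, {Name: 'x'}).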
|
|
|
|
*/
|
|
|
|
public async applyStoredActions(docActions: DocAction[]): Promise<void> {
|
|
|
|
debuglog('DocStorage.applyStoredActions');
|
|
|
|
|
|
|
|
return bluebird.Promise.each(docActions, (action: DocAction) => {
|
|
|
|
const actionType = action[0];
|
|
|
|
const f = (this as any)["_process_" + actionType];
|
|
|
|
if (!_.isFunction(f)) {
|
|
|
|
log.error("Unknown action: " + actionType);
|
|
|
|
} else {
|
|
|
|
return f.apply(this, action.slice(1))
|
|
|
|
.then(() => {
|
|
|
|
const tableId = action[1]; // The first argument is always tableId;
|
|
|
|
if (DocStorage._isMetadataTable(tableId) && actionType !== 'AddTable') {
|
|
|
|
            // We only need to update the metadata for actions that change it. We skip
            // AddTable actions because adding a table by itself provides no new column
            // metadata, and updating when only _grist_Tables was added (without
            // _grist_Tables_column) would produce an error.
|
|
|
|
return this._updateMetadata();
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process AddTable action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {Array[Object]} columns - List of column objects with schema attributes.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_AddTable(tableId: string, columns: any[]): Promise<void> {
|
|
|
|
const colSpecSql =
|
|
|
|
DocStorage._prefixJoin(', ',
|
|
|
|
columns.map(c => DocStorage._columnDef(c.id, c.type)));
|
|
|
|
|
|
|
|
// Every table needs an "id" column, and it should be an "integer primary key" type so that it
|
|
|
|
// serves as the alias for the SQLite built-in "rowid" column. See
|
|
|
|
// https://www.sqlite.org/lang_createtable.html#rowid for details.
|
|
|
|
const sql = `CREATE TABLE ${quoteIdent(tableId)} (id INTEGER PRIMARY KEY${colSpecSql})`;
|
|
|
|
log.debug('AddTable SQL : ' + sql);
|
|
|
|
|
|
|
|
return this.exec(sql);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process UpdateRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table Id.
|
|
|
|
* @param {String} rowId - Row Id.
|
|
|
|
* @param {Object} columnValues - Column object with keys as column names.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_UpdateRecord(tableId: string, rowId: string, columnValues: any): Promise<void> {
|
|
|
|
// Do some small preprocessing to make this look like a BulkUpdateRecord
|
|
|
|
return this._process_BulkUpdateRecord(tableId, [rowId], _.mapObject(columnValues, (val: any) => [val]));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process AddRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {Integer} rowId - Row ID.
|
|
|
|
* @param {Object} columnValues - Column object with keys as column names.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_AddRecord(tableId: string, rowId: number, columnValues: any): Promise<void> {
|
|
|
|
// Do some small preprocessing to make this look like a BulkAddRecord
|
|
|
|
return this._process_BulkAddRecord(tableId, [rowId], _.mapObject(columnValues, (val: any) => [val]));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process BulkUpdateRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table Id.
|
|
|
|
* @param {Array[String]} rowIds - List of Row Ids.
|
|
|
|
* @param {Object} columnValues - Column object with keys as column names and arrays of values.
|
|
|
|
* @returns {Promise} - Promise for SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_BulkUpdateRecord(tableId: string, rowIds: string[], columnValues: any): Promise<void> {
|
|
|
|
const cols = Object.keys(columnValues);
|
|
|
|
if (!rowIds.length || !cols.length) { return Promise.resolve(); } // Nothing to do.
|
|
|
|
|
|
|
|
const colListSql = cols.map(c => quoteIdent(c) + '=?').join(', ');
|
|
|
|
const sql = `UPDATE ${quoteIdent(tableId)} SET ${colListSql} WHERE id=?`;
|
|
|
|
|
|
|
|
const types = cols.map(c => this._getGristType(tableId, c));
|
|
|
|
const sqlParams = DocStorage._encodeColumnsToRows(types, cols.map(c => columnValues[c]).concat([rowIds]));
|
|
|
|
|
|
|
|
debuglog("DocStorage._maybeBulkUpdateRecord SQL: %s (%s rows)", sql, sqlParams.length);
|
|
|
|
return this._applyMaybeBulkUpdateOrAddSql(sql, sqlParams);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process BulkAddRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {Array[Integer]} rowIds - Array of row IDs to be inserted.
|
|
|
|
* @param {Array[Object]} columnValues - Array of column info objects.
|
|
|
|
* @returns {Promise} - Promise for SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_BulkAddRecord(tableId: string, rowIds: number[], columnValues: {[key: string]: any}): Promise<void> {
|
|
|
|
if (rowIds.length === 0) { return Promise.resolve(); } // no rows means nothing to do
|
|
|
|
|
|
|
|
const cols = Object.keys(columnValues);
|
|
|
|
const colListSql = cols.map(c => quoteIdent(c) + ', ').join('');
|
|
|
|
const placeholders = cols.map(c => '?, ').join('');
|
|
|
|
const sql = `INSERT INTO ${quoteIdent(tableId)} (${colListSql}id) VALUES (${placeholders}?)`;
|
|
|
|
|
|
|
|
const types = cols.map(c => this._getGristType(tableId, c));
|
|
|
|
const sqlParams =
|
|
|
|
DocStorage._encodeColumnsToRows(types,
|
|
|
|
cols.map(c => columnValues[c]).concat([rowIds]));
|
|
|
|
|
|
|
|
debuglog("DocStorage._maybeBulkAddRecord SQL: %s (%s rows)", sql, sqlParams.length);
|
|
|
|
return this._applyMaybeBulkUpdateOrAddSql(sql, sqlParams);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process RemoveRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {String} rowId - Row ID.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_RemoveRecord(tableId: string, rowId: string): Promise<void> {
|
|
|
|
const sql = "DELETE FROM " + quoteIdent(tableId) + " WHERE id=?";
|
|
|
|
debuglog("RemoveRecord SQL: " + sql, [rowId]);
|
|
|
|
return this.run(sql, [rowId]);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process ReplaceTableData action. It is identical to BulkAddRecord, but
|
|
|
|
* deletes all data from the table first.
|
|
|
|
*/
|
|
|
|
public _process_ReplaceTableData(tableId: string, rowIds: number[], columnValues: any[]): Promise<void> {
|
|
|
|
return this.exec("DELETE FROM " + quoteIdent(tableId))
|
|
|
|
.then(() => this._process_BulkAddRecord(tableId, rowIds, columnValues));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process BulkRemoveRecord action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {Array[Integer]} rowIds - Array of row IDs to be deleted.
|
|
|
|
* @returns {Promise} - Promise for SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_BulkRemoveRecord(tableId: string, rowIds: number[]): Promise<void> {
|
|
|
|
if (rowIds.length === 0) { return Promise.resolve(); }// If we have nothing to remove, done.
|
|
|
|
|
|
|
|
const chunkSize = 10;
|
|
|
|
const preSql = "DELETE FROM " + quoteIdent(tableId) + " WHERE id IN (";
|
|
|
|
const postSql = ")";
|
|
|
|
const q = _.constant('?');
|
|
|
|
const chunkParams = _.range(chunkSize).map(q).join(',');
|
|
|
|
const numChunks = Math.floor(rowIds.length / chunkSize);
|
|
|
|
const numLeftovers = rowIds.length % chunkSize;
|
|
|
|
|
|
|
|
let chunkPromise;
|
|
|
|
|
|
|
|
if (numChunks > 0) {
|
|
|
|
debuglog("DocStorage.BulkRemoveRecord: splitting " + rowIds.length +
|
|
|
|
" deletes into chunks of size " + chunkSize);
|
|
|
|
chunkPromise = this.prepare(preSql + chunkParams + postSql)
|
|
|
|
.then(function(stmt) {
|
|
|
|
return bluebird.Promise.each(_.range(0, numChunks * chunkSize, chunkSize), function(index: number) {
|
|
|
|
debuglog("DocStorage.BulkRemoveRecord: chunk delete " + index + "-" + (index + chunkSize - 1));
|
|
|
|
return bluebird.Promise.fromCallback((cb: any) => stmt.run(rowIds.slice(index, index + chunkSize), cb));
|
|
|
|
})
|
|
|
|
.then(function() {
|
|
|
|
return bluebird.Promise.fromCallback((cb: any) => stmt.finalize(cb));
|
|
|
|
});
|
|
|
|
});
|
|
|
|
} else {
|
|
|
|
chunkPromise = Promise.resolve();
|
|
|
|
}
|
|
|
|
|
|
|
|
return chunkPromise.then(() => {
|
|
|
|
if (numLeftovers > 0) {
|
|
|
|
debuglog("DocStorage.BulkRemoveRecord: leftover delete " + (numChunks * chunkSize) + "-" + (rowIds.length - 1));
|
|
|
|
const leftoverParams = _.range(numLeftovers).map(q).join(',');
|
|
|
|
return this.run(preSql + leftoverParams + postSql,
|
|
|
|
rowIds.slice(numChunks * chunkSize, rowIds.length));
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process AddColumn action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table Id.
|
|
|
|
* @param {String} colId - Column Id.
|
|
|
|
* @param {Object} colInfo - Column info object.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public async _process_AddColumn(tableId: string, colId: string, colInfo: any): Promise<void> {
|
|
|
|
await this.exec(
|
|
|
|
`ALTER TABLE ${quoteIdent(tableId)} ADD COLUMN ${DocStorage._columnDef(colId, colInfo.type)}`);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process RenameColumn action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {String} fromColId - Column ID to rename.
|
|
|
|
* @param {String} toColId - New Column ID.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public async _process_RenameColumn(tableId: string, fromColId: string, toColId: string): Promise<void> {
|
|
|
|
if (fromColId === 'id' || fromColId === 'manualSort' || tableId.startsWith('_grist')) {
|
|
|
|
throw new Error('Cannot rename internal Grist column');
|
|
|
|
}
|
|
|
|
await this.exec(
|
|
|
|
`ALTER TABLE ${quoteIdent(tableId)} RENAME COLUMN ${quoteIdent(fromColId)} TO ${quoteIdent(toColId)}`);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process ModifyColumn action.
|
|
|
|
*
|
|
|
|
* Note that this requires access to the _grist_ tables, unlike many of the other actions.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {String} colId - Column ID.
|
|
|
|
* @param {Object} colInfo - Column info object.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public async _process_ModifyColumn(tableId: string, colId: string, colInfo: any): Promise<void> {
|
|
|
|
if (!colInfo) {
|
|
|
|
log.error("ModifyColumn action called without params.");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
return this._alterColumn(tableId, colId, colId, colInfo.type);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process RemoveColumn action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @param {String} colId - Column ID to rename.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_RemoveColumn(tableId: string, colId: string): Promise<void> {
|
|
|
|
const quote = quoteIdent;
|
|
|
|
const tmpTableId = DocStorage._makeTmpTableId(tableId);
|
|
|
|
|
|
|
|
// Note that SQLite does not support easily dropping columns. To drop a column from a table, we
|
|
|
|
    // need to follow the instructions at https://sqlite.org/lang_altertable.html. Since we don't use
|
|
|
|
// indexes or triggers, we skip a few steps.
|
|
|
|
|
|
|
|
// This returns rows with (at least) {name, type, dflt_value}.
|
|
|
|
return this.all(`PRAGMA table_info(${quote(tableId)})`)
|
|
|
|
.then(infoRows => {
|
|
|
|
const newInfoRows = infoRows.filter(row => (row.name !== colId && row.name !== 'id'));
|
|
|
|
if (newInfoRows.length === infoRows.length) {
|
|
|
|
        // Column was not found. That's ok, and happens when deleting a formula column.
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
const colListSql = DocStorage._prefixJoin(', ', newInfoRows.map(info => quote(info.name)));
|
|
|
|
const colSpecSql = DocStorage._prefixJoin(', ', newInfoRows.map(DocStorage._sqlColSpecFromDBInfo));
|
|
|
|
return this._getDB().runEach(
|
|
|
|
`CREATE TABLE ${quote(tmpTableId)} (id INTEGER PRIMARY KEY${colSpecSql})`,
|
|
|
|
`INSERT INTO ${quote(tmpTableId)} SELECT id${colListSql} FROM ${quote(tableId)}`,
|
|
|
|
`DROP TABLE ${quote(tableId)}`,
|
|
|
|
`ALTER TABLE ${quote(tmpTableId)} RENAME TO ${quote(tableId)}`
|
|
|
|
);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process RenameTable action.
|
|
|
|
*
|
|
|
|
* @param {string} fromTableId - Old table id
|
|
|
|
* @param {string} toTableId - New table id
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_RenameTable(fromTableId: string, toTableId: string): Promise<void> {
|
|
|
|
const sql: string[] = [];
|
|
|
|
|
|
|
|
if (fromTableId === toTableId) {
|
|
|
|
return Promise.resolve();
|
|
|
|
} else if (fromTableId.toLowerCase() === toTableId.toLowerCase()) {
|
|
|
|
const tmpTableId = DocStorage._makeTmpTableId(fromTableId);
|
|
|
|
sql.push("ALTER TABLE " + quoteIdent(fromTableId) +
|
|
|
|
" RENAME TO " + quoteIdent(tmpTableId));
|
|
|
|
fromTableId = tmpTableId;
|
|
|
|
}
|
|
|
|
|
|
|
|
sql.push("ALTER TABLE " + quoteIdent(fromTableId) +
|
|
|
|
" RENAME TO " + quoteIdent(toTableId));
|
|
|
|
|
|
|
|
log.debug("RenameTable SQL: " + sql);
|
|
|
|
return bluebird.Promise.each(sql, (stmt: string) => this.exec(stmt));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to process RemoveTable action.
|
|
|
|
*
|
|
|
|
* @param {String} tableId - Table ID.
|
|
|
|
* @returns {Promise} - A promise for the SQL execution.
|
|
|
|
*/
|
|
|
|
public _process_RemoveTable(tableId: string): Promise<void> {
|
|
|
|
const sql = "DROP TABLE " + quoteIdent(tableId);
|
|
|
|
|
|
|
|
log.debug("RemoveTable SQL: " + sql);
|
|
|
|
|
|
|
|
return this.exec(sql);
|
|
|
|
}
|
|
|
|
|
|
|
|
public renameDocTo(newName: string): Promise<void> {
|
|
|
|
log.debug('DocStorage.renameDocTo: %s -> %s', this.docName, newName);
|
|
|
|
return this.shutdown()
|
|
|
|
.then(() => this.storageManager.renameDoc(this.docName, newName))
|
|
|
|
.catch(err => {
|
|
|
|
log.error("DocStorage: renameDocTo %s -> %s failed: %s", this.docName, newName, err.message);
|
|
|
|
return this.openFile()
|
|
|
|
.then(function() {
|
|
|
|
throw err;
|
|
|
|
});
|
|
|
|
})
|
|
|
|
.then(() => {
|
|
|
|
this.docName = newName;
|
|
|
|
this.docPath = this.storageManager.getPath(newName);
|
|
|
|
return this.openFile();
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
public all(sql: string, ...args: any[]): Promise<ResultRow[]> {
|
|
|
|
return this._getDB().all(sql, ...args);
|
|
|
|
}
|
|
|
|
|
|
|
|
public run(sql: string, ...args: any[]): Promise<void> {
|
|
|
|
return this._markAsChanged(this._getDB().run(sql, ...args));
|
|
|
|
}
|
|
|
|
|
|
|
|
public exec(sql: string): Promise<void> {
|
|
|
|
return this._markAsChanged(this._getDB().exec(sql));
|
|
|
|
}
|
|
|
|
|
|
|
|
public prepare(sql: string, ...args: any[]): Promise<sqlite3.Statement> {
|
|
|
|
return this._getDB().prepare(sql, ...args);
|
|
|
|
}
|
|
|
|
|
|
|
|
public get(sql: string, ...args: any[]): Promise<ResultRow|undefined> {
|
|
|
|
return this._getDB().get(sql, ...args);
|
|
|
|
}
|
|
|
|
|
|
|
|
public execTransaction<T>(transx: (db1: SQLiteDB) => Promise<T>): Promise<T> {
|
|
|
|
const db = this._getDB();
|
|
|
|
return this._markAsChanged(db.execTransaction(() => transx(db)));
|
|
|
|
}
|
|
|
|
|
|
|
|
public runAndGetId(sql: string, ...params: any[]): Promise<number> {
|
|
|
|
const db = this._getDB();
|
|
|
|
return this._markAsChanged(db.runAndGetId(sql, ...params));
|
|
|
|
}
|
|
|
|
|
|
|
|
public requestVacuum(): Promise<boolean> {
|
|
|
|
const db = this._getDB();
|
|
|
|
return this._markAsChanged(db.requestVacuum());
|
|
|
|
}
|
|
|
|
|
|
|
|
public async getPluginDataItem(pluginId: string, key: string): Promise<any> {
|
|
|
|
const row = await this.get('SELECT value from _gristsys_PluginData WHERE pluginId = ? and key = ?', pluginId, key);
|
|
|
|
if (row) {
|
|
|
|
return row.value;
|
|
|
|
}
|
|
|
|
return undefined;
|
|
|
|
}
|
|
|
|
|
|
|
|
public async hasPluginDataItem(pluginId: string, key: string): Promise<any> {
|
|
|
|
const row = await this.get('SELECT value from _gristsys_PluginData WHERE pluginId=? and key=?', pluginId, key);
|
|
|
|
return typeof row !== 'undefined';
|
|
|
|
}
|
|
|
|
|
|
|
|
public setPluginDataItem(pluginId: string, key: string, value: string): Promise<void> {
|
|
|
|
return this.run('INSERT OR REPLACE into _gristsys_PluginData (pluginId, key, value) values (?, ?, ?)',
|
|
|
|
pluginId, key, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
public removePluginDataItem(pluginId: string, key: string): Promise<void> {
|
|
|
|
return this.run('DELETE from _gristsys_PluginData where pluginId = ? and key = ?', pluginId, key);
|
|
|
|
}
|
|
|
|
|
|
|
|
public clearPluginDataItem(pluginId: string): Promise<void> {
|
|
|
|
return this.run('DELETE from _gristsys_PluginData where pluginId = ?', pluginId);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get a list of indexes. For use in tests.
|
|
|
|
*/
|
|
|
|
public async testGetIndexes(): Promise<IndexInfo[]> {
|
|
|
|
return this._getIndexes();
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Create the specified indexes if they don't already exist. Remove indexes we
|
|
|
|
* created in the past that are not listed (leaving other indexes untouched).
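   *
   * Example (illustrative): updateIndexes([{tableId: 'Table1', colId: 'Customer'}]) creates an
   * auto_index_* index on Table1.Customer if missing, and drops any previously created
   * auto_index_* indexes not in the list.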
|
|
|
|
*/
|
|
|
|
public async updateIndexes(desiredIndexes: IndexColumns[]) {
|
|
|
|
// Find all indexes on user tables.
|
|
|
|
const indexes = await this._getIndexes();
|
|
|
|
// Keep track of indexes prior to calling this method and after the call to this method
|
|
|
|
// as two sets of "tableId.colId" strings.
|
|
|
|
const pre = new Set<string>(indexes.map(index => `${index.tableId}.${index.colId}`));
|
|
|
|
const post = new Set<string>();
|
|
|
|
for (const index of desiredIndexes) {
|
|
|
|
const idx = `${index.tableId}.${index.colId}`;
|
|
|
|
if (!pre.has(idx)) {
|
|
|
|
const name = `auto_index_${uuidv4().replace(/-/g, '_')}`;
|
|
|
|
log.debug(`DocStorage.updateIndexes: doc ${this.docName} adding index ${name} for ` +
|
|
|
|
`table ${index.tableId}, column ${index.colId}`);
|
|
|
|
await this.exec(`CREATE INDEX ${name} ON ${quoteIdent(index.tableId)}(${quoteIdent(index.colId)})`);
|
|
|
|
log.debug(`DocStorage.updateIndexes: doc ${this.docName} added index ${name} for ` +
|
|
|
|
`table ${index.tableId}, column ${index.colId}`);
|
|
|
|
}
|
|
|
|
post.add(idx);
|
|
|
|
}
|
|
|
|
for (const index of indexes) {
|
|
|
|
const idx = `${index.tableId}.${index.colId}`;
|
|
|
|
if (!post.has(idx) && index.indexId.startsWith('auto_index_')) {
|
|
|
|
log.debug(`DocStorage.updateIndexes: doc ${this.docName} dropping index ${index.indexId} for ` +
|
|
|
|
`table ${index.tableId}, column ${index.colId}`);
|
|
|
|
await this.exec(`DROP INDEX ${index.indexId}`);
|
|
|
|
log.debug(`DocStorage.updateIndexes: doc ${this.docName} dropped index ${index.indexId} for ` +
|
|
|
|
`table ${index.tableId}, column ${index.colId}`);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private async _markAsChanged<T>(promise: Promise<T>): Promise<T> {
|
|
|
|
try {
|
|
|
|
return await promise;
|
|
|
|
} finally {
|
|
|
|
this.storageManager.markAsChanged(this.docName);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Creates a new or opens an existing SQLite database, depending on mode.
|
|
|
|
* @return {Promise<number>} Promise for user_version stored in the database.
|
|
|
|
*/
|
|
|
|
private async _openFile(mode: number, hooks: MigrationHooks): Promise<number> {
|
|
|
|
try {
|
|
|
|
this._db = await SQLiteDB.openDB(this.docPath, DocStorage.docStorageSchema, mode, hooks);
|
|
|
|
log.debug("DB %s open successfully", this.docName);
|
|
|
|
return this._db.getMigrationVersion();
|
|
|
|
} catch (err) {
|
|
|
|
log.debug("DB %s open error: %s", this.docName, err);
|
|
|
|
throw err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper for applying Bulk Update or Add Record sql
|
|
|
|
*/
|
|
|
|
private _applyMaybeBulkUpdateOrAddSql(sql: string, sqlParams: any[]): Promise<void> {
|
|
|
|
if (sqlParams.length === 1) {
|
|
|
|
return this.run(sql, sqlParams[0]);
|
|
|
|
} else {
|
|
|
|
return this.prepare(sql)
|
|
|
|
.then(function(stmt) {
|
|
|
|
return bluebird.Promise.each(sqlParams, function(param: string) {
|
|
|
|
return bluebird.Promise.fromCallback((cb: any) => stmt.run(param, cb));
|
|
|
|
})
|
|
|
|
.then(function() {
|
|
|
|
return bluebird.Promise.fromCallback((cb: any) => stmt.finalize(cb));
|
|
|
|
});
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Read SQLite's metadata for tableId, and generate SQL for the altered version of the table.
|
|
|
|
* @param {string} colId: Existing colId to change or delete. We'll return null if it's missing.
|
|
|
|
* @param {string} newColId: New colId.
|
|
|
|
* @param {string|null} newColType: New grist type, or null to keep unchanged.
|
|
|
|
* @return {Promise<string|null>} New table SQL, or null when nothing changed or colId is missing.
|
|
|
|
*/
|
|
|
|
private async _rebuildTableSql(tableId: string, colId: string, newColId: string,
|
|
|
|
newColType: string|null = null): Promise<RebuildResult|null> {
|
|
|
|
// This returns rows with (at least) {name, type, dflt_value}.
|
|
|
|
assert(newColId, 'newColId required');
|
|
|
|
let infoRows = await this.all(`PRAGMA table_info(${quoteIdent(tableId)})`);
|
|
|
|
|
|
|
|
// Skip "id" column, and find the column we are modifying.
|
|
|
|
infoRows = infoRows.filter(row => (row.name !== 'id'));
|
|
|
|
const colInfo = infoRows.find(info => (info.name === colId));
|
|
|
|
if (!colInfo) {
|
|
|
|
return null; // Column not found.
|
|
|
|
}
|
|
|
|
const oldDefault = colInfo.dflt_value;
|
|
|
|
const newDefault = newColType ? DocStorage._formattedDefault(newColType) : oldDefault;
|
|
|
|
const newInfo = {name: newColId, type: 'BLOB', dflt_value: newDefault};
|
|
|
|
// Check if anything actually changed, and only rebuild the table then.
|
|
|
|
if (Object.keys(newInfo).every(p => ((newInfo as any)[p] === colInfo[p]))) {
|
|
|
|
return null; // No changes.
|
|
|
|
}
|
|
|
|
Object.assign(colInfo, newInfo);
|
|
|
|
const colSpecSql = DocStorage._prefixJoin(', ', infoRows.map(DocStorage._sqlColSpecFromDBInfo));
|
|
|
|
return {
|
|
|
|
sql: `CREATE TABLE ${quoteIdent(tableId)} (id INTEGER PRIMARY KEY${colSpecSql})`,
|
|
|
|
oldDefault,
|
|
|
|
newDefault,
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Helper to alter a table to new table SQL, which is appropriate for renaming columns, or
|
|
|
|
* changing default values for a column, i.e. changes that don't affect on-disk content in any
|
|
|
|
* way. See https://sqlite.org/lang_altertable.html.
|
|
|
|
*/
|
|
|
|
private async _alterTableSoft(tableId: string, newTableSql: string): Promise<void> {
|
|
|
|
// Procedure according to https://sqlite.org/lang_altertable.html: "appropriate for ... renaming
|
|
|
|
// columns, or adding or removing or changing default values on a column."
|
|
|
|
const row = await this.get("PRAGMA schema_version");
|
|
|
|
assert(row && row.schema_version, "Could not retrieve schema_version.");
|
|
|
|
const newSchemaVersion = row!.schema_version + 1;
|
|
|
|
const tmpTableId = DocStorage._makeTmpTableId(tableId);
|
|
|
|
await this._getDB().runEach(
|
|
|
|
"PRAGMA writable_schema=ON",
|
|
|
|
["UPDATE sqlite_master SET sql=? WHERE type='table' and name=?", [newTableSql, tableId]],
|
|
|
|
`PRAGMA schema_version=${newSchemaVersion}`,
|
|
|
|
"PRAGMA writable_schema=OFF",
|
|
|
|
// The following are not in the instructions, but are needed for SQLite to notice the
|
|
|
|
// changes for subsequent queries.
|
|
|
|
`ALTER TABLE ${quoteIdent(tableId)} RENAME TO ${quoteIdent(tmpTableId)}`,
|
|
|
|
`ALTER TABLE ${quoteIdent(tmpTableId)} RENAME TO ${quoteIdent(tableId)}`
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
private async _alterColumn(tableId: string, colId: string, newColId: string,
|
|
|
|
newColType: string|null = null): Promise<void> {
|
|
|
|
const result = await this._rebuildTableSql(tableId, colId, newColId, newColType);
|
|
|
|
if (result) {
|
|
|
|
if (result.oldDefault !== result.newDefault) {
|
|
|
|
// This isn't strictly necessary, but addresses a SQLite quirk that breaks our tests
|
|
|
|
// (although likely unnoticeable in practice): an added column has "holes" for existing
|
|
|
|
// records that show up as the default value but don't actually store that default. When
|
|
|
|
// we do the soft-alter here, those values reflect the new default, i.e. change
|
|
|
|
// unexpectedly. Setting the default values explicitly prevents this unexpected change.
|
|
|
|
const dflt = result.oldDefault;
|
|
|
|
const q = quoteIdent;
|
|
|
|
// (Note that comparison below must use "IS" rather than "=" to work for NULLs.)
|
|
|
|
await this.exec(`UPDATE ${q(tableId)} SET ${q(colId)}=${dflt} WHERE ${q(colId)} IS ${dflt}`);
|
|
|
|
}
|
|
|
|
await this._alterTableSoft(tableId, result.sql);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private _getGristType(tableId: string, colId: string): string {
|
|
|
|
return (this._docSchema[tableId] && this._docSchema[tableId][colId]) || 'Any';
|
|
|
|
}
|
|
|
|
|
|
|
|
private _getDB(): SQLiteDB {
|
|
|
|
if (!this._db) {
|
|
|
|
throw new Error("Tried to use DocStorage database before it was opened");
|
|
|
|
}
|
|
|
|
return this._db;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get a list of user indexes
|
|
|
|
*/
|
|
|
|
private async _getIndexes(): Promise<IndexInfo[]> {
|
|
|
|
// Find all indexes on user tables.
|
|
|
|
return await this.all("SELECT tbl_name as tableId, il.name as indexId, ii.name as colId " +
|
|
|
|
"FROM sqlite_master AS m, " +
|
|
|
|
"pragma_index_list(m.name) AS il, " +
|
|
|
|
"pragma_index_info(il.name) AS ii " +
|
|
|
|
"WHERE m.type='table' " +
|
|
|
|
"AND tbl_name NOT LIKE '_grist%' " +
|
|
|
|
"ORDER BY tableId, colId") as any;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
interface RebuildResult {
|
|
|
|
sql: string;
|
|
|
|
oldDefault: string;
|
|
|
|
newDefault: string;
|
|
|
|
}
|
|
|
|
|
|
|
|
// A summary of columns a database index is covering or should cover.
|
|
|
|
export interface IndexColumns {
|
|
|
|
tableId: string; // name of table
|
|
|
|
colId: string; // column indexed (only single-column indexes supported for now)
|
|
|
|
}
|
|
|
|
|
|
|
|
// A summary of a database index, including its name.
|
|
|
|
export interface IndexInfo extends IndexColumns {
|
|
|
|
indexId: string; // name of index
|
|
|
|
}
|