gristlabs_grist-core/app/server/lib/gristSessions.ts


import session from '@gristlabs/express-session';
import {parseSubdomain} from 'app/common/gristUrls';
import {isNumber} from 'app/common/gutil';
import {RequestWithOrg} from 'app/server/lib/extractOrg';
import {GristServer} from 'app/server/lib/GristServer';
import {fromCallback} from 'app/server/lib/serverUtils';
import {Sessions} from 'app/server/lib/Sessions';
import {promisifyAll} from 'bluebird';
import * as crypto from 'crypto';
import * as express from 'express';
import assignIn = require('lodash/assignIn');
import * as path from 'path';
export const cookieName = process.env.GRIST_SESSION_COOKIE || 'grist_sid';
export const COOKIE_MAX_AGE =
process.env.COOKIE_MAX_AGE === 'none' ? null :
isNumber(process.env.COOKIE_MAX_AGE || '') ? Number(process.env.COOKIE_MAX_AGE) :
90 * 24 * 60 * 60 * 1000; // 90 days in milliseconds
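// Illustrative examples (values are assumptions, not defaults): COOKIE_MAX_AGE=86400000 keeps the
// cookie for one day after login, while COOKIE_MAX_AGE=none leaves max-age unset so the cookie
// remains a session cookie.
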
// RedisStore and SqliteStore are expected to provide a set/get interface for sessions.
export interface SessionStore {
getAsync(sid: string): Promise<any>;
setAsync(sid: string, session: any): Promise<void>;
close(): Promise<void>;
}
/**
*
* A V1 session. A session can be associated with a number of users.
* There may be a preferred association between users and organizations:
* specifically, if from the url we can tell that we are showing material
* for a given organization, we should pick a user that has access to that
* organization.
*
 * This interface plays no role at all yet! We are refactoring existing
 * sessions step by step to get closer to this.
*
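 * An illustrative value (the ids here are made up):
 *   { users: [{userId: 17}], orgs: [{orgId: 3, userId: 17}] }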
*/
export interface IGristSession {
// V1 Hosted Grist - known available users.
users: Array<{
userId?: number;
}>;
// V1 Hosted Grist - known user/org relationships.
orgs: Array<{
orgId: number;
userId: number;
}>;
}
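
/**
 * Returns a factory for the session store: a Redis-backed store when REDIS_URL is set, otherwise
 * a SQLite store at the given path. Each call to the returned factory opens a fresh store.
 */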
function createSessionStoreFactory(sessionsDB: string): () => SessionStore {
if (process.env.REDIS_URL) {
// Note that ./build excludes this module from the electron build.
const RedisStore = require('connect-redis')(session);
promisifyAll(RedisStore.prototype);
return () => {
const store = new RedisStore({
url: process.env.REDIS_URL,
});
return assignIn(store, {
async close() {
// Quit the client, so that it doesn't attempt to reconnect (which matters for some
// tests), and so that node becomes close-able.
await fromCallback(cb => store.client.quit(cb));
}});
};
} else {
const SQLiteStore = require('@gristlabs/connect-sqlite3')(session);
promisifyAll(SQLiteStore.prototype);
return () => {
const store = new SQLiteStore({
dir: path.dirname(sessionsDB),
db: path.basename(sessionsDB), // SQLiteStore no longer appends a .db suffix.
table: 'sessions',
});
      // In testing, and in the monorepo's "yarn start", the session DB is accessed from multiple
      // processes, so it could hit lock failures. connect-sqlite3 has a concurrentDb: true flag
      // that can be set, but it puts the database in WAL mode, which would have implications for
      // self-hosters (a second file to think about). Instead we just set a busy timeout.
store.db.run('PRAGMA busy_timeout = 1000');
return assignIn(store, { async close() {}});
};
}
}
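
/**
 * For a custom-domain session, the session ID embeds the org and host it was issued for, in the
 * form "c-<random>@<org>@<host>" (e.g. "c-abc123@acme@grist.example.com"; the values here are
 * illustrative). Returns that {org, host} pair, or null for regular session IDs.
 */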
export function getAllowedOrgForSessionID(sessionID: string): {org: string, host: string}|null {
if (sessionID.startsWith('c-') && sessionID.includes('@')) {
const [, org, host] = sessionID.split('@');
if (!host) { throw new Error('Invalid session ID'); }
return {org, host};
}
// Otherwise sessions start with 'g-', but we also accept older sessions without a prefix.
return null;
}
/**
 * Set up Grist Sessions, either in a sqlite db or via redis.
 * @param instanceRoot: path to storage area in case we need to make a sqlite db.
 * @param server: the GristServer, used here to obtain the session secret.
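 *
 * A minimal usage sketch (illustrative; `app` and `instanceRoot` are assumed to come from the
 * caller's own setup, not from this file):
 *   const {sessionMiddleware} = initGristSessions(instanceRoot, server);
 *   app.use(sessionMiddleware);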
*/
export function initGristSessions(instanceRoot: string, server: GristServer) {
// TODO: We may need to evaluate the usage of space in the SQLite store grist-sessions.db
// since entries are created on the first get request.
const sessionsDB: string = path.join(instanceRoot, 'grist-sessions.db');
// The extra step with the creator function is used in server.js to create a new session store
// after unpausing the server.
const sessionStoreCreator = createSessionStoreFactory(sessionsDB);
const sessionStore = sessionStoreCreator();
  // Use separate session IDs for custom domains and for native ones. Because a custom-domain
  // cookie could be stolen (with some effort) by the custom domain's owner, we limit the damage
  // by only honoring custom-domain cookies for requests to that domain.
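  // The resulting IDs (see generateId below) look like "g-<64 hex chars>" for native domains and
  // "c-<64 hex chars>@<org>@<host>" for custom domains.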
const generateId = (req: RequestWithOrg) => {
// Generate 256 bits of cryptographically random data to use as the session ID.
// This ensures security against brute-force session hijacking even without signing the session ID.
const randomNumbers = crypto.getRandomValues(new Uint8Array(32));
const uid = Buffer.from(randomNumbers).toString("hex");
return req.isCustomHost ? `c-${uid}@${req.org}@${req.get('host')}` : `g-${uid}`;
};
const sessionSecret = server.create.sessionSecret();
const sessionMiddleware = session({
secret: sessionSecret,
resave: false,
saveUninitialized: false,
name: cookieName,
requestDomain: getCookieDomain,
genid: generateId,
cookie: {
sameSite: 'lax',
// We do not initially set max-age, leaving the cookie as a
// session cookie until there's a successful login. On the
// redis back-end, the session associated with the cookie will
// persist for 24 hours if there is no successful login. Once
// there is a successful login, max-age will be set to
// COOKIE_MAX_AGE, making the cookie a persistent cookie. The
// session associated with the cookie will receive an updated
// time-to-live, so that it persists for COOKIE_MAX_AGE.
},
store: sessionStore
});
const sessions = new Sessions(sessionSecret, sessionStore);
return {sessions, sessionSecret, sessionStore, sessionMiddleware};
}
export function getCookieDomain(req: express.Request) {
const mreq = req as RequestWithOrg;
if (mreq.isCustomHost) {
    // For custom hosts, omit the domain to make it a "host-only" cookie, so that it is not
    // included in requests to subdomains (since we would not control all of those subdomains).
return undefined;
}
const adaptDomain = process.env.GRIST_ADAPT_DOMAIN === 'true';
const fixedDomain = process.env.GRIST_SESSION_DOMAIN || process.env.GRIST_DOMAIN;
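  // With GRIST_ADAPT_DOMAIN=true the cookie domain is derived from the base of the request's host
  // (the part after the first subdomain, with any port stripped); for a host like
  // "myorg.example.com:8484" (hostname illustrative) that would be the "example.com" portion.
  // Otherwise the fixed domain from the environment, if any, is used.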
if (adaptDomain) {
const reqDomain = parseSubdomain(req.get('host'));
if (reqDomain.base) { return reqDomain.base.split(':')[0]; }
}
return fixedDomain;
}