Mirror of https://github.com/gristlabs/grist-core.git, synced 2024-10-27 20:44:07 +00:00
bcbf57d590
Summary: This uses a newer version of mocha in grist-core so that tests can be run in parallel. That allows more tests to be moved without slowing things down overall. Tests moved are venerable browser tests; only the ones that "just work" or worked without too much trouble are moved, in order to keep the diff from growing too large. Will wrestle with more in a follow-up. Parallelism is at the file level, rather than the individual test. The newer version of mocha isn't needed for the grist-saas repo; tests are parallelized in our internal CI by other means. I've chosen to allocate files to workers in a cruder way than our internal CI, based on initial characters rather than an automated process. The automated process would need some reworking to be compatible with mocha running in parallel mode.

Test Plan: This diff was tested first on grist-core, then ported to grist-saas so the saas repo history will correctly track the history of moved files.

Reviewers: jarek

Reviewed By: jarek

Subscribers: jarek

Differential Revision: https://phab.getgrist.com/D3927
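For context, mocha 8+ can run test files (not individual tests) in parallel worker processes when its `parallel` option is enabled, which matches the file-level parallelism described above. A minimal sketch of a `.mocharc.js` that turns this on; the `jobs` value and the spec glob are hypothetical illustrations, not the actual grist-core settings:

// .mocharc.js -- minimal sketch, assuming mocha >= 8; not the real grist-core config.
module.exports = {
  parallel: true,   // distribute test files across worker processes
  jobs: 4,          // hypothetical worker count
  // The crude by-initial-character allocation of files mentioned above could be
  // approximated per CI job with a glob like this (hypothetical pattern):
  // spec: ['test/nbrowser/[a-m]*.js'],
};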
120 lines
4.2 KiB
JavaScript
var _ = require('underscore');
var assert = require('assert');
var Chance = require('chance');
var utils = require('../utils');
var marshal = require('app/common/marshal');

/**
 * This test measures the complete encoding/decoding time of several ways to serialize an array of
 * data. This is intended both to choose a good serialization format, and to optimize its
 * implementation. This test is supposed to work both in Node and in browsers.
 */
describe("Serialization", function() {

  // Serialize data with the marshal module, format version 0.
  function marshalV0(data) {
    var m = new marshal.Marshaller({stringToBuffer: true, version: 0});
    m.marshal(data);
    return m.dump();
  }

  // Serialize data with the marshal module, format version 2.
  function marshalV2(data) {
    var m = new marshal.Marshaller({stringToBuffer: true, version: 2});
    m.marshal(data);
    return m.dump();
  }

  // Parse a marshalled buffer; the Unmarshaller emits a 'value' event for each
  // complete value found in the pushed data.
  function unmarshal(buffer) {
    var m = new marshal.Unmarshaller({bufferToString: true});
    var value;
    m.on('value', function(v) { value = v; });
    m.push(buffer);
    m.removeAllListeners();
    return value;
  }

  var encoders = {
    "marshal_v0": {enc: marshalV0, dec: unmarshal},
    "marshal_v2": {enc: marshalV2, dec: unmarshal},
    "json": {enc: JSON.stringify, dec: JSON.parse},
  };

describe("correctness", function() {
|
|
var data;
|
|
before(function() {
|
|
// Generate an array of random data using the Chance module
|
|
var chance = new Chance(1274323391); // seed is arbitrary
|
|
data = {
|
|
'floats1k': chance.n(chance.floating, 1000),
|
|
'strings1k': chance.n(chance.string, 1000),
|
|
};
|
|
});
|
|
|
|
_.each(encoders, function(encoder, name) {
|
|
it(name, function() {
|
|
assert.deepEqual(encoder.dec(encoder.enc(data.floats1k)), data.floats1k);
|
|
assert.deepEqual(encoder.dec(encoder.enc(data.strings1k)), data.strings1k);
|
|
});
|
|
});
|
|
});
|
|
|
|
utils.timing.describe("timings", function() {
|
|
var data, encoded = {}, results = {};
|
|
before(function() {
|
|
this.timeout(10000);
|
|
// Generate an array of random data using the Chance module
|
|
var chance = new Chance(1274323391); // seed is arbitrary
|
|
data = {
|
|
'floats100k': chance.n(chance.floating, 100000),
|
|
'strings100k': chance.n(chance.string, 100000),
|
|
};
|
|
// And prepare an encoded version for each encoder so that we can time decoding.
|
|
_.each(data, function(values, key) {
|
|
_.each(encoders, function(encoder, name) {
|
|
encoded[key + ":" + name] = encoder.enc(values);
|
|
});
|
|
});
|
|
});
|
|
|
|
    // Time repeated encoding of data[key] with the named encoder.
    function test_encode(name, key, expectedMs) {
      utils.timing.it(expectedMs, "encodes " + key + " with " + name, function() {
        utils.repeat(5, encoders[name].enc, data[key]);
      });
    }

    // Time repeated decoding, saving the result for verification in after().
    function test_decode(name, key, expectedMs) {
      utils.timing.it(expectedMs, "decodes " + key + " with " + name, function() {
        var ret = utils.repeat(5, encoders[name].dec, encoded[key + ":" + name]);
        results[key + ":" + name] = ret;
      });
    }

    after(function() {
      // Verify the results of decoding tests outside the timed test case.
      _.each(results, function(result, keyName) {
        var key = keyName.split(":")[0];
        assert.deepEqual(result, data[key], "wrong result decoding " + keyName);
      });
    });

    // Note that these tests take quite a bit longer when running ALL tests than when running them
    // separately, so the expected times are artificially inflated below to let them pass. This
    // may be because memory allocation is slower due to memory fragmentation. Just running gc()
    // before the tests doesn't remove the discrepancy.
    // Also note that the expected time needs to be high enough for both node and browser.
    test_encode('marshal_v0', 'floats100k', 1600);
    test_decode('marshal_v0', 'floats100k', 600);
    test_encode('marshal_v0', 'strings100k', 1000);
    test_decode('marshal_v0', 'strings100k', 800);

    test_encode('marshal_v2', 'floats100k', 160);
    test_decode('marshal_v2', 'floats100k', 160);
    test_encode('marshal_v2', 'strings100k', 1000);
    test_decode('marshal_v2', 'strings100k', 800);

    test_encode('json', 'floats100k', 120);
    test_decode('json', 'floats100k', 120);
    test_encode('json', 'strings100k', 80);
    test_decode('json', 'strings100k', 80);
  });
});