2015-09-13 20:25:26 +00:00
|
|
|
///
|
2018-05-12 10:25:19 +00:00
|
|
|
/// Copyright 2015-2018 Oliver Giles
|
2015-09-13 20:25:26 +00:00
|
|
|
///
|
|
|
|
/// This file is part of Laminar
|
|
|
|
///
|
|
|
|
/// Laminar is free software: you can redistribute it and/or modify
|
|
|
|
/// it under the terms of the GNU General Public License as published by
|
|
|
|
/// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
/// (at your option) any later version.
|
|
|
|
///
|
|
|
|
/// Laminar is distributed in the hope that it will be useful,
|
|
|
|
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
/// GNU General Public License for more details.
|
|
|
|
///
|
|
|
|
/// You should have received a copy of the GNU General Public License
|
|
|
|
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
|
|
|
|
///
|
|
|
|
#include "laminar.h"
|
|
|
|
#include "server.h"
|
2015-09-19 12:36:03 +00:00
|
|
|
#include "conf.h"
|
2015-12-06 11:36:12 +00:00
|
|
|
#include "log.h"
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
#include <sys/wait.h>
|
2015-09-19 15:24:20 +00:00
|
|
|
#include <fstream>
|
2015-12-06 12:47:43 +00:00
|
|
|
#include <zlib.h>
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
#include <boost/filesystem.hpp>
|
|
|
|
namespace fs = boost::filesystem;
|
|
|
|
|
2017-07-31 05:56:58 +00:00
|
|
|
#define COMPRESS_LOG_MIN_SIZE 1024
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
#include <rapidjson/stringbuffer.h>
|
|
|
|
#include <rapidjson/writer.h>
|
|
|
|
|
|
|
|
// rapidjson::Writer with a StringBuffer is used a lot in Laminar for
|
|
|
|
// preparing JSON messages to send to Websocket clients. A small wrapper
|
|
|
|
// class here reduces verbosity later for this common use case.
|
|
|
|
class Json : public rapidjson::Writer<rapidjson::StringBuffer> {
|
|
|
|
public:
|
|
|
|
Json() : rapidjson::Writer<rapidjson::StringBuffer>(buf) { StartObject(); }
|
|
|
|
template<typename T>
|
2018-01-04 06:40:10 +00:00
|
|
|
Json& set(const char* key, T value) { String(key); Int64(value); return *this; }
|
2015-09-13 20:25:26 +00:00
|
|
|
Json& startObject(const char* key) { String(key); StartObject(); return *this; }
|
|
|
|
Json& startArray(const char* key) { String(key); StartArray(); return *this; }
|
|
|
|
const char* str() { EndObject(); return buf.GetString(); }
|
|
|
|
private:
|
|
|
|
rapidjson::StringBuffer buf;
|
|
|
|
};
|
|
|
|
// Specializations so string values are emitted as JSON strings instead of
// going through the generic Int64 path of the primary template.
template<> Json& Json::set(const char* key, const char* value) { String(key); String(value); return *this; }
template<> Json& Json::set(const char* key, std::string value) { String(key); String(value.c_str()); return *this; }
|
|
|
|
|
|
|
|
namespace {
// Default values when none were supplied in $LAMINAR_CONF_FILE (/etc/laminar.conf)
// RPC endpoint: an abstract unix domain socket
constexpr const char* INTADDR_RPC_DEFAULT = "unix-abstract:laminar";
// HTTP/Websocket frontend bind address
constexpr const char* INTADDR_HTTP_DEFAULT = "*:8080";
// URL prefix under which archived artifacts are served
constexpr const char* ARCHIVE_URL_DEFAULT = "/archive";
}
|
|
|
|
|
2017-07-31 05:51:46 +00:00
|
|
|
// helper for appending to boost::filesystem::path
|
|
|
|
fs::path operator+(fs::path p, const char* ext) {
|
|
|
|
std::string leaf = p.leaf().string();
|
|
|
|
leaf += ext;
|
|
|
|
return p.remove_leaf()/leaf;
|
|
|
|
}
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
// Short alias: std::string is used pervasively in the DB fetch callbacks below.
// (C++11 alias-declaration; the file already relies on C++11 features.)
using str = std::string;
|
|
|
|
|
2015-09-19 12:36:03 +00:00
|
|
|
// Constructor: reads environment configuration, opens/initializes the
// sqlite database under $LAMINAR_HOME, loads the last known build number
// per job, then parses the on-disk configuration.
Laminar::Laminar() {
    // Artifact URL prefix, overridable via the environment
    archiveUrl = ARCHIVE_URL_DEFAULT;
    if(char* envArchive = getenv("LAMINAR_ARCHIVE_URL"))
        archiveUrl = envArchive;
    // 0 means keep no per-run directories; loadConfiguration() may override
    numKeepRunDirs = 0;
    homeDir = getenv("LAMINAR_HOME") ?: "/var/lib/laminar";

    // raw owning pointer, released in ~Laminar
    db = new Database((fs::path(homeDir)/"laminar.sqlite").string().c_str());
    // Prepare database for first use
    // TODO: error handling
    db->exec("CREATE TABLE IF NOT EXISTS builds("
             "name TEXT, number INT UNSIGNED, node TEXT, queuedAt INT, "
             "startedAt INT, completedAt INT, result INT, output TEXT, "
             "outputLen INT, parentJob TEXT, parentBuild INT, reason TEXT, "
             "PRIMARY KEY (name, number))");
    db->exec("CREATE INDEX IF NOT EXISTS idx_completion_time ON builds("
             "completedAt DESC)");

    // retrieve the last build numbers
    db->stmt("SELECT name, MAX(number) FROM builds GROUP BY name")
    .fetch<str,uint>([this](str name, uint build){
        buildNums[name] = build;
    });

    // the Server is created later, in run()
    srv = nullptr;

    // Load configuration, may be called again in response to an inotify event
    // that the configuration files have been modified
    loadConfiguration();
}
|
|
|
|
|
|
|
|
// Track a newly-connected client so it receives subsequent status messages.
void Laminar::registerClient(LaminarClient* client) {
    clients.emplace(client);
}
|
|
|
|
|
|
|
|
// Stop tracking a disconnected client; no further messages will be sent to it.
void Laminar::deregisterClient(LaminarClient* client) {
    auto found = clients.find(client);
    if(found != clients.end())
        clients.erase(found);
}
|
|
|
|
|
2017-08-10 05:25:20 +00:00
|
|
|
// Track a waiter to be notified of run lifecycle events.
void Laminar::registerWaiter(LaminarWaiter *waiter) {
    waiters.emplace(waiter);
}
|
|
|
|
|
|
|
|
// Remove a waiter from the notification set.
void Laminar::deregisterWaiter(LaminarWaiter *waiter) {
    auto found = waiters.find(waiter);
    if(found != waiters.end())
        waiters.erase(found);
}
|
|
|
|
|
2017-12-21 06:19:45 +00:00
|
|
|
// Set (or overwrite) a parameter on an active run identified by job name
// and build number. Returns false when no such run is active.
bool Laminar::setParam(std::string job, uint buildNum, std::string param, std::string value) {
    Run* run = activeRun(job, buildNum);
    if(!run)
        return false;
    run->params[param] = value;
    return true;
}
|
|
|
|
|
|
|
|
|
2017-12-21 06:19:45 +00:00
|
|
|
// Append one JSON object per archived regular file found under
// $LAMINAR_HOME/archive/<job>/<num> to the (already started) array in `j`.
// Each entry carries "url" (relative to archiveUrl), "filename" (relative
// to the run's archive dir) and "size" in bytes.
void Laminar::populateArtifacts(Json &j, std::string job, uint num) const {
    fs::path dir(fs::path(homeDir)/"archive"/job/std::to_string(num));
    if(fs::is_directory(dir)) {
        // length of ".../archive": stripped so urls start at "/<job>/<num>/..."
        size_t prefixLen = (fs::path(homeDir)/"archive").string().length();
        // length of ".../archive/<job>/<num>": stripped for display filenames
        size_t scopeLen = dir.string().length();
        for(fs::recursive_directory_iterator it(dir); it != fs::recursive_directory_iterator(); ++it) {
            // skip directories, symlinks etc.
            if(!fs::is_regular_file(*it))
                continue;
            j.StartObject();
            j.set("url", archiveUrl + it->path().string().substr(prefixLen));
            // +1 also strips the leading path separator
            j.set("filename", it->path().string().substr(scopeLen+1));
            j.set("size", fs::file_size(it->path()));
            j.EndObject();
        }
    }
}
|
|
|
|
|
|
|
|
// Send a client the full status payload appropriate to its MonitorScope:
// LOG  -> raw (possibly db-stored, possibly zlib-compressed) run output;
// RUN  -> details, artifacts and ETA for a single build;
// JOB  -> paginated history plus running/queued counts for one job;
// ALL  -> summary row per job plus all running jobs;
// else -> the home page dashboard (recent, running, queued, statistics).
void Laminar::sendStatus(LaminarClient* client) {
    if(client->scope.type == MonitorScope::LOG) {
        // If the requested job is currently in progress
        if(const Run* run = activeRun(client->scope.job, client->scope.num)) {
            client->sendMessage(run->log.c_str());
        } else { // it must be finished, fetch it from the database
            db->stmt("SELECT output, outputLen FROM builds WHERE name = ? AND number = ?")
            .bind(client->scope.job, client->scope.num)
            .fetch<str,int>([=](str maybeZipped, unsigned long sz) {
                // outputLen is the uncompressed length; logs at least
                // COMPRESS_LOG_MIN_SIZE bytes long were stored zlib-compressed
                str log(sz+1,'\0');
                if(sz >= COMPRESS_LOG_MIN_SIZE) {
                    int res = ::uncompress((uint8_t*) log.data(), &sz,
                        (const uint8_t*) maybeZipped.data(), maybeZipped.size());
                    if(res == Z_OK)
                        client->sendMessage(log);
                    else
                        LLOG(ERROR, "Failed to uncompress log");
                } else {
                    // short logs are stored verbatim
                    client->sendMessage(maybeZipped);
                }
            });
        }
        return;
    }

    Json j;
    j.set("type", "status");
    j.set("title", getenv("LAMINAR_TITLE") ?: "Laminar");
    j.set("time", time(nullptr));
    j.startObject("data");
    if(client->scope.type == MonitorScope::RUN) {
        // completed-run details from the database...
        db->stmt("SELECT queuedAt,startedAt,completedAt, result, reason FROM builds WHERE name = ? AND number = ?")
        .bind(client->scope.job, client->scope.num)
        .fetch<time_t, time_t, time_t, int, std::string>([&](time_t queued, time_t started, time_t completed, int result, std::string reason) {
            j.set("queued", started-queued);
            j.set("started", started);
            j.set("completed", completed);
            j.set("result", to_string(RunState(result)));
            j.set("reason", reason);
        });
        // ...overridden by live data if the run is still active
        if(const Run* run = activeRun(client->scope.job, client->scope.num)) {
            j.set("queued", run->startedAt - run->queuedAt);
            j.set("started", run->startedAt);
            j.set("reason", run->reason());
            j.set("result", to_string(RunState::RUNNING));
            // estimated time of completion based on the previous run's duration
            db->stmt("SELECT completedAt - startedAt FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1")
            .bind(run->name)
            .fetch<uint>([&](uint lastRuntime){
                j.set("etc", run->startedAt + lastRuntime);
            });
        }
        j.set("latestNum", int(buildNums[client->scope.job]));
        j.startArray("artifacts");
        populateArtifacts(j, client->scope.job, client->scope.num);
        j.EndArray();
    } else if(client->scope.type == MonitorScope::JOB) {
        const uint runsPerPage = 10;
        j.startArray("recent");
        // LIMIT offset,count pages through completed runs, newest first
        db->stmt("SELECT number,startedAt,completedAt,result,reason FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT ?,?")
        .bind(client->scope.job, client->scope.page * runsPerPage, runsPerPage)
        .fetch<uint,time_t,time_t,int,str>([&](uint build,time_t started,time_t completed,int result,str reason){
            j.StartObject();
            j.set("number", build)
             .set("completed", completed)
             .set("started", started)
             .set("result", to_string(RunState(result)))
             .set("reason", reason)
             .EndObject();
        });
        j.EndArray();
        db->stmt("SELECT COUNT(*) FROM builds WHERE name = ?")
        .bind(client->scope.job)
        .fetch<uint>([&](uint nRuns){
            // NOTE(review): nRuns is unsigned; if a job has zero completed
            // builds, (nRuns-1) wraps around — confirm this row count can't be 0
            j.set("pages", (nRuns-1) / runsPerPage + 1);
        });
        j.startArray("running");
        // all currently-executing runs of this job
        auto p = activeJobs.byJobName().equal_range(client->scope.job);
        for(auto it = p.first; it != p.second; ++it) {
            const std::shared_ptr<Run> run = *it;
            j.StartObject();
            j.set("number", run->build);
            j.set("node", run->node->name);
            j.set("started", run->startedAt);
            j.set("result", to_string(RunState::RUNNING));
            j.set("reason", run->reason());
            j.EndObject();
        }
        j.EndArray();
        // count of this job's entries still waiting in the queue
        int nQueued = 0;
        for(const auto& run : queuedJobs) {
            if (run->name == client->scope.job) {
                nQueued++;
            }
        }
        j.set("nQueued", nQueued);
        // most recent successful build, if any
        db->stmt("SELECT number,startedAt FROM builds WHERE name = ? AND result = ? ORDER BY completedAt DESC LIMIT 1")
        .bind(client->scope.job, int(RunState::SUCCESS))
        .fetch<int,time_t>([&](int build, time_t started){
            j.startObject("lastSuccess");
            j.set("number", build).set("started", started);
            j.EndObject();
        });
        // most recent non-successful build, if any
        db->stmt("SELECT number,startedAt FROM builds WHERE name = ? AND result <> ? ORDER BY completedAt DESC LIMIT 1")
        .bind(client->scope.job, int(RunState::SUCCESS))
        .fetch<int,time_t>([&](int build, time_t started){
            j.startObject("lastFailed");
            j.set("number", build).set("started", started);
            j.EndObject();
        });
    } else if(client->scope.type == MonitorScope::ALL) {
        j.startArray("jobs");
        // one summary row per job (GROUP BY name picks a representative build)
        db->stmt("SELECT name,number,startedAt,completedAt,result FROM builds GROUP BY name ORDER BY number DESC")
        .fetch<str,uint,time_t,time_t,int>([&](str name,uint number, time_t started, time_t completed, int result){
            j.StartObject();
            j.set("name", name);
            j.set("number", number);
            j.set("result", to_string(RunState(result)));
            j.set("started", started);
            j.set("completed", completed);
            j.startArray("tags");
            for(const str& t: jobTags[name]) {
                j.String(t.c_str());
            }
            j.EndArray();
            j.EndObject();
        });
        j.EndArray();
        j.startArray("running");
        for(const auto& run : activeJobs.byStartedAt()) {
            j.StartObject();
            j.set("name", run->name);
            j.set("number", run->build);
            j.set("node", run->node->name);
            j.set("started", run->startedAt);
            j.startArray("tags");
            for(const str& t: jobTags[run->name]) {
                j.String(t.c_str());
            }
            j.EndArray();
            j.EndObject();
        }
        j.EndArray();
    } else { // Home page
        j.startArray("recent");
        db->stmt("SELECT * FROM builds ORDER BY completedAt DESC LIMIT 15")
        .fetch<str,uint,str,time_t,time_t,time_t,int>([&](str name,uint build,str node,time_t,time_t started,time_t completed,int result){
            j.StartObject();
            j.set("name", name)
             .set("number", build)
             .set("node", node)
             .set("started", started)
             .set("completed", completed)
             .set("result", to_string(RunState(result)))
             .EndObject();
        });
        j.EndArray();
        j.startArray("running");
        for(const auto& run : activeJobs.byStartedAt()) {
            j.StartObject();
            j.set("name", run->name);
            j.set("number", run->build);
            j.set("node", run->node->name);
            j.set("started", run->startedAt);
            // ETA derived from the last completed run's duration
            db->stmt("SELECT completedAt - startedAt FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1")
            .bind(run->name)
            .fetch<uint>([&](uint lastRuntime){
                j.set("etc", run->startedAt + lastRuntime);
            });
            j.EndObject();
        }
        j.EndArray();
        j.startArray("queued");
        for(const auto& run : queuedJobs) {
            j.StartObject();
            j.set("name", run->name);
            j.EndObject();
        }
        j.EndArray();
        // executor utilization across all nodes
        int execTotal = 0;
        int execBusy = 0;
        for(const auto& it : nodes) {
            const std::shared_ptr<Node>& node = it.second;
            execTotal += node->numExecutors;
            execBusy += node->busyExecutors;
        }
        j.set("executorsTotal", execTotal);
        j.set("executorsBusy", execBusy);
        // per-day result counts for the last 7 calendar days (UTC day buckets)
        j.startArray("buildsPerDay");
        for(int i = 6; i >= 0; --i) {
            j.StartObject();
            db->stmt("SELECT result, COUNT(*) FROM builds WHERE completedAt > ? AND completedAt < ? GROUP by result")
            .bind(86400*(time(nullptr)/86400 - i), 86400*(time(nullptr)/86400 - (i-1)))
            .fetch<int,int>([&](int result, int num){
                j.set(to_string(RunState(result)).c_str(), num);
            });
            j.EndObject();
        }
        j.EndArray();
        // top 5 jobs by build count in the last 24h
        j.startObject("buildsPerJob");
        db->stmt("SELECT name, COUNT(*) c FROM builds WHERE completedAt > ? GROUP BY name ORDER BY c DESC LIMIT 5")
        .bind(time(nullptr) - 86400)
        .fetch<str, int>([&](str job, int count){
            j.set(job.c_str(), count);
        });
        j.EndObject();
        // top 5 jobs by average runtime over the last week
        j.startObject("timePerJob");
        db->stmt("SELECT name, AVG(completedAt-startedAt) av FROM builds WHERE completedAt > ? GROUP BY name ORDER BY av DESC LIMIT 5")
        .bind(time(nullptr) - 7 * 86400)
        .fetch<str, uint>([&](str job, uint time){
            j.set(job.c_str(), time);
        });
        j.EndObject();

    }
    j.EndObject();
    client->sendMessage(j.str());

}
|
|
|
|
|
|
|
|
// Destructor: releases the raw owning pointers created in the
// constructor (db) and in run() (srv).
Laminar::~Laminar() {
    delete db;
    delete srv;
}
|
|
|
|
|
|
|
|
// Create and start the RPC/HTTP server. Bind addresses can be overridden
// via LAMINAR_BIND_RPC / LAMINAR_BIND_HTTP.
void Laminar::run() {
    const char* listen_rpc = getenv("LAMINAR_BIND_RPC") ?: INTADDR_RPC_DEFAULT;
    const char* listen_http = getenv("LAMINAR_BIND_HTTP") ?: INTADDR_HTTP_DEFAULT;

    // raw owning pointer, released in ~Laminar
    srv = new Server(*this, listen_rpc, listen_http);
    // watch the configuration directories so edits trigger notifyConfigChanged
    srv->addWatchPath(fs::path(fs::path(homeDir)/"cfg"/"nodes").string().c_str());
    srv->addWatchPath(fs::path(fs::path(homeDir)/"cfg"/"jobs").string().c_str());
    srv->start();
}
|
|
|
|
|
|
|
|
// Request server shutdown; delegates entirely to the Server object.
void Laminar::stop() {
    srv->stop();
}
|
|
|
|
|
|
|
|
bool Laminar::loadConfiguration() {
|
2017-12-09 17:03:43 +00:00
|
|
|
if(const char* ndirs = getenv("LAMINAR_KEEP_RUNDIRS"))
|
2017-12-21 06:19:45 +00:00
|
|
|
numKeepRunDirs = static_cast<uint>(atoi(ndirs));
|
2015-09-19 13:29:07 +00:00
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
std::set<std::string> knownNodes;
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2015-09-19 12:36:03 +00:00
|
|
|
fs::path nodeCfg = fs::path(homeDir)/"cfg"/"nodes";
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2015-09-19 12:36:03 +00:00
|
|
|
if(fs::is_directory(nodeCfg)) {
|
2015-11-19 20:43:08 +00:00
|
|
|
for(fs::directory_iterator it(nodeCfg); it != fs::directory_iterator(); ++it) {
|
2017-08-06 05:19:58 +00:00
|
|
|
if(!fs::is_regular_file(it->status()) || it->path().extension() != ".conf")
|
2015-09-19 12:36:03 +00:00
|
|
|
continue;
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2017-08-06 05:19:58 +00:00
|
|
|
StringMap conf = parseConfFile(it->path().string().c_str());
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
std::string nodeName = it->path().stem().string();
|
|
|
|
auto existingNode = nodes.find(nodeName);
|
2018-05-14 17:54:12 +00:00
|
|
|
std::shared_ptr<Node> node = existingNode == nodes.end() ? nodes.emplace(nodeName, std::shared_ptr<Node>(new Node)).first->second : existingNode->second;
|
2018-04-20 11:18:10 +00:00
|
|
|
node->name = nodeName;
|
|
|
|
node->numExecutors = conf.get<int>("EXECUTORS", 6);
|
2015-09-24 20:02:11 +00:00
|
|
|
|
|
|
|
std::string tags = conf.get<std::string>("TAGS");
|
|
|
|
if(!tags.empty()) {
|
|
|
|
std::istringstream iss(tags);
|
|
|
|
std::set<std::string> tagList;
|
2017-08-12 13:42:14 +00:00
|
|
|
std::string tag;
|
|
|
|
while(std::getline(iss, tag, ','))
|
|
|
|
tagList.insert(tag);
|
2018-04-20 11:18:10 +00:00
|
|
|
node->tags = tagList;
|
2015-09-24 20:02:11 +00:00
|
|
|
}
|
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
knownNodes.insert(nodeName);
|
2015-09-19 12:36:03 +00:00
|
|
|
}
|
2015-09-13 20:25:26 +00:00
|
|
|
}
|
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
// remove any nodes whose config files disappeared.
|
|
|
|
// if there are no known nodes, take care not to remove and re-add the default node
|
|
|
|
for(auto it = nodes.begin(); it != nodes.end();) {
|
|
|
|
if((it->first == "" && knownNodes.size() == 0) || knownNodes.find(it->first) != knownNodes.end())
|
|
|
|
it++;
|
|
|
|
else
|
|
|
|
it = nodes.erase(it);
|
2015-09-13 20:25:26 +00:00
|
|
|
}
|
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
// add a default node
|
|
|
|
if(nodes.empty()) {
|
|
|
|
std::shared_ptr<Node> node(new Node);
|
|
|
|
node->name = "";
|
|
|
|
node->numExecutors = 6;
|
|
|
|
nodes.emplace("", node);
|
|
|
|
}
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2015-09-24 20:02:11 +00:00
|
|
|
fs::path jobsDir = fs::path(homeDir)/"cfg"/"jobs";
|
|
|
|
if(fs::is_directory(jobsDir)) {
|
2015-11-19 20:43:08 +00:00
|
|
|
for(fs::directory_iterator it(jobsDir); it != fs::directory_iterator(); ++it) {
|
2017-08-06 05:19:58 +00:00
|
|
|
if(!fs::is_regular_file(it->status()) || it->path().extension() != ".conf")
|
2015-09-24 20:02:11 +00:00
|
|
|
continue;
|
|
|
|
|
2017-08-06 05:19:58 +00:00
|
|
|
StringMap conf = parseConfFile(it->path().string().c_str());
|
2015-09-24 20:02:11 +00:00
|
|
|
|
|
|
|
std::string tags = conf.get<std::string>("TAGS");
|
|
|
|
if(!tags.empty()) {
|
|
|
|
std::istringstream iss(tags);
|
|
|
|
std::set<std::string> tagList;
|
2017-08-12 13:42:14 +00:00
|
|
|
std::string tag;
|
|
|
|
while(std::getline(iss, tag, ','))
|
|
|
|
tagList.insert(tag);
|
2017-08-06 05:19:58 +00:00
|
|
|
jobTags[it->path().stem().string()] = tagList;
|
2015-09-24 20:02:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Queue a new run of the named job. Parameters whose key begins with '='
// are internal metadata (parent job/build, reason) and are stripped before
// the remainder becomes the run's environment parameters. Returns nullptr
// if the job has no .run script; otherwise notifies interested clients and
// attempts to assign the queue to free executors.
std::shared_ptr<Run> Laminar::queueJob(std::string name, ParamMap params) {
    // a job only exists if its .run script does
    if(!fs::exists(fs::path(homeDir)/"cfg"/"jobs"/name+".run")) {
        LLOG(ERROR, "Non-existent job", name);
        return nullptr;
    }

    std::shared_ptr<Run> run = std::make_shared<Run>();
    run->name = name;
    run->queuedAt = time(nullptr);
    // extract internal '=' parameters; erase returns the next iterator
    for(auto it = params.begin(); it != params.end();) {
        if(it->first[0] == '=') {
            if(it->first == "=parentJob") {
                run->parentName = it->second;
            } else if(it->first == "=parentBuild") {
                run->parentBuild = atoi(it->second.c_str());
            } else if(it->first == "=reason") {
                run->reasonMsg = it->second;
            } else {
                LLOG(ERROR, "Unknown internal job parameter", it->first);
            }
            it = params.erase(it);
        } else
            ++it;
    }
    run->params = params;
    queuedJobs.push_back(run);

    // notify clients
    Json j;
    j.set("type", "job_queued")
        .startObject("data")
        .set("name", name)
        .EndObject();
    const char* msg = j.str();
    for(LaminarClient* c : clients) {
        if(c->scope.wantsStatus(name))
            c->sendMessage(msg);
    }

    // an executor may be free right now
    assignNewJobs();
    return run;
}
|
|
|
|
|
|
|
|
// Advance a run to its next script. Returns true when the run has no
// further steps; otherwise hooks the new child's output fd into the
// event loop so its log is streamed as it arrives.
bool Laminar::stepRun(std::shared_ptr<Run> run) {
    const bool finished = run->step();
    if(finished)
        return true;
    srv->addDescriptor(run->fd, [this, run](const char* data, size_t len){
        handleRunLog(run, std::string(data, len));
    });
    return false;
}
|
|
|
|
|
2017-12-09 18:27:08 +00:00
|
|
|
// Append a chunk of process output to the run's in-memory log and relay
// it to every client watching this run's log.
void Laminar::handleRunLog(std::shared_ptr<Run> run, std::string s) {
    run->log += s;
    for(LaminarClient* watcher : clients)
        if(watcher->scope.wantsLog(run->name, run->build))
            watcher->sendMessage(s);
}
|
|
|
|
|
2016-07-25 11:59:45 +00:00
|
|
|
// Reaps a zombie and steps the corresponding Run to its next state.
// Should be called on SIGCHLD
void Laminar::reapChildren() {
    int ret = 0;
    pid_t pid;
    constexpr int bufsz = 1024;
    // scratch buffer for draining the child's pipe
    static thread_local char buf[bufsz];
    // reap every child that has exited since the last call
    while((pid = waitpid(-1, &ret, WNOHANG)) > 0) {
        LLOG(INFO, "Reaping", pid);
        // NOTE(review): find(pid) is dereferenced unchecked — confirm every
        // reapable child pid is guaranteed to be in activeJobs
        auto it = activeJobs.byPid().find(pid);
        std::shared_ptr<Run> run = *it;
        // The main event loop might schedule this SIGCHLD handler before the final
        // output handler (from addDescriptor). In that case, because it keeps a
        // shared_ptr to the run it would successfully add to the log output buffer,
        // but by then reapAdvance would have stored the log and ensured no-one cares.
        // Preempt this case by forcing a final (non-blocking) read here.
        for(ssize_t n = read(run->fd, buf, bufsz); n > 0; n = read(run->fd, buf, bufsz)) {
            handleRunLog(run, std::string(buf, static_cast<size_t>(n)));
        }
        bool completed = true;
        // modify() keeps the multi-index container's keys consistent while
        // the run's state changes
        activeJobs.byPid().modify(it, [&](std::shared_ptr<Run> run){
            run->reaped(ret);
            completed = stepRun(run);
        });
        if(completed)
            run->complete();
    }

    // reaped runs may have freed executors for queued jobs
    assignNewJobs();
}
|
|
|
|
|
2018-04-06 15:04:50 +00:00
|
|
|
void Laminar::notifyConfigChanged()
|
|
|
|
{
|
|
|
|
loadConfiguration();
|
|
|
|
// config change may allow stuck jobs to dequeue
|
|
|
|
assignNewJobs();
|
|
|
|
}
|
|
|
|
|
2018-02-24 16:53:11 +00:00
|
|
|
void Laminar::abortAll() {
|
|
|
|
for(std::shared_ptr<Run> run : activeJobs) {
|
|
|
|
run->abort();
|
|
|
|
}
|
2015-09-13 20:25:26 +00:00
|
|
|
}
|
|
|
|
|
2015-09-24 20:02:11 +00:00
|
|
|
bool Laminar::nodeCanQueue(const Node& node, const Run& run) const {
|
|
|
|
// if a node is too busy, it can't take the job
|
|
|
|
if(node.busyExecutors >= node.numExecutors)
|
|
|
|
return false;
|
|
|
|
|
2017-08-06 05:21:17 +00:00
|
|
|
// if the node has no tags, allow the build
|
|
|
|
if(node.tags.size() == 0)
|
2015-09-24 20:02:11 +00:00
|
|
|
return true;
|
|
|
|
|
2017-08-06 05:21:17 +00:00
|
|
|
auto it = jobTags.find(run.name);
|
|
|
|
// if the job has no tags, it cannot be run on this node
|
|
|
|
if(it == jobTags.end())
|
2015-09-24 20:02:11 +00:00
|
|
|
return false;
|
|
|
|
|
2017-08-06 05:21:17 +00:00
|
|
|
// otherwise, allow the build if job and node have a tag in common
|
2015-09-24 20:02:11 +00:00
|
|
|
for(const std::string& tag : it->second) {
|
|
|
|
if(node.tags.find(tag) != node.tags.end())
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
void Laminar::assignNewJobs() {
|
|
|
|
auto it = queuedJobs.begin();
|
|
|
|
while(it != queuedJobs.end()) {
|
|
|
|
bool assigned = false;
|
|
|
|
for(auto& sn : nodes) {
|
2018-04-20 11:18:10 +00:00
|
|
|
std::shared_ptr<Node> node = sn.second;
|
2015-09-13 20:25:26 +00:00
|
|
|
std::shared_ptr<Run> run = *it;
|
2018-04-20 11:18:10 +00:00
|
|
|
if(nodeCanQueue(*node.get(), *run)) {
|
2017-08-07 05:15:35 +00:00
|
|
|
fs::path cfgDir = fs::path(homeDir)/"cfg";
|
2017-12-02 18:52:19 +00:00
|
|
|
boost::system::error_code err;
|
2017-08-07 05:15:35 +00:00
|
|
|
|
|
|
|
// create a workspace for this job if it doesn't exist
|
|
|
|
fs::path ws = fs::path(homeDir)/"run"/run->name/"workspace";
|
|
|
|
if(!fs::exists(ws)) {
|
2017-12-02 18:52:19 +00:00
|
|
|
if(!fs::create_directories(ws, err)) {
|
2017-08-07 05:15:35 +00:00
|
|
|
LLOG(ERROR, "Could not create job workspace", run->name);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// prepend the workspace init script
|
|
|
|
if(fs::exists(cfgDir/"jobs"/run->name+".init"))
|
|
|
|
run->addScript((cfgDir/"jobs"/run->name+".init").string(), ws.string());
|
|
|
|
}
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2017-12-21 06:19:45 +00:00
|
|
|
uint buildNum = buildNums[run->name] + 1;
|
2017-08-07 05:15:35 +00:00
|
|
|
// create the run directory
|
2017-08-07 05:06:49 +00:00
|
|
|
fs::path rd = fs::path(homeDir)/"run"/run->name/std::to_string(buildNum);
|
2017-12-02 18:52:19 +00:00
|
|
|
bool createWorkdir = true;
|
|
|
|
if(fs::is_directory(rd)) {
|
|
|
|
LLOG(WARNING, "Working directory already exists, removing", rd.string());
|
|
|
|
fs::remove_all(rd, err);
|
|
|
|
if(err) {
|
|
|
|
LLOG(WARNING, "Failed to remove working directory", err.message());
|
|
|
|
createWorkdir = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if(createWorkdir && !fs::create_directory(rd, err)) {
|
2017-08-07 05:06:49 +00:00
|
|
|
LLOG(ERROR, "Could not create working directory", rd.string());
|
2015-09-13 20:25:26 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-08-07 05:06:49 +00:00
|
|
|
run->runDir = rd.string();
|
2017-08-07 05:15:35 +00:00
|
|
|
|
2015-09-19 11:40:00 +00:00
|
|
|
// create an archive directory
|
2015-11-01 10:35:46 +00:00
|
|
|
fs::path archive = fs::path(homeDir)/"archive"/run->name/std::to_string(buildNum);
|
2015-12-06 10:37:24 +00:00
|
|
|
if(fs::is_directory(archive)) {
|
2015-12-06 11:36:12 +00:00
|
|
|
LLOG(WARNING, "Archive directory already exists", archive.string());
|
2015-12-06 10:37:24 +00:00
|
|
|
} else if(!fs::create_directories(archive)) {
|
2015-12-06 11:36:12 +00:00
|
|
|
LLOG(ERROR, "Could not create archive directory", archive.string());
|
2015-09-19 11:40:00 +00:00
|
|
|
break;
|
|
|
|
}
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
// add scripts
|
|
|
|
// global before-run script
|
|
|
|
if(fs::exists(cfgDir/"before"))
|
|
|
|
run->addScript((cfgDir/"before").string());
|
|
|
|
// per-node before-run script
|
2018-04-20 11:18:10 +00:00
|
|
|
if(fs::exists(cfgDir/"nodes"/node->name+".before"))
|
|
|
|
run->addScript((cfgDir/"nodes"/node->name+".before").string());
|
2015-09-13 20:25:26 +00:00
|
|
|
// job before-run script
|
2017-07-31 05:51:46 +00:00
|
|
|
if(fs::exists(cfgDir/"jobs"/run->name+".before"))
|
|
|
|
run->addScript((cfgDir/"jobs"/run->name+".before").string());
|
2015-09-13 20:25:26 +00:00
|
|
|
// main run script. must exist.
|
2017-07-31 05:51:46 +00:00
|
|
|
run->addScript((cfgDir/"jobs"/run->name+".run").string());
|
2015-09-13 20:25:26 +00:00
|
|
|
// job after-run script
|
2017-07-31 05:51:46 +00:00
|
|
|
if(fs::exists(cfgDir/"jobs"/run->name+".after"))
|
|
|
|
run->addScript((cfgDir/"jobs"/run->name+".after").string());
|
2015-09-13 20:25:26 +00:00
|
|
|
// per-node after-run script
|
2018-04-20 11:18:10 +00:00
|
|
|
if(fs::exists(cfgDir/"nodes"/node->name+".after"))
|
|
|
|
run->addScript((cfgDir/"nodes"/node->name+".after").string());
|
2015-09-13 20:25:26 +00:00
|
|
|
// global after-run script
|
|
|
|
if(fs::exists(cfgDir/"after"))
|
|
|
|
run->addScript((cfgDir/"after").string());
|
|
|
|
|
2015-09-19 12:55:16 +00:00
|
|
|
// add environment files
|
|
|
|
if(fs::exists(cfgDir/"env"))
|
|
|
|
run->addEnv((cfgDir/"env").string());
|
2018-04-20 11:18:10 +00:00
|
|
|
if(fs::exists(cfgDir/"nodes"/node->name+".env"))
|
|
|
|
run->addEnv((cfgDir/"nodes"/node->name+".env").string());
|
2017-07-31 05:51:46 +00:00
|
|
|
if(fs::exists(cfgDir/"jobs"/run->name+".env"))
|
|
|
|
run->addEnv((cfgDir/"jobs"/run->name+".env").string());
|
2015-09-19 12:55:16 +00:00
|
|
|
|
2018-05-12 10:25:19 +00:00
|
|
|
// add job timeout if specified
|
|
|
|
if(fs::exists(cfgDir/"jobs"/run->name+".conf")) {
|
|
|
|
int timeout = parseConfFile(fs::path(cfgDir/"jobs"/run->name+".conf").string().c_str()).get<int>("TIMEOUT", 0);
|
|
|
|
if(timeout > 0) {
|
|
|
|
// A raw pointer to run is used here so as not to have a circular reference.
|
|
|
|
// The captured raw pointer is safe because if the Run is destroyed the Promise
|
|
|
|
// will be cancelled and the callback never called.
|
|
|
|
Run* r = run.get();
|
|
|
|
r->timeout = srv->addTimeout(timeout, [r](){
|
|
|
|
r->abort();
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-01 10:35:46 +00:00
|
|
|
// start the job
|
2018-04-20 11:18:10 +00:00
|
|
|
node->busyExecutors++;
|
|
|
|
run->node = node;
|
2017-12-21 06:19:45 +00:00
|
|
|
run->startedAt = time(nullptr);
|
2015-11-01 10:35:46 +00:00
|
|
|
run->laminarHome = homeDir;
|
|
|
|
run->build = buildNum;
|
|
|
|
// set the last known result if exists
|
|
|
|
db->stmt("SELECT result FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1")
|
|
|
|
.bind(run->name)
|
|
|
|
.fetch<int>([=](int result){
|
|
|
|
run->lastResult = RunState(result);
|
|
|
|
});
|
|
|
|
// update next build number
|
|
|
|
buildNums[run->name] = buildNum;
|
|
|
|
|
2018-04-20 11:18:10 +00:00
|
|
|
LLOG(INFO, "Queued job to node", run->name, run->build, node->name);
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
// notify clients
|
|
|
|
Json j;
|
|
|
|
j.set("type", "job_started")
|
|
|
|
.startObject("data")
|
|
|
|
.set("queueIndex", std::distance(it,queuedJobs.begin()))
|
|
|
|
.set("name", run->name)
|
2015-11-01 10:34:18 +00:00
|
|
|
.set("queued", run->startedAt - run->queuedAt)
|
2015-09-13 20:25:26 +00:00
|
|
|
.set("started", run->startedAt)
|
|
|
|
.set("number", run->build)
|
|
|
|
.set("reason", run->reason());
|
|
|
|
db->stmt("SELECT completedAt - startedAt FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1")
|
|
|
|
.bind(run->name)
|
2017-12-21 06:19:45 +00:00
|
|
|
.fetch<uint>([&](uint etc){
|
|
|
|
j.set("etc", time(nullptr) + etc);
|
2015-09-13 20:25:26 +00:00
|
|
|
});
|
|
|
|
j.EndObject();
|
|
|
|
const char* msg = j.str();
|
|
|
|
for(LaminarClient* c : clients) {
|
2015-11-01 10:29:06 +00:00
|
|
|
if(c->scope.wantsStatus(run->name, run->build)
|
|
|
|
// The run page also should know that another job has started
|
|
|
|
// (so maybe it can show a previously hidden "next" button).
|
|
|
|
// Hence this small hack:
|
|
|
|
|| (c->scope.type == MonitorScope::Type::RUN && c->scope.job == run->name))
|
2015-09-13 20:25:26 +00:00
|
|
|
c->sendMessage(msg);
|
|
|
|
}
|
|
|
|
|
2018-05-12 14:56:56 +00:00
|
|
|
// notify the rpc client if the start command was used
|
|
|
|
run->started.fulfiller->fulfill();
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
// setup run completion handler
|
2015-12-06 12:47:43 +00:00
|
|
|
run->notifyCompletion = [this](Run* r) { runFinished(r); };
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
// trigger the first step of the run
|
|
|
|
if(stepRun(run)) {
|
|
|
|
// should never happen
|
2015-12-06 11:36:12 +00:00
|
|
|
LLOG(INFO, "No steps for run");
|
2015-09-13 20:25:26 +00:00
|
|
|
run->complete();
|
|
|
|
}
|
|
|
|
|
|
|
|
assigned = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if(assigned) {
|
|
|
|
activeJobs.insert(*it);
|
|
|
|
it = queuedJobs.erase(it);
|
|
|
|
} else
|
|
|
|
++it;
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
2015-09-19 13:41:19 +00:00
|
|
|
|
2015-12-06 12:47:43 +00:00
|
|
|
/// Post-run bookkeeping once a Run's final script has exited: releases the
/// node's executor slot, persists the (possibly compressed) log and result to
/// the database, notifies websocket clients and rpc waiters, drops the run
/// from activeJobs and prunes old run directories.
/// @param r the completed run. Must still be owned by the caller; this
///          function erases it from activeJobs, which may release a reference.
void Laminar::runFinished(Run * r) {
    std::shared_ptr<Node> node = r->node;

    node->busyExecutors--;
    LLOG(INFO, "Run completed", r->name, to_string(r->result));
    time_t completedAt = time(nullptr);

    // compress log
    std::string maybeZipped = r->log;
    size_t logsize = r->log.length();
    if(r->log.length() >= COMPRESS_LOG_MIN_SIZE) {
        // compressBound() gives zlib's worst-case output size. Sizing the
        // buffer with the raw log length (as before) could make compress()
        // fail with Z_BUF_ERROR on incompressible input, silently storing
        // the log uncompressed.
        unsigned long zippedSize = compressBound(logsize);
        std::string zipped(zippedSize, '\0');
        if(::compress((uint8_t*) zipped.data(), &zippedSize,
                      (const uint8_t*) r->log.data(), logsize) == Z_OK) {
            zipped.resize(zippedSize);
            std::swap(maybeZipped, zipped);
        }
    }

    // The stored logsize is the *uncompressed* length, so readers can tell
    // whether the log column was compressed by comparing lengths.
    std::string reason = r->reason();
    db->stmt("INSERT INTO builds VALUES(?,?,?,?,?,?,?,?,?,?,?,?)")
     .bind(r->name, r->build, node->name, r->queuedAt, r->startedAt, completedAt, int(r->result),
           maybeZipped, logsize, r->parentName, r->parentBuild, reason)
     .exec();

    // notify clients
    Json j;
    j.set("type", "job_completed")
            .startObject("data")
            .set("name", r->name)
            .set("number", r->build)
            .set("queued", r->startedAt - r->queuedAt)
            .set("completed", completedAt)
            .set("started", r->startedAt)
            .set("result", to_string(r->result))
            .set("reason", r->reason());
    j.startArray("artifacts");
    populateArtifacts(j, r->name, r->build);
    j.EndArray();
    j.EndObject();
    const char* msg = j.str();
    for(LaminarClient* c : clients) {
        if(c->scope.wantsStatus(r->name, r->build))
            c->sendMessage(msg);
    }

    // notify the waiters
    for(LaminarWaiter* w : waiters) {
        w->complete(r);
    }

    // Take a copy of the job name before erasing from activeJobs: erasing may
    // drop an owning reference to the Run, after which r must not be used.
    const std::string jobName = r->name;
    const uint finishedBuild = r->build;

    // erase reference to run from activeJobs
    activeJobs.byRunPtr().erase(r);

    // remove old run directories
    // We cannot count back the number of directories to keep from the currently
    // finishing job because there may well be older, still-running instances of
    // this job and we don't want to delete their rundirs. So instead, check
    // whether there are any more active runs of this job, and if so, count back
    // from the oldest among them. If there are none, count back from the latest
    // known build number of this job, which may not be that of the run that
    // finished here.
    (void) finishedBuild;
    auto it = activeJobs.byJobName().equal_range(jobName);
    uint oldestActive = (it.first == it.second)? buildNums[jobName] : (*it.first)->build - 1;
    for(int i = static_cast<int>(oldestActive - numKeepRunDirs); i > 0; i--) {
        fs::path d = fs::path(homeDir)/"run"/jobName/std::to_string(i);
        // Once the directory does not exist, it's probably not worth checking
        // any further. 99% of the time this loop should only ever have 1 iteration
        // anyway so hence this (admittedly debatable) optimization.
        if(!fs::exists(d))
            break;
        fs::remove_all(d);
    }
}
|
2015-09-19 15:24:20 +00:00
|
|
|
|
2017-12-29 09:14:10 +00:00
|
|
|
// Small helper function to return the full contents of a file given its path.
|
|
|
|
// It reads in the whole file into the given string reference.
|
|
|
|
// This is a terrible way to serve files (especially large ones). Hopefully
|
|
|
|
// no-one uses this function and configures their webservers appropriately.
|
|
|
|
static bool slurp(fs::path path, std::string& output) {
|
|
|
|
if(!fs::is_regular_file(path))
|
2015-09-19 15:24:20 +00:00
|
|
|
return false;
|
2017-12-29 09:14:10 +00:00
|
|
|
std::ifstream fstr(path.string());
|
2015-09-19 15:24:20 +00:00
|
|
|
fstr.seekg(0, std::ios::end);
|
2017-12-21 06:19:45 +00:00
|
|
|
ssize_t sz = fstr.tellg();
|
|
|
|
if(fstr.good()) {
|
2017-12-29 09:14:10 +00:00
|
|
|
output.resize(static_cast<size_t>(sz));
|
2015-09-19 15:24:20 +00:00
|
|
|
fstr.seekg(0);
|
2017-12-29 09:14:10 +00:00
|
|
|
fstr.read(&output[0], sz);
|
2015-09-19 15:24:20 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2017-12-29 09:14:10 +00:00
|
|
|
|
|
|
|
bool Laminar::getArtefact(std::string path, std::string& result) {
|
|
|
|
if(archiveUrl != ARCHIVE_URL_DEFAULT) {
|
|
|
|
// we shouldn't have got here. Probably an invalid link.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
fs::path file(fs::path(homeDir)/"archive"/path);
|
|
|
|
return slurp(file, result);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string Laminar::getCustomCss()
|
|
|
|
{
|
|
|
|
fs::path file(fs::path(homeDir)/"custom"/"style.css");
|
|
|
|
std::string result;
|
|
|
|
slurp(file, result);
|
|
|
|
return result;
|
|
|
|
}
|