From 0384fc9a0a38b972e09e6a81a2dea8455bb5ba36 Mon Sep 17 00:00:00 2001 From: Oliver Giles Date: Fri, 13 Dec 2019 10:42:22 +0200 Subject: [PATCH] Replace nodes/tags with contexts The nodes/tags system has not been particularly successful: it's not as intuitive as it could be, and in attempting to be a single feature that addresses many use cases, it ends up addressing none of them particularly well. This commit replaces nodes and tags with contexts. Each job may define which context(s) it may be associated with. Laminar will only pop the job off the waiting queue when it can be assigned to a context. A context defines an integer number of executors, which represents how many runs can be simultaneously assigned to it. A context may provide extra environment variables. Essentially, a context replaces a node, and tags are gone. You just assign jobs to contexts directly, and you can use a glob expression. This should be more intuitive. For grouping jobs in the WebUI, a separate mechanism called "groups" is provided. --- UserManual.md | 82 +++++++++++------- pkg/centos7-x86_64.sh | 2 +- pkg/centos8-x86_64.sh | 2 +- pkg/debian10-amd64.sh | 2 +- pkg/debian10-armhf.sh | 2 +- pkg/debian9-amd64.sh | 2 +- pkg/debian9-armhf.sh | 2 +- src/{node.h => context.h} | 32 ++++--- src/laminar.cpp | 175 ++++++++++++++++---------------- src/laminar.h | 13 +-- src/resources/index.html | 4 +- src/resources/js/app.js | 38 ++++----- src/run.cpp | 19 ++--- src/run.h | 8 +- test/unit-run.cpp | 18 ++-- 15 files changed, 193 insertions(+), 208 deletions(-) rename src/{node.h => context.h} (57%) diff --git a/UserManual.md b/UserManual.md index f5b2e71..7b57a29 100644 --- a/UserManual.md +++ b/UserManual.md @@ -96,10 +96,6 @@ Laminar uses Server Sent Events to provide a responsive, auto-updating display wi If you use a reverse proxy to host Laminar at a subfolder instead of a subdomain root, the `<base href>` needs to be updated to ensure all links point to their proper targets. This can be done by setting `LAMINAR_BASE_URL` in `/etc/laminar.conf`. -## Set the page title - -Change `LAMINAR_TITLE` in `/etc/laminar.conf` to your preferred page title. For further WebUI customization, consider using a [custom style sheet](#Customizing-the-WebUI). -## More configuration options See the [reference section](#Service-configuration-file) @@ -439,7 +435,7 @@ echo Initializing workspace git clone git@example.com:company/project.git . ``` -**CAUTION**: By default, laminar permits multiple simultaneous runs of the same job. If a job can **modify** the workspace, this might result in inconsistent builds when simultaneous runs access the same content. This is unlikely to be an issue for nightly builds, but for SCM-triggered builds it will be. To solve this, use [nodes](#Nodes-and-Tags) to restrict simultaneous execution of jobs, or consider [flock](https://linux.die.net/man/1/flock). +**CAUTION**: By default, laminar permits multiple simultaneous runs of the same job. If a job can **modify** the workspace, this might result in inconsistent builds when simultaneous runs access the same content. This is unlikely to be an issue for nightly builds, but for SCM-triggered builds it will be. To solve this, use [contexts](#Contexts) to restrict simultaneous execution of jobs, or consider [flock](https://linux.die.net/man/1/flock). 
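+As a minimal sketch of the context-based approach (the context name `workspace-lock` and job name `myproject` are only illustrative placeholders), a single-executor context serializes all runs assigned to it:
+
+```
+# Illustrative only: create a single-executor context and assign the job to it.
+# Contexts are described in detail in the Contexts section below.
+echo "EXECUTORS=1" > /var/lib/laminar/cfg/contexts/workspace-lock.conf
+echo "CONTEXTS=workspace-lock" >> /var/lib/laminar/cfg/jobs/myproject.conf
+```
+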
The following example uses [flock](https://linux.die.net/man/1/flock) to efficiently share a git repository workspace between multiple simultaneous builds: @@ -490,62 +486,71 @@ TIMEOUT=120 --- -# Nodes and Tags +# Contexts + +In Laminar, each run of a job is associated with a context. The context defines an integer number of *executors*, which is the number of runs the context will accept simultaneously. A context may also provide additional environment variables. -In Laminar, a *node* is an abstract concept allowing more fine-grained control over job execution scheduling. Each node can be defined to support an integer number of *executors*, which defines how many runs can be executed simultaneously. +Uses for this feature include limiting the number of concurrent CPU-intensive jobs (such as compilation), and controlling access to jobs [executed remotely](#Remote-jobs). -A typical example would be to allow only a few concurrent CPU-intensive jobs (such as compilation), while simultaneously allowing many more less-intensive jobs (such as monitoring or remote jobs). To create a node named `build` with 3 executors, create the file `/var/lib/laminar/cfg/nodes/build.conf` with the following content: +If no contexts are defined, Laminar will behave as if there is a single context named "default", with `6` executors. This is a reasonable default that allows simple setups to work without any consideration of contexts. + +## Defining a context + +To create a context named "my-env" which allows only a single run at a time, create `/var/lib/laminar/cfg/contexts/my-env.conf` with the content: ``` -EXECUTORS=3 +EXECUTORS=1 ``` -To associate jobs with nodes, laminar uses *tags*. Tags may be applied to nodes and jobs. If a node has tags, only jobs with a matching tag will be executed on it. If a node has no tags, it will accept any job. To tag a node, add them to `/var/lib/laminar/cfg/nodes/NODENAME.conf`: + +## Associating a job with a context + +When trying to start a job, laminar will wait until the job can be matched to a context which has at least one free executor. You can define which contexts the job will associate with by setting, for example, ``` -EXECUTORS=3 -TAGS=tag1,tag2 +CONTEXTS=my-env-*,special_context ``` -To add a tag to a job, add the following to `/var/lib/laminar/cfg/jobs/JOBNAME.conf`: +in `/var/lib/laminar/cfg/jobs/JOB.conf`. For each of the patterns in the comma-separated list `CONTEXTS`, Laminar will iterate over the known contexts and associate the run with the first context with free executors. Patterns are [glob expressions](http://man7.org/linux/man-pages/man7/glob.7.html). + +If `CONTEXTS` is empty or absent (or if `JOB.conf` doesn't exist), laminar will behave as if `CONTEXTS=default` were defined. + +## Adding environment to a context + +Append desired environment variables to `/var/lib/laminar/cfg/contexts/CONTEXT_NAME.env`: ``` -TAGS=tag2 +DUT_IP=192.168.3.2 +FOO=bar ``` -If Laminar cannot find any node configuration, it will assume a single node with 6 executors and no tags. +This environment will then be available to the run script of jobs associated with this context. -## Grouping jobs with tags -Tags are also used to group jobs in the web UI. Each tag will presented as a tab in the "Jobs" page. -## Node scripts -If `/var/lib/laminar/cfg/nodes/NODENAME.before` exists, it will be executed before the run script of a job scheduled to that node. 
Similarly, if `/var/lib/laminar/cfg/nodes/NODENAME.after` exists, it will be executed after the run script of a job scheduled to that node. +## Grouping jobs with tags -## Node environment +Tags are also used to group jobs in the web UI. Each tag will be presented as a tab in the "Jobs" page. -If `/var/lib/laminar/cfg/nodes/NODENAME.env` exists and can be parsed as a list of `KEY=VALUE` pairs, these variables will be exposed as part of the run's environment. # Remote jobs -Laminar provides no specific support, `bash`, `ssh` and possibly NFS are all you need. For example, consider two identical target devices on which test jobs can be run in parallel. You might create a [node](#Nodes-and-Tags) for each, `/var/lib/laminar/cfg/nodes/target{1,2}.conf` with a common tag: +Laminar provides no specific support; `bash`, `ssh` and possibly NFS are all you need. For example, consider two identical target devices on which test jobs can be run in parallel. You might create a [context](#Contexts) for each, `/var/lib/laminar/cfg/contexts/target{1,2}.conf`: ``` EXECUTORS=1 -TAGS=remote-target ``` -In each node's `.env` file, set the individual device's IP address: +In each context's `.env` file, set the individual device's IP address: ``` TARGET_IP=192.168.0.123 ``` -And tag the job accordingly in `/var/lib/laminar/cfg/jobs/myproject-test.conf`: +And mark the job accordingly in `/var/lib/laminar/cfg/jobs/myproject-test.conf`: ``` -TAGS=remote-target +CONTEXTS=target* ``` This means the job script `/var/lib/laminar/cfg/jobs/myproject-test.run` can be generic: @@ -581,6 +586,24 @@ EOF # Customizing the WebUI +## Organising jobs into groups + +*Groups* may be used to organise the "Jobs" page into tabs. Edit `/var/lib/laminar/cfg/groups.conf` and define each group as a [javascript regular expression](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) matching the names of the jobs it should contain, for example: + +``` +Builds=compile-\w+ +My Fav Jobs=^(target-foo-(build|deploy)|run-benchmarks)$ +All=.* +``` + +Changes to this file are detected immediately and will be visible on the next page refresh. + +## Setting the page title + +Change `LAMINAR_TITLE` in `/etc/laminar.conf` to your preferred page title. Laminar must be restarted for this change to take effect. + +## Custom stylesheet + If it exists, the file `/var/lib/laminar/custom/style.css` will be served by laminar and may be used to change the appearance of Laminar's WebUI. This directory is also a good place to add any extra assets needed for this customization, but note that in this case you will need to serve this directory directly from your [HTTP reverse proxy](#Service-configuration) (highly recommended). 
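+How the extra assets are exposed depends on your reverse proxy; as an illustrative sketch only (the `/custom/` URL prefix and the choice of nginx are assumptions, not something Laminar mandates), an nginx configuration might serve the directory like this:
+
+```
+# Serve customization assets directly from disk instead of proxying to laminar
+location /custom/ {
+    alias /var/lib/laminar/custom/;
+}
+```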
@@ -614,15 +637,13 @@ Laminar will serve a job's current status as a pretty badge at the url `/badge/J ## Script execution order -When `$JOB` is triggered on `$NODE`, the following scripts (relative to `$LAMINAR_HOME/cfg`) may be executed: +When `$JOB` is triggered, the following scripts (relative to `$LAMINAR_HOME/cfg`) may be executed: - `jobs/$JOB.init` if the [workspace](#Data-sharing-and-Workspaces) did not exist - `before` -- `nodes/$NODE.before` - `jobs/$JOB.before` - `jobs/$JOB.run` - `jobs/$JOB.after` -- `nodes/$NODE.after` - `after` ## Environment variables @@ -635,13 +656,14 @@ The following variables are available in run scripts: - `LAST_RESULT` string previous run status - `WORKSPACE` path to this job's workspace - `ARCHIVE` path to this run's archive +- `CONTEXT` the context of this run In addition, `$LAMINAR_HOME/cfg/scripts` is prepended to `$PATH`. See [helper scripts](#Helper-scripts). Laminar will also export variables in the form `KEY=VALUE` found in these files: - `env` -- `nodes/$NODE.env` +- `contexts/$CONTEXT.env` - `jobs/$JOB.env` Finally, variables supplied on the command-line call to `laminarc queue`, `laminarc start` or `laminarc run` will be available. See [parameterized runs](#Parameterized-runs) diff --git a/pkg/centos7-x86_64.sh b/pkg/centos7-x86_64.sh index de3c14e..5352294 100755 --- a/pkg/centos7-x86_64.sh +++ b/pkg/centos7-x86_64.sh @@ -72,7 +72,7 @@ make %post echo Creating laminar user with home in %{_sharedstatedir}/laminar useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar -mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,nodes,scripts} +mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: %{_sharedstatedir}/laminar EOF diff --git a/pkg/centos8-x86_64.sh b/pkg/centos8-x86_64.sh index d3bd523..60b606e 100755 --- a/pkg/centos8-x86_64.sh +++ b/pkg/centos8-x86_64.sh @@ -69,7 +69,7 @@ make %post echo Creating laminar user with home in %{_sharedstatedir}/laminar useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar -mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,nodes,scripts} +mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: %{_sharedstatedir}/laminar EOF diff --git a/pkg/debian10-amd64.sh b/pkg/debian10-amd64.sh index 66e17cd..064c04a 100755 --- a/pkg/debian10-amd64.sh +++ b/pkg/debian10-amd64.sh @@ -38,7 +38,7 @@ cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar -mkdir -p /var/lib/laminar/cfg/{jobs,nodes,scripts} +mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst diff --git a/pkg/debian10-armhf.sh b/pkg/debian10-armhf.sh index fb11754..2f55933 100755 --- a/pkg/debian10-armhf.sh +++ b/pkg/debian10-armhf.sh @@ -53,7 +53,7 @@ cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar -mkdir -p /var/lib/laminar/cfg/{jobs,nodes,scripts} +mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst diff --git a/pkg/debian9-amd64.sh b/pkg/debian9-amd64.sh index 37c6c0a..7c09907 100755 --- a/pkg/debian9-amd64.sh +++ b/pkg/debian9-amd64.sh @@ -58,7 +58,7 @@ cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar -mkdir -p 
/var/lib/laminar/cfg/{jobs,nodes,scripts} +mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst diff --git a/pkg/debian9-armhf.sh b/pkg/debian9-armhf.sh index 68028ce..ea84a2d 100755 --- a/pkg/debian9-armhf.sh +++ b/pkg/debian9-armhf.sh @@ -78,7 +78,7 @@ cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar -mkdir -p /var/lib/laminar/cfg/{jobs,nodes,scripts} +mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst diff --git a/src/node.h b/src/context.h similarity index 57% rename from src/node.h rename to src/context.h index 342a39c..aca7588 100644 --- a/src/node.h +++ b/src/context.h @@ -1,5 +1,5 @@ /// -/// Copyright 2015 Oliver Giles +/// Copyright 2015-2019 Oliver Giles /// /// This file is part of Laminar /// @@ -16,28 +16,36 @@ /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// -#ifndef LAMINAR_NODE_H_ -#define LAMINAR_NODE_H_ +#ifndef LAMINAR_CONTEXT_H_ +#define LAMINAR_CONTEXT_H_ +#include #include #include - class Run; -// Represents a group of executors. Currently almost unnecessary POD -// abstraction, but may be enhanced in the future to support e.g. tags -class Node { +// Represents a context within which a Run will be executed. Allows applying +// a certain environment to a set of Jobs, or setting a limit on the number +// of parallel Runs +class Context { public: - Node() {} + Context() {} std::string name; int numExecutors; int busyExecutors = 0; - std::set tags; - // Attempts to queue the given run to this node. Returns true if succeeded. - bool queue(const Run& run); + bool canQueue(std::set& patterns) { + if(busyExecutors >= numExecutors) + return false; + + for(std::string pattern : patterns) { + if(fnmatch(pattern.c_str(), name.c_str(), FNM_EXTMATCH) == 0) + return true; + } + return false; + } }; -#endif // LAMINAR_NODE_H_ +#endif // LAMINAR_CONTEXT_H_ diff --git a/src/laminar.cpp b/src/laminar.cpp index 740d618..5097bb4 100644 --- a/src/laminar.cpp +++ b/src/laminar.cpp @@ -76,6 +76,11 @@ Laminar::Laminar(Server &server, Settings settings) : { LASSERT(settings.home[0] == '/'); + if(fsHome->exists(homePath/"cfg"/"nodes")) { + LLOG(ERROR, "Found node configuration directory cfg/nodes. Nodes have been deprecated, please migrate to contexts. 
Laminar will now exit."); + exit(EXIT_FAILURE); + } + archiveUrl = settings.archive_url; if(archiveUrl.back() != '/') archiveUrl.append("/"); @@ -104,8 +109,9 @@ Laminar::Laminar(Server &server, Settings settings) : loadConfiguration(); // config change may allow stuck jobs to dequeue assignNewJobs(); - }).addPath((homePath/"cfg"/"nodes").toString(true).cStr()) - .addPath((homePath/"cfg"/"jobs").toString(true).cStr()); + }).addPath((homePath/"cfg"/"contexts").toString(true).cStr()) + .addPath((homePath/"cfg"/"jobs").toString(true).cStr()) + .addPath((homePath/"cfg").toString(true).cStr()); // for groups.conf srv.listenRpc(*rpc, settings.bind_rpc); srv.listenHttp(*http, settings.bind_http); @@ -283,7 +289,7 @@ std::string Laminar::getStatus(MonitorScope scope) { const std::shared_ptr run = *it; j.StartObject(); j.set("number", run->build); - j.set("node", run->node->name); + j.set("context", run->context->name); j.set("started", run->startedAt); j.set("result", to_string(RunState::RUNNING)); j.set("reason", run->reason()); @@ -322,11 +328,6 @@ std::string Laminar::getStatus(MonitorScope scope) { j.set("result", to_string(RunState(result))); j.set("started", started); j.set("completed", completed); - j.startArray("tags"); - for(const str& t: jobTags[name]) { - j.String(t.c_str()); - } - j.EndArray(); j.EndObject(); }); j.EndArray(); @@ -335,24 +336,23 @@ std::string Laminar::getStatus(MonitorScope scope) { j.StartObject(); j.set("name", run->name); j.set("number", run->build); - j.set("node", run->node->name); + j.set("context", run->context->name); j.set("started", run->startedAt); - j.startArray("tags"); - for(const str& t: jobTags[run->name]) { - j.String(t.c_str()); - } - j.EndArray(); j.EndObject(); } j.EndArray(); + j.startObject("groups"); + for(const auto& group : jobGroups) + j.set(group.first.c_str(), group.second); + j.EndObject(); } else { // Home page j.startArray("recent"); db->stmt("SELECT * FROM builds ORDER BY completedAt DESC LIMIT 15") - .fetch([&](str name,uint build,str node,time_t,time_t started,time_t completed,int result){ + .fetch([&](str name,uint build,str context,time_t,time_t started,time_t completed,int result){ j.StartObject(); j.set("name", name) .set("number", build) - .set("node", node) + .set("context", context) .set("started", started) .set("completed", completed) .set("result", to_string(RunState(result))) @@ -364,7 +364,7 @@ std::string Laminar::getStatus(MonitorScope scope) { j.StartObject(); j.set("name", run->name); j.set("number", run->build); - j.set("node", run->node->name); + j.set("context", run->context->name); j.set("started", run->startedAt); db->stmt("SELECT completedAt - startedAt FROM builds WHERE name = ? 
ORDER BY completedAt DESC LIMIT 1") .bind(run->name) @@ -383,10 +383,10 @@ std::string Laminar::getStatus(MonitorScope scope) { j.EndArray(); int execTotal = 0; int execBusy = 0; - for(const auto& it : nodes) { - const std::shared_ptr& node = it.second; - execTotal += node->numExecutors; - execBusy += node->busyExecutors; + for(const auto& it : contexts) { + const std::shared_ptr& context = it.second; + execTotal += context->numExecutors; + execBusy += context->busyExecutors; } j.set("executorsTotal", execTotal); j.set("executorsBusy", execBusy); @@ -488,51 +488,41 @@ bool Laminar::loadConfiguration() { if(const char* ndirs = getenv("LAMINAR_KEEP_RUNDIRS")) numKeepRunDirs = static_cast(atoi(ndirs)); - std::set knownNodes; + std::set knownContexts; - KJ_IF_MAYBE(nodeDir, fsHome->tryOpenSubdir(kj::Path{"cfg","nodes"})) { - for(kj::Directory::Entry& entry : (*nodeDir)->listEntries()) { + KJ_IF_MAYBE(contextsDir, fsHome->tryOpenSubdir(kj::Path{"cfg","contexts"})) { + for(kj::Directory::Entry& entry : (*contextsDir)->listEntries()) { if(!entry.name.endsWith(".conf")) continue; - StringMap conf = parseConfFile((homePath/"cfg"/"nodes"/entry.name).toString(true).cStr()); - - std::string nodeName(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); - auto existingNode = nodes.find(nodeName); - std::shared_ptr node = existingNode == nodes.end() ? nodes.emplace(nodeName, std::shared_ptr(new Node)).first->second : existingNode->second; - node->name = nodeName; - node->numExecutors = conf.get("EXECUTORS", 6); - - std::string tagString = conf.get("TAGS"); - std::set tagList; - if(!tagString.empty()) { - std::istringstream iss(tagString); - std::string tag; - while(std::getline(iss, tag, ',')) - tagList.insert(tag); - } - std::swap(node->tags, tagList); + StringMap conf = parseConfFile((homePath/"cfg"/"contexts"/entry.name).toString(true).cStr()); - knownNodes.insert(nodeName); + std::string name(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); + auto existing = contexts.find(name); + std::shared_ptr context = existing == contexts.end() ? contexts.emplace(name, std::shared_ptr(new Context)).first->second : existing->second; + context->name = name; + context->numExecutors = conf.get("EXECUTORS", 6); + + knownContexts.insert(name); } } - // remove any nodes whose config files disappeared. - // if there are no known nodes, take care not to remove and re-add the default node - for(auto it = nodes.begin(); it != nodes.end();) { - if((it->first == "" && knownNodes.size() == 0) || knownNodes.find(it->first) != knownNodes.end()) + // remove any contexts whose config files disappeared. + // if there are no known contexts, take care not to remove and re-add the default context. 
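+    // (the fallback "default" context created further below has no cfg/contexts/*.conf
+    // file backing it, so without this check it would be erased on every config reload)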
+ for(auto it = contexts.begin(); it != contexts.end();) { + if((it->first == "default" && knownContexts.size() == 0) || knownContexts.find(it->first) != knownContexts.end()) it++; else - it = nodes.erase(it); + it = contexts.erase(it); } - // add a default node - if(nodes.empty()) { - LLOG(INFO, "Creating a default node with 6 executors"); - std::shared_ptr node(new Node); - node->name = ""; - node->numExecutors = 6; - nodes.emplace("", node); + // add a default context + if(contexts.empty()) { + LLOG(INFO, "Creating a default context with 6 executors"); + std::shared_ptr context(new Context); + context->name = "default"; + context->numExecutors = 6; + contexts.emplace("default", context); } KJ_IF_MAYBE(jobsDir, fsHome->tryOpenSubdir(kj::Path{"cfg","jobs"})) { @@ -543,19 +533,25 @@ bool Laminar::loadConfiguration() { std::string jobName(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); - std::string tags = conf.get("TAGS"); - if(!tags.empty()) { - std::istringstream iss(tags); - std::set tagList; - std::string tag; - while(std::getline(iss, tag, ',')) - tagList.insert(tag); - jobTags[jobName] = tagList; - } + std::string ctxPtns = conf.get("CONTEXTS"); + if(!ctxPtns.empty()) { + std::istringstream iss(ctxPtns); + std::set ctxPtnList; + std::string ctx; + while(std::getline(iss, ctx, ',')) + ctxPtnList.insert(ctx); + jobContexts[jobName].swap(ctxPtnList); + } } } + jobGroups.clear(); + KJ_IF_MAYBE(groupsConf, fsHome->tryOpenFile(kj::Path{"cfg","groups.conf"})) + jobGroups = parseConfFile((homePath/"cfg"/"groups.conf").toString(true).cStr()); + if(jobGroups.empty()) + jobGroups["All Jobs"] = ".*"; + return true; } @@ -565,6 +561,10 @@ std::shared_ptr Laminar::queueJob(std::string name, ParamMap params) { return nullptr; } + // If the job has no contexts (maybe there is no .conf file at all), add the default context + if(jobContexts[name].empty()) + jobContexts.at(name).insert("default"); + std::shared_ptr run = std::make_shared(name, kj::mv(params), homePath.clone()); queuedJobs.push_back(run); @@ -594,35 +594,12 @@ void Laminar::abortAll() { } } -bool Laminar::nodeCanQueue(const Node& node, std::string jobName) const { - // if a node is too busy, it can't take the job - if(node.busyExecutors >= node.numExecutors) - return false; - - // if the node has no tags, allow the build - if(node.tags.size() == 0) - return true; - - auto it = jobTags.find(jobName); - // if the job has no tags, it cannot be run on this node - if(it == jobTags.end()) - return false; - - // otherwise, allow the build if job and node have a tag in common - for(const std::string& tag : it->second) { - if(node.tags.find(tag) != node.tags.end()) - return true; - } - - return false; -} - bool Laminar::tryStartRun(std::shared_ptr run, int queueIndex) { - for(auto& sn : nodes) { - std::shared_ptr node = sn.second; + for(auto& sc : contexts) { + std::shared_ptr ctx = sc.second; - if(nodeCanQueue(*node.get(), run->name) && run->configure(buildNums[run->name] + 1, node, *fsHome)) { - node->busyExecutors++; + if(ctx->canQueue(jobContexts.at(run->name)) && run->configure(buildNums[run->name] + 1, ctx, *fsHome)) { + ctx->busyExecutors++; // set the last known result if exists db->stmt("SELECT result FROM builds WHERE name = ? 
ORDER BY completedAt DESC LIMIT 1") .bind(run->name) @@ -640,7 +617,7 @@ bool Laminar::tryStartRun(std::shared_ptr run, int queueIndex) { })); } srv.addTask(kj::mv(exec)); - LLOG(INFO, "Started job on node", run->name, run->build, node->name); + LLOG(INFO, "Started job", run->name, run->build, ctx->name); // update next build number buildNums[run->name]++; @@ -660,11 +637,6 @@ bool Laminar::tryStartRun(std::shared_ptr run, int queueIndex) { .fetch([&](uint etc){ j.set("etc", time(nullptr) + etc); }); - j.startArray("tags"); - for(const str& t: jobTags[run->name]) { - j.String(t.c_str()); - } - j.EndArray(); j.EndObject(); http->notifyEvent(j.str(), run->name.c_str()); return true; @@ -710,9 +682,9 @@ kj::Promise Laminar::handleRunStep(Run* run) { } void Laminar::runFinished(Run * r) { - std::shared_ptr node = r->node; + std::shared_ptr ctx = r->context; - node->busyExecutors--; + ctx->busyExecutors--; LLOG(INFO, "Run completed", r->name, to_string(r->result)); time_t completedAt = time(nullptr); @@ -731,7 +703,7 @@ void Laminar::runFinished(Run * r) { std::string reason = r->reason(); db->stmt("INSERT INTO builds VALUES(?,?,?,?,?,?,?,?,?,?,?,?)") - .bind(r->name, r->build, node->name, r->queuedAt, r->startedAt, completedAt, int(r->result), + .bind(r->name, r->build, ctx->name, r->queuedAt, r->startedAt, completedAt, int(r->result), maybeZipped, logsize, r->parentName, r->parentBuild, reason) .exec(); @@ -746,11 +718,6 @@ void Laminar::runFinished(Run * r) { .set("started", r->startedAt) .set("result", to_string(r->result)) .set("reason", r->reason()); - j.startArray("tags"); - for(const str& t: jobTags[r->name]) { - j.String(t.c_str()); - } - j.EndArray(); j.startArray("artifacts"); populateArtifacts(j, r->name, r->build); j.EndArray(); diff --git a/src/laminar.h b/src/laminar.h index 988db66..febb419 100644 --- a/src/laminar.h +++ b/src/laminar.h @@ -21,15 +21,15 @@ #include "run.h" #include "monitorscope.h" -#include "node.h" +#include "context.h" #include "database.h" #include #include #include -// Node name to node object map -typedef std::unordered_map> NodeMap; +// Context name to context object map +typedef std::unordered_map> ContextMap; struct Server; class Json; @@ -107,7 +107,6 @@ private: bool tryStartRun(std::shared_ptr run, int queueIndex); kj::Promise handleRunStep(Run *run); void runFinished(Run*); - bool nodeCanQueue(const Node&, std::string jobName) const; // expects that Json has started an array void populateArtifacts(Json& out, std::string job, uint num) const; @@ -120,13 +119,15 @@ private: std::unordered_map buildNums; - std::unordered_map> jobTags; + std::unordered_map> jobContexts; + + std::unordered_map jobGroups; Settings settings; RunSet activeJobs; Database* db; Server& srv; - NodeMap nodes; + ContextMap contexts; kj::Path homePath; kj::Own fsHome; uint numKeepRunDirs; diff --git a/src/resources/index.html b/src/resources/index.html index 5041bd2..6b5a7bc 100644 --- a/src/resources/index.html +++ b/src/resources/index.html @@ -207,8 +207,8 @@ diff --git a/src/resources/js/app.js b/src/resources/js/app.js index f7a0a31..93a08a3 100644 --- a/src/resources/js/app.js +++ b/src/resources/js/app.js @@ -415,8 +415,9 @@ const Jobs = function() { var state = { jobs: [], search: '', - tags: [], - tag: null + groups: {}, + group: null, + ungrouped: [] }; return { template: '#jobs', @@ -437,13 +438,10 @@ const Jobs = function() { state.jobs.sort(function(a, b){return a.name < b.name ? -1 : a.name > b.name ? 
1 : 0;}); } } - var tags = {}; - for (var i in state.jobs) { - for (var j in state.jobs[i].tags) { - tags[state.jobs[i].tags[j]] = true; - } - } - state.tags = Object.keys(tags); + state.groups = {}; + Object.keys(msg.groups).map(k => state.groups[k] = new RegExp(msg.groups[k])); + state.ungrouped = state.jobs.filter(j => !Object.values(state.groups).some(r => r.test(j.name))).map(j => j.name); + state.group = state.ungrouped.length ? null : Object.keys(state.groups)[0]; }, job_started: function(data) { var updAt = null; @@ -470,6 +468,8 @@ const Jobs = function() { // first execution of new job. TODO insert without resort state.jobs.unshift(data); state.jobs.sort(function(a, b){return a.name < b.name ? -1 : a.name > b.name ? 1 : 0;}); + if(!Object.values(state.groups).some(r => r.test(data.name))) + state.ungrouped.push(data.name); } else { state.jobs[updAt] = data; } @@ -492,19 +492,13 @@ const Jobs = function() { } }, filteredJobs: function() { - var ret = state.jobs; - var tag = state.tag; - if (tag) { - ret = ret.filter(function(job) { - return job.tags.indexOf(tag) >= 0; - }); - } - var search = this.search; - if (search) { - ret = ret.filter(function(job) { - return job.name.indexOf(search) > -1; - }); - } + let ret = []; + if (state.group) + ret = state.jobs.filter(job => state.groups[state.group].test(job.name)); + else + ret = state.jobs.filter(job => state.ungrouped.includes(job.name)); + if (this.search) + ret = ret.filter(job => job.name.indexOf(this.search) > -1); return ret; }, } diff --git a/src/run.cpp b/src/run.cpp index 73b917f..6eae376 100644 --- a/src/run.cpp +++ b/src/run.cpp @@ -17,7 +17,7 @@ /// along with Laminar. If not, see /// #include "run.h" -#include "node.h" +#include "context.h" #include "conf.h" #include "log.h" @@ -75,7 +75,7 @@ Run::~Run() { LLOG(INFO, "Run destroyed"); } -bool Run::configure(uint buildNum, std::shared_ptr nd, const kj::Directory& fsHome) +bool Run::configure(uint buildNum, std::shared_ptr nd, const kj::Directory& fsHome) { kj::Path cfgDir{"cfg"}; @@ -117,9 +117,6 @@ bool Run::configure(uint buildNum, std::shared_ptr nd, const kj::Directory // global before-run script if(fsHome.exists(cfgDir/"before")) addScript(cfgDir/"before", rd.clone()); - // per-node before-run script - if(fsHome.exists(cfgDir/"nodes"/(nd->name+".before"))) - addScript(cfgDir/"nodes"/(nd->name+".before"), rd.clone()); // job before-run script if(fsHome.exists(cfgDir/"jobs"/(name+".before"))) addScript(cfgDir/"jobs"/(name+".before"), rd.clone()); @@ -128,9 +125,6 @@ bool Run::configure(uint buildNum, std::shared_ptr nd, const kj::Directory // job after-run script if(fsHome.exists(cfgDir/"jobs"/(name+".after"))) addScript(cfgDir/"jobs"/(name+".after"), rd.clone(), true); - // per-node after-run script - if(fsHome.exists(cfgDir/"nodes"/(nd->name+".after"))) - addScript(cfgDir/"nodes"/(nd->name+".after"), rd.clone(), true); // global after-run script if(fsHome.exists(cfgDir/"after")) addScript(cfgDir/"after", rd.clone(), true); @@ -138,8 +132,8 @@ bool Run::configure(uint buildNum, std::shared_ptr nd, const kj::Directory // add environment files if(fsHome.exists(cfgDir/"env")) addEnv(cfgDir/"env"); - if(fsHome.exists(cfgDir/"nodes"/(nd->name+".env"))) - addEnv(cfgDir/"nodes"/(nd->name+".env")); + if(fsHome.exists(cfgDir/"contexts"/(nd->name+".env"))) + addEnv(cfgDir/"contexts"/(nd->name+".env")); if(fsHome.exists(cfgDir/"jobs"/(name+".env"))) addEnv(cfgDir/"jobs"/(name+".env")); @@ -151,7 +145,7 @@ bool Run::configure(uint buildNum, std::shared_ptr nd, const 
kj::Directory // All good, we've "started" startedAt = time(nullptr); build = buildNum; - node = nd; + context = nd; // notifies the rpc client if the start command was used started.fulfiller->fulfill(); @@ -212,8 +206,7 @@ bool Run::step() { setenv("PATH", PATH.c_str(), true); setenv("RUN", buildNum.c_str(), true); setenv("JOB", name.c_str(), true); - if(!node->name.empty()) - setenv("NODE", node->name.c_str(), true); + setenv("CONTEXT", context->name.c_str(), true); setenv("RESULT", to_string(result).c_str(), true); setenv("LAST_RESULT", to_string(lastResult).c_str(), true); setenv("WORKSPACE", (rootPath/"run"/name/"workspace").toString(true).cStr(), true); diff --git a/src/run.h b/src/run.h index 5fde79e..10288cf 100644 --- a/src/run.h +++ b/src/run.h @@ -43,7 +43,7 @@ enum class RunState { std::string to_string(const RunState& rs); -class Node; +class Context; typedef std::unordered_map ParamMap; @@ -57,8 +57,8 @@ public: Run(const Run&) = delete; Run& operator=(const Run&) = delete; - // Call this to "start" the run with a specific number and node - bool configure(uint buildNum, std::shared_ptr node, const kj::Directory &fsHome); + // Call this to "start" the run with a specific number and context + bool configure(uint buildNum, std::shared_ptr context, const kj::Directory &fsHome); // executes the next script (if any), returning true if there is nothing // more to be done. @@ -76,7 +76,7 @@ public: kj::Promise&& whenStarted() { return kj::mv(started.promise); } kj::Promise&& whenFinished() { return kj::mv(finished.promise); } - std::shared_ptr node; + std::shared_ptr context; RunState result; RunState lastResult; std::string name; diff --git a/test/unit-run.cpp b/test/unit-run.cpp index 23d4a56..f893365 100644 --- a/test/unit-run.cpp +++ b/test/unit-run.cpp @@ -19,7 +19,7 @@ #include #include "run.h" #include "log.h" -#include "node.h" +#include "context.h" #include "conf.h" #include "tempdir.h" @@ -27,7 +27,7 @@ class RunTest : public testing::Test { protected: RunTest() : testing::Test(), - node(std::make_shared()), + context(std::make_shared()), tmp(), run("foo", ParamMap{}, tmp.path.clone()) { @@ -64,7 +64,7 @@ protected: return map; } - std::shared_ptr node; + std::shared_ptr context; TempDir tmp; class Run run; @@ -78,7 +78,7 @@ protected: TEST_F(RunTest, WorkingDirectory) { setRunLink("pwd"); - run.configure(1, node, *tmp.fs); + run.configure(1, context, *tmp.fs); runAll(); std::string cwd{tmp.path.append(kj::Path{"run","foo","1"}).toString(true).cStr()}; EXPECT_EQ(cwd + "\n", readAllOutput()); @@ -86,21 +86,21 @@ TEST_F(RunTest, WorkingDirectory) { TEST_F(RunTest, SuccessStatus) { setRunLink("true"); - run.configure(1, node, *tmp.fs); + run.configure(1, context, *tmp.fs); runAll(); EXPECT_EQ(RunState::SUCCESS, run.result); } TEST_F(RunTest, FailedStatus) { setRunLink("false"); - run.configure(1, node, *tmp.fs); + run.configure(1, context, *tmp.fs); runAll(); EXPECT_EQ(RunState::FAILED, run.result); } TEST_F(RunTest, Environment) { setRunLink("env"); - run.configure(1234, node, *tmp.fs); + run.configure(1234, context, *tmp.fs); runAll(); std::string ws{tmp.path.append(kj::Path{"run","foo","workspace"}).toString(true).cStr()}; @@ -118,7 +118,7 @@ TEST_F(RunTest, Environment) { TEST_F(RunTest, ParamsToEnv) { setRunLink("env"); run.params["foo"] = "bar"; - run.configure(1, node, *tmp.fs); + run.configure(1, context, *tmp.fs); runAll(); StringMap map = parseFromString(readAllOutput()); EXPECT_EQ("bar", map["foo"]); @@ -126,7 +126,7 @@ TEST_F(RunTest, ParamsToEnv) { TEST_F(RunTest, 
Abort) { setRunLink("yes"); - run.configure(1, node, *tmp.fs); + run.configure(1, context, *tmp.fs); run.step(); usleep(200); // TODO fix run.abort(false);