///
/// Copyright 2015-2018 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include "server.h"
#include "interface.h"
#include "laminar.capnp.h"
#include "resources.h"
#include "log.h"

#include <capnp/ez-rpc.h>
#include <capnp/rpc-twoparty.h>
#include <capnp/rpc.capnp.h>
#include <kj/async-io.h>
#include <kj/async-unix.h>
#include <kj/threadlocal.h>

#include <sys/eventfd.h>
#include <sys/inotify.h>
#include <sys/signal.h>
#include <sys/signalfd.h>

#include <rapidjson/document.h>

// Size of buffer used to read from file descriptors. Should be
// a multiple of sizeof(struct signalfd_siginfo) == 128
#define PROC_IO_BUFSIZE 4096
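// (4096 == 32 * 128, so a full buffer always holds whole siginfo records)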

namespace {

// Used for returning run state to RPC clients
LaminarCi::JobResult fromRunState(RunState state) {
    switch(state) {
    case RunState::SUCCESS: return LaminarCi::JobResult::SUCCESS;
    case RunState::FAILED: return LaminarCi::JobResult::FAILED;
    case RunState::ABORTED: return LaminarCi::JobResult::ABORTED;
    default:
        return LaminarCi::JobResult::UNKNOWN;
    }
}

} // anonymous namespace

// This is the implementation of the Laminar Cap'n Proto RPC interface.
// As such, it implements the pure virtual interface generated from
// laminar.capnp with calls to the LaminarInterface
class RpcImpl : public LaminarCi::Server, public LaminarWaiter {
public:
    RpcImpl(LaminarInterface& l) :
        LaminarCi::Server(),
        laminar(l)
    {
        laminar.registerWaiter(this);
    }

    ~RpcImpl() override {
        laminar.deregisterWaiter(this);
    }

    // Queue a job, without waiting for it to start
    kj::Promise<void> queue(QueueContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC queue", jobName);
        ParamMap params;
        for(auto p : context.getParams().getParams()) {
            params[p.getName().cStr()] = p.getValue().cStr();
        }
        LaminarCi::MethodResult result = laminar.queueJob(jobName, params)
                ? LaminarCi::MethodResult::SUCCESS
                : LaminarCi::MethodResult::FAILED;
        context.getResults().setResult(result);
        return kj::READY_NOW;
    }
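
    // Illustrative note: clients typically reach this method via the
    // bundled laminarc CLI, e.g. `laminarc queue myjob`; any name/value
    // parameters supplied by the client arrive in getParams() above.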

    // Start a job, without waiting for it to finish
    kj::Promise<void> start(StartContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC start", jobName);
        ParamMap params;
        for(auto p : context.getParams().getParams()) {
            params[p.getName().cStr()] = p.getValue().cStr();
        }
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params);
        if(Run* r = run.get()) {
            return r->started.promise.then([context,r]() mutable {
                context.getResults().setResult(LaminarCi::MethodResult::SUCCESS);
                context.getResults().setBuildNum(r->build);
            });
        } else {
            context.getResults().setResult(LaminarCi::MethodResult::FAILED);
            return kj::READY_NOW;
        }
    }

    // Start a job and wait for the result
    kj::Promise<void> run(RunContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC run", jobName);
        ParamMap params;
        for(auto p : context.getParams().getParams()) {
            params[p.getName().cStr()] = p.getValue().cStr();
        }
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params);
        if(const Run* r = run.get()) {
            runWaiters[r].emplace_back(kj::newPromiseAndFulfiller<RunState>());
            return runWaiters[r].back().promise.then([context,run](RunState state) mutable {
                context.getResults().setResult(fromRunState(state));
                context.getResults().setBuildNum(run->build);
            });
        } else {
            context.getResults().setResult(LaminarCi::JobResult::UNKNOWN);
            return kj::READY_NOW;
        }
    }
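
    // The promise returned by run() above is fulfilled by complete()
    // (below) via the LaminarWaiter interface once the run finishes;
    // capturing the shared_ptr in the lambda keeps the Run alive until then.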

    // Set a parameter on a running build
    kj::Promise<void> set(SetContext context) override {
        std::string jobName = context.getParams().getJobName();
        uint buildNum = context.getParams().getBuildNum();
        LLOG(INFO, "RPC set", jobName, buildNum);

        LaminarCi::MethodResult result = laminar.setParam(jobName, buildNum,
            context.getParams().getParam().getName(), context.getParams().getParam().getValue())
                ? LaminarCi::MethodResult::SUCCESS
                : LaminarCi::MethodResult::FAILED;
        context.getResults().setResult(result);
        return kj::READY_NOW;
    }

    // Take a named lock
    kj::Promise<void> lock(LockContext context) override {
        std::string lockName = context.getParams().getLockName();
        LLOG(INFO, "RPC lock", lockName);
        auto& lockList = locks[lockName];
        lockList.emplace_back(kj::newPromiseAndFulfiller<void>());
        if(lockList.size() == 1)
            lockList.front().fulfiller->fulfill();
        return std::move(lockList.back().promise);
    }

    // Release a named lock
    kj::Promise<void> release(ReleaseContext context) override {
        std::string lockName = context.getParams().getLockName();
        LLOG(INFO, "RPC release", lockName);
        auto& lockList = locks[lockName];
        if(lockList.size() == 0) {
            LLOG(INFO, "Attempt to release unheld lock", lockName);
            return kj::READY_NOW;
        }
        lockList.erase(lockList.begin());
        if(lockList.size() > 0)
            lockList.front().fulfiller->fulfill();
        return kj::READY_NOW;
    }
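
    // Together, lock() and release() implement a FIFO queue per lock name:
    // the first lock() is granted immediately (fulfilled at size()==1);
    // later callers' promises stay pending until release() pops the head
    // and fulfills the next waiter. E.g. A lock(), B lock() (pends),
    // A release() -> B's promise resolves.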

private:
    // Implements LaminarWaiter::complete
    void complete(const Run* r) override {
        for(kj::PromiseFulfillerPair<RunState>& w : runWaiters[r])
            w.fulfiller->fulfill(RunState(r->result));
        runWaiters.erase(r);
    }

private:
    LaminarInterface& laminar;
    std::unordered_map<std::string, std::list<kj::PromiseFulfillerPair<void>>> locks;
    std::unordered_map<const Run*, std::list<kj::PromiseFulfillerPair<RunState>>> runWaiters;
};

// This is the implementation of the HTTP/Websocket interface. It creates
// websocket connections as LaminarClients and registers them with the
// LaminarInterface so that status messages will be delivered to the client.
// On opening a websocket connection, it delivers a status snapshot message
// (see LaminarInterface::sendStatus)
class HttpImpl : public kj::HttpService {
public:
    HttpImpl(LaminarInterface& laminar, kj::HttpHeaderTable& tbl) :
        laminar(laminar),
        responseHeaders(tbl)
    {}
    virtual ~HttpImpl() {}

private:
    // Implements LaminarClient and holds the Websocket connection object.
    // Automatically destructed when the promise created in request() resolves
    // or is cancelled
    class WebsocketClient : public LaminarClient {
    public:
        WebsocketClient(LaminarInterface& laminar, kj::Own<kj::WebSocket>&& ws) :
            laminar(laminar),
            ws(kj::mv(ws))
        {}
        ~WebsocketClient() override {
            laminar.deregisterClient(this);
        }
        virtual void sendMessage(std::string payload) override {
            messages.push_back(kj::mv(payload));
            // sendMessage might be called several times before the event loop
            // gets a chance to act on the fulfiller. So store the payload here
            // where it can be fetched later and don't pass the payload with the
            // fulfiller because subsequent calls to fulfill() are ignored.
            fulfiller->fulfill();
        }
        LaminarInterface& laminar;
        kj::Own<kj::WebSocket> ws;
        std::list<std::string> messages;
        kj::Own<kj::PromiseFulfiller<void>> fulfiller;
    };

    kj::Promise<void> websocketRead(WebsocketClient& lc)
    {
        return lc.ws->receive().then([&lc,this](kj::WebSocket::Message&& message) {
            KJ_SWITCH_ONEOF(message) {
                KJ_CASE_ONEOF(str, kj::String) {
                    rapidjson::Document d;
                    d.ParseInsitu(const_cast<char*>(str.cStr()));
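                    // Expected client control message, e.g.
                    // {"page":2,"field":"number","order":"dsc"} -- selects
                    // which page/sort of the run list this client sees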
                    if(d.HasMember("page") && d["page"].IsInt() && d.HasMember("field") && d["field"].IsString() && d.HasMember("order") && d["order"].IsString()) {
                        lc.scope.page = d["page"].GetInt();
                        lc.scope.field = d["field"].GetString();
                        lc.scope.order_desc = strcmp(d["order"].GetString(), "dsc") == 0;
                        laminar.sendStatus(&lc);
                        return websocketRead(lc);
                    }
                }
                KJ_CASE_ONEOF(close, kj::WebSocket::Close) {
                    // clean socket shutdown
                    return lc.ws->close(close.code, close.reason);
                }
                KJ_CASE_ONEOF_DEFAULT {}
            }
            // unhandled/unknown message
            return lc.ws->disconnect();
        });
    }

    kj::Promise<void> websocketWrite(WebsocketClient& lc)
    {
        auto paf = kj::newPromiseAndFulfiller<void>();
        lc.fulfiller = kj::mv(paf.fulfiller);
        return paf.promise.then([this,&lc]{
            kj::Promise<void> p = kj::READY_NOW;
            std::list<std::string> messages = kj::mv(lc.messages);
            for(std::string& s : messages) {
                p = p.then([&s,&lc]{
                    kj::String str = kj::str(s);
                    return lc.ws->send(str).attach(kj::mv(str));
                });
            }
            return p.attach(kj::mv(messages)).then([this,&lc]{
                return websocketWrite(lc);
            });
        });
    }

    kj::Promise<void> websocketUpgraded(WebsocketClient& lc, std::string resource) {
        // convert the requested URL to a MonitorScope
        if(resource.substr(0, 5) == "/jobs") {
            if(resource.length() == 5) {
                lc.scope.type = MonitorScope::ALL;
            } else {
                resource = resource.substr(5);
                size_t split = resource.find('/',1);
                std::string job = resource.substr(1,split-1);
                if(!job.empty()) {
                    lc.scope.job = job;
                    lc.scope.type = MonitorScope::JOB;
                }
                if(split != std::string::npos) {
                    size_t split2 = resource.find('/', split+1);
                    std::string run = resource.substr(split+1, split2-split);
                    if(!run.empty()) {
                        lc.scope.num = static_cast<uint>(atoi(run.c_str()));
                        lc.scope.type = MonitorScope::RUN;
                    }
                    if(split2 != std::string::npos && resource.compare(split2, 4, "/log") == 0) {
                        lc.scope.type = MonitorScope::LOG;
                    }
                }
            }
        }
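        // Resulting mapping, for example:
        //   /jobs           -> MonitorScope::ALL
        //   /jobs/foo       -> JOB (job="foo")
        //   /jobs/foo/3     -> RUN (job="foo", num=3)
        //   /jobs/foo/3/log -> LOG
        // Any other path leaves the default scope unchanged.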
        laminar.registerClient(&lc);
        kj::Promise<void> connection = websocketRead(lc).exclusiveJoin(websocketWrite(lc));
        // registerClient can happen after a successful websocket handshake.
        // However, the connection might not be closed gracefully, so the
        // corresponding deregister operation happens in the WebsocketClient
        // destructor rather than the close handler or a then() clause
        laminar.sendStatus(&lc);
        return connection;
    }

    virtual kj::Promise<void> request(kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers,
                                      kj::AsyncInputStream& requestBody, Response& response) override
    {
        std::string resource = url.cStr();
        if(headers.isWebSocket()) {
            responseHeaders.clear();
            kj::Own<WebsocketClient> lc = kj::heap<WebsocketClient>(laminar, response.acceptWebSocket(responseHeaders));
            return websocketUpgraded(*lc, resource).attach(kj::mv(lc));
        } else {
            // handle regular HTTP request
            const char* start, *end, *content_type;
            std::string badge;
            responseHeaders.clear();
            if(resource.compare(0, strlen("/archive/"), "/archive/") == 0) {
                kj::Own<MappedFile> file = laminar.getArtefact(resource.substr(strlen("/archive/")));
                if(file->address() != nullptr) {
                    responseHeaders.add("Content-Transfer-Encoding", "binary");
                    auto stream = response.send(200, "OK", responseHeaders, file->size());
                    return stream->write(file->address(), file->size()).attach(kj::mv(file)).attach(kj::mv(stream));
                }
            } else if(resource.compare("/custom/style.css") == 0) {
                responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "text/css; charset=utf-8");
                responseHeaders.add("Content-Transfer-Encoding", "binary");
                std::string css = laminar.getCustomCss();
                auto stream = response.send(200, "OK", responseHeaders, css.size());
                return stream->write(css.data(), css.size()).attach(kj::mv(css)).attach(kj::mv(stream));
            } else if(resources.handleRequest(resource, &start, &end, &content_type)) {
                responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, content_type);
                responseHeaders.add("Content-Encoding", "gzip");
                responseHeaders.add("Content-Transfer-Encoding", "binary");
                auto stream = response.send(200, "OK", responseHeaders, end-start);
                return stream->write(start, end-start).attach(kj::mv(stream));
            } else if(url.startsWith("/badge/") && url.endsWith(".svg") && laminar.handleBadgeRequest(resource.substr(7,resource.length()-11), badge)) {
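                // e.g. "/badge/myjob.svg": substr(7, length-11) strips the
                // 7-char "/badge/" prefix and 4-char ".svg" suffix, passing
                // the bare job name "myjob" to handleBadgeRequest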
                responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "image/svg+xml");
                responseHeaders.add("Cache-Control", "no-cache");
                auto stream = response.send(200, "OK", responseHeaders, badge.size());
                return stream->write(badge.data(), badge.size()).attach(kj::mv(badge)).attach(kj::mv(stream));
            }
            return response.sendError(404, "Not Found", responseHeaders);
        }
    }

    LaminarInterface& laminar;
    Resources resources;
    kj::HttpHeaders responseHeaders;
};

// Context for an RPC connection
struct RpcConnection {
    RpcConnection(kj::Own<kj::AsyncIoStream>&& stream,
                  capnp::Capability::Client bootstrap,
                  capnp::ReaderOptions readerOpts) :
        stream(kj::mv(stream)),
        network(*this->stream, capnp::rpc::twoparty::Side::SERVER, readerOpts),
        rpcSystem(capnp::makeRpcServer(network, bootstrap))
    {
    }
    kj::Own<kj::AsyncIoStream> stream;
    capnp::TwoPartyVatNetwork network;
    capnp::RpcSystem<capnp::rpc::twoparty::VatId> rpcSystem;
};
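
// Note the member order above: stream is declared first, so on destruction
// rpcSystem and network are torn down before the stream they reference.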

Server::Server(LaminarInterface& li, kj::StringPtr rpcBindAddress,
               kj::StringPtr httpBindAddress) :
    rpcInterface(kj::heap<RpcImpl>(li)),
    laminarInterface(li),
    ioContext(kj::setupAsyncIo()),
    headerTable(),
    httpService(kj::heap<HttpImpl>(li, headerTable)),
    httpServer(kj::heap<kj::HttpServer>(ioContext.provider->getTimer(), headerTable, *httpService)),
    listeners(kj::heap<kj::TaskSet>(*this)),
    childTasks(*this),
    httpConnections(*this),
    httpReady(kj::newPromiseAndFulfiller<void>())
{
    // RPC task
    if(rpcBindAddress.startsWith("unix:"))
        unlink(rpcBindAddress.slice(strlen("unix:")).cStr());
    listeners->add(ioContext.provider->getNetwork().parseAddress(rpcBindAddress)
                   .then([this](kj::Own<kj::NetworkAddress>&& addr) {
        return acceptRpcClient(addr->listen());
    }));

    // HTTP task
    if(httpBindAddress.startsWith("unix:"))
        unlink(httpBindAddress.slice(strlen("unix:")).cStr());
    listeners->add(ioContext.provider->getNetwork().parseAddress(httpBindAddress)
                   .then([this](kj::Own<kj::NetworkAddress>&& addr) {
        // TODO: a better way? Currently used only for testing
        httpReady.fulfiller->fulfill();
        kj::Own<kj::ConnectionReceiver> listener = addr->listen();
        return httpServer->listenHttp(*listener).attach(kj::mv(listener));
    }));

    // handle watched paths
    {
        inotify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
        pathWatch = readDescriptor(inotify_fd, [this](const char*, size_t){
            laminarInterface.notifyConfigChanged();
        });
    }
}

Server::~Server() {
}

void Server::start() {
    // The eventfd is used to quit the server later since we need to trigger
    // a reaction from the event loop
    efd_quit = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK);
    kj::evalLater([this](){
        static uint64_t _;
        auto wakeEvent = ioContext.lowLevelProvider->wrapInputFd(efd_quit);
        return wakeEvent->read(&_, sizeof(uint64_t)).attach(std::move(wakeEvent));
    }).wait(ioContext.waitScope);
    // Execution arrives here when the eventfd is triggered (in stop())

    // Shutdown sequence:
    // 1. stop accepting new connections
    listeners = nullptr;
    // 2. abort current jobs. Most of the time this isn't necessary since
    // systemd stop or other kill mechanism will send SIGTERM to the whole
    // process group.
    laminarInterface.abortAll();
    // 3. wait for all children to close
    childTasks.onEmpty().wait(ioContext.waitScope);
    // 4. run the loop once more to send any pending output to websocket clients
    ioContext.waitScope.poll();
    // 5. return: websockets will be destructed when class is deleted
}

void Server::stop() {
    // This method is expected to be called in signal context, so an eventfd
    // is used to get the main loop to react. See start()
    eventfd_write(efd_quit, 1);
}

kj::Promise<void> Server::readDescriptor(int fd, std::function<void(const char*,size_t)> cb) {
    auto event = this->ioContext.lowLevelProvider->wrapInputFd(fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP);
    auto buffer = kj::heapArrayBuilder<char>(PROC_IO_BUFSIZE);
    return handleFdRead(event, buffer.asPtr().begin(), cb).attach(std::move(event)).attach(std::move(buffer));
}

void Server::addTask(kj::Promise<void>&& task) {
    childTasks.add(kj::mv(task));
}

kj::Promise<void> Server::addTimeout(int seconds, std::function<void()> cb) {
    return ioContext.lowLevelProvider->getTimer().afterDelay(seconds * kj::SECONDS).then([cb](){
        cb();
    }).eagerlyEvaluate(nullptr);
}
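
// Illustrative usage: addTimeout(60, []{ LLOG(INFO, "timed out"); }) invokes
// the callback once, 60 seconds from now; eagerlyEvaluate() runs the chain
// without requiring the caller to wait on the returned promise.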

kj::Promise<int> Server::onChildExit(kj::Maybe<pid_t>& pid) {
    return ioContext.unixEventPort.onChildExit(pid);
}

void Server::addWatchPath(const char* dpath) {
    inotify_add_watch(inotify_fd, dpath, IN_ONLYDIR | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE);
}

kj::Promise<void> Server::acceptRpcClient(kj::Own<kj::ConnectionReceiver>&& listener) {
    kj::ConnectionReceiver& cr = *listener.get();
    return cr.accept().then(kj::mvCapture(kj::mv(listener),
        [this](kj::Own<kj::ConnectionReceiver>&& listener, kj::Own<kj::AsyncIoStream>&& connection) {
        auto server = kj::heap<RpcConnection>(kj::mv(connection), rpcInterface, capnp::ReaderOptions());
        childTasks.add(server->network.onDisconnect().attach(kj::mv(server)));
        return acceptRpcClient(kj::mv(listener));
    }));
}

// Returns a promise which will read a chunk of data from the file descriptor
// wrapped by stream and invoke the provided callback with the read data.
// Repeats until the stream reports end-of-file (tryRead returns 0)
kj::Promise<void> Server::handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function<void(const char*,size_t)> cb) {
    return stream->tryRead(buffer, 1, PROC_IO_BUFSIZE).then([this,stream,buffer,cb](size_t sz) {
        if(sz > 0) {
            cb(buffer, sz);
            return handleFdRead(stream, kj::mv(buffer), cb);
        }
        return kj::Promise<void>(kj::READY_NOW);
    });
}

void Server::taskFailed(kj::Exception&& exception) {
    // An unexpected http connection close can cause an exception, so don't re-throw.
    // TODO: consider re-throwing selected exceptions
    LLOG(INFO, exception);
    //kj::throwFatalException(kj::mv(exception));
}