2015-09-13 20:25:26 +00:00
|
|
|
///
|
2017-12-09 10:21:34 +00:00
|
|
|
/// Copyright 2015-2017 Oliver Giles
|
2015-09-13 20:25:26 +00:00
|
|
|
///
|
|
|
|
/// This file is part of Laminar
|
|
|
|
///
|
|
|
|
/// Laminar is free software: you can redistribute it and/or modify
|
|
|
|
/// it under the terms of the GNU General Public License as published by
|
|
|
|
/// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
/// (at your option) any later version.
|
|
|
|
///
|
|
|
|
/// Laminar is distributed in the hope that it will be useful,
|
|
|
|
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
/// GNU General Public License for more details.
|
|
|
|
///
|
|
|
|
/// You should have received a copy of the GNU General Public License
|
|
|
|
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
|
|
|
|
///
|
|
|
|
#include "server.h"
|
|
|
|
#include "interface.h"
|
|
|
|
#include "laminar.capnp.h"
|
|
|
|
#include "resources.h"
|
2015-12-06 11:36:12 +00:00
|
|
|
#include "log.h"
|
2015-09-13 20:25:26 +00:00
|
|
|
|
|
|
|
#include <capnp/ez-rpc.h>
|
|
|
|
#include <capnp/rpc-twoparty.h>
|
|
|
|
#include <capnp/rpc.capnp.h>
|
|
|
|
#include <kj/async-io.h>
|
|
|
|
#include <kj/threadlocal.h>
|
|
|
|
|
|
|
|
#include <websocketpp/config/core.hpp>
|
|
|
|
#include <websocketpp/server.hpp>
|
|
|
|
|
|
|
|
#include <sys/eventfd.h>
|
2018-04-06 15:04:50 +00:00
|
|
|
#include <sys/inotify.h>
|
2018-02-24 16:53:11 +00:00
|
|
|
#include <sys/signal.h>
|
|
|
|
#include <sys/signalfd.h>
|
2015-09-13 20:25:26 +00:00
|
|
|
|
2016-07-25 11:59:45 +00:00
|
|
|
// Size of buffer used to read from file descriptors. Should be
|
|
|
|
// a multiple of sizeof(struct signalfd_siginfo) == 128
|
|
|
|
#define PROC_IO_BUFSIZE 4096
|
|
|
|
|
2015-09-13 20:25:26 +00:00
|
|
|
// Configuration struct for the websocketpp template library.
|
|
|
|
// Configuration struct for the websocketpp template library.
// Logging is disabled entirely; the commented-out levels below can be
// re-enabled for debugging.
struct wsconfig : public websocketpp::config::core {
//    static const websocketpp::log::level elog_level =
//        websocketpp::log::elevel::info;

//    static const websocketpp::log::level alog_level =
//        websocketpp::log::alevel::access_core |
//        websocketpp::log::alevel::message_payload ;

    static const websocketpp::log::level elog_level =
            websocketpp::log::elevel::none;

    static const websocketpp::log::level alog_level =
            websocketpp::log::alevel::none;

    // Injects a LaminarClient pointer into every websocketpp connection
    // object so handlers can reach the associated client context via c->lc
    typedef struct { LaminarClient* lc; } connection_base;
};
|
|
|
|
typedef websocketpp::server<wsconfig> websocket;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Used for returning run state to RPC clients
|
|
|
|
LaminarCi::JobResult fromRunState(RunState state) {
|
|
|
|
switch(state) {
|
|
|
|
case RunState::SUCCESS: return LaminarCi::JobResult::SUCCESS;
|
2015-12-06 11:36:12 +00:00
|
|
|
case RunState::FAILED: return LaminarCi::JobResult::FAILED;
|
|
|
|
case RunState::ABORTED: return LaminarCi::JobResult::ABORTED;
|
2015-09-13 20:25:26 +00:00
|
|
|
default:
|
|
|
|
return LaminarCi::JobResult::UNKNOWN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// This is the implementation of the Laminar Cap'n Proto RPC interface.
|
|
|
|
// As such, it implements the pure virtual interface generated from
|
|
|
|
// laminar.capnp with calls to the LaminarInterface
|
2017-08-10 05:25:20 +00:00
|
|
|
class RpcImpl : public LaminarCi::Server, public LaminarWaiter {
public:
    // Registers with the LaminarInterface so that complete() is invoked
    // whenever any run finishes.
    RpcImpl(LaminarInterface& l) :
        LaminarCi::Server(),
        laminar(l)
    {
        laminar.registerWaiter(this);
    }

    ~RpcImpl() override {
        laminar.deregisterWaiter(this);
    }

    // Start a job, without waiting for it to finish
    kj::Promise<void> trigger(TriggerContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC trigger", jobName);
        // Copy RPC-supplied parameters into a plain map for queueJob
        ParamMap params;
        for(auto p : context.getParams().getParams()) {
            params[p.getName().cStr()] = p.getValue().cStr();
        }
        // queueJob's return converts to false when the job could not be
        // queued (presumably unknown job name — TODO confirm in interface.h)
        LaminarCi::MethodResult result = laminar.queueJob(jobName, params)
                ? LaminarCi::MethodResult::SUCCESS
                : LaminarCi::MethodResult::FAILED;
        context.getResults().setResult(result);
        return kj::READY_NOW;
    }

    // Start a job and wait for the result
    kj::Promise<void> run(RunContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC run", jobName);
        ParamMap params;
        for(auto p : context.getParams().getParams()) {
            params[p.getName().cStr()] = p.getValue().cStr();
        }
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params);
        if(const Run* r = run.get()) {
            uint num = r->build;
            // Queue a promise/fulfiller pair keyed on the raw Run pointer;
            // complete() fulfills it with the final state when the run ends.
            // NOTE(review): assumes the Run outlives this RPC or complete()
            // is always called before destruction — verify in laminar.cpp
            runWaiters[r].emplace_back(kj::newPromiseAndFulfiller<RunState>());
            return runWaiters[r].back().promise.then([context,num](RunState state) mutable {
                context.getResults().setResult(fromRunState(state));
                context.getResults().setBuildNum(num);
            });
        } else {
            // Job could not be queued at all
            context.getResults().setResult(LaminarCi::JobResult::UNKNOWN);
            return kj::READY_NOW;
        }
    }

    // Set a parameter on a running build
    kj::Promise<void> set(SetContext context) override {
        std::string jobName = context.getParams().getJobName();
        uint buildNum = context.getParams().getBuildNum();
        LLOG(INFO, "RPC set", jobName, buildNum);

        LaminarCi::MethodResult result = laminar.setParam(jobName, buildNum,
            context.getParams().getParam().getName(), context.getParams().getParam().getValue())
                ? LaminarCi::MethodResult::SUCCESS
                : LaminarCi::MethodResult::FAILED;
        context.getResults().setResult(result);
        return kj::READY_NOW;
    }

    // Take a named lock
    kj::Promise<void> lock(LockContext context) override {
        std::string lockName = context.getParams().getLockName();
        LLOG(INFO, "RPC lock", lockName);
        auto& lockList = locks[lockName];
        lockList.emplace_back(kj::newPromiseAndFulfiller<void>());
        // If ours is the only entry, the lock was free: grant immediately.
        // Otherwise the returned promise stays pending until release()
        // fulfills it.
        if(lockList.size() == 1)
            lockList.front().fulfiller->fulfill();
        return std::move(lockList.back().promise);
    }

    // Release a named lock
    kj::Promise<void> release(ReleaseContext context) override {
        std::string lockName = context.getParams().getLockName();
        LLOG(INFO, "RPC release", lockName);
        auto& lockList = locks[lockName];
        if(lockList.size() == 0) {
            LLOG(INFO, "Attempt to release unheld lock", lockName);
            return kj::READY_NOW;
        }
        // Drop the current holder (front of the queue); if anyone is
        // still waiting, hand the lock to the next in line
        lockList.erase(lockList.begin());
        if(lockList.size() > 0)
            lockList.front().fulfiller->fulfill();
        return kj::READY_NOW;
    }
private:
    // Implements LaminarWaiter::complete
    // Fulfills (and discards) every pending run() RPC waiting on this run.
    void complete(const Run* r) override {
        for(kj::PromiseFulfillerPair<RunState>& w : runWaiters[r])
            w.fulfiller->fulfill(RunState(r->result));
        runWaiters.erase(r);
    }
private:
    LaminarInterface& laminar;
    // Per-name FIFO of lock claimants; front() is the current holder
    std::unordered_map<std::string, std::list<kj::PromiseFulfillerPair<void>>> locks;
    // Pending run() RPCs keyed by the run they are waiting on
    std::unordered_map<const Run*, std::list<kj::PromiseFulfillerPair<RunState>>> runWaiters;
};
|
|
|
|
|
|
|
|
|
|
|
|
// This is the implementation of the HTTP/Websocket interface. It exposes
|
|
|
|
// websocket connections as LaminarClients and registers them with the
|
|
|
|
// LaminarInterface so that status messages will be delivered to the client.
|
|
|
|
// On opening a websocket connection, it delivers a status snapshot message
|
|
|
|
// (see LaminarInterface::sendStatus)
|
|
|
|
class Server::HttpImpl {
public:
    // Installs HTTP and websocket-open handlers on the websocketpp server
    // instance. The instance itself never listens; connections are fed to
    // it via newConnection() from the kj accept loop.
    HttpImpl(LaminarInterface& l) :
        laminar(l)
    {
        // debug logging
        // wss.set_access_channels(websocketpp::log::alevel::all);
        // wss.set_error_channels(websocketpp::log::elevel::all);

        // TODO: This could be used in the future to trigger actions on the
        // server in response to a web client request. Currently not supported.
        // wss.set_message_handler([](std::weak_ptr<void> s, websocket::message_ptr msg){
        //     msg->get_payload();
        // });

        // Handle plain HTTP requests by delivering the binary resource
        wss.set_http_handler([this](websocketpp::connection_hdl hdl){
            websocket::connection_ptr c = wss.get_con_from_hdl(hdl);
            const char* start, *end, *content_type;
            std::string resource = c->get_resource();
            if(resource.compare(0, strlen("/archive/"), "/archive/") == 0) {
                // Build artefact download: content fetched via laminar
                std::string file(resource.substr(strlen("/archive/")));
                std::string content;
                if(laminar.getArtefact(file, content)) {
                    c->set_status(websocketpp::http::status_code::ok);
                    c->append_header("Content-Transfer-Encoding", "binary");
                    c->set_body(content);
                } else {
                    c->set_status(websocketpp::http::status_code::not_found);
                }
            } else if(resource.compare("/custom/style.css") == 0) {
                // User-provided stylesheet override
                c->set_status(websocketpp::http::status_code::ok);
                c->append_header("Content-Transfer-Encoding", "binary");
                c->set_body(laminar.getCustomCss());
            } else if(resources.handleRequest(resource, &start, &end, &content_type)) {
                // Static resource compiled into the binary; stored
                // pre-gzipped, hence the Content-Encoding header
                c->set_status(websocketpp::http::status_code::ok);
                c->append_header("Content-Type", content_type);
                c->append_header("Content-Encoding", "gzip");
                c->append_header("Content-Transfer-Encoding", "binary");
                std::string response(start,end);
                c->set_body(response);
            } else {
                // 404
                c->set_status(websocketpp::http::status_code::not_found);
            }
        });

        // Handle new websocket connection. Parse the URL to determine
        // the client's scope of interest, register the client for update
        // messages, and call sendStatus.
        wss.set_open_handler([this](websocketpp::connection_hdl hdl){
            websocket::connection_ptr c = wss.get_con_from_hdl(hdl);
            std::string res = c->get_resource();
            // Recognized paths: /jobs, /jobs/<job>, /jobs/<job>/<num>[/log]
            if(res.substr(0, 5) == "/jobs") {
                if(res.length() == 5) {
                    c->lc->scope.type = MonitorScope::ALL;
                } else {
                    res = res.substr(5);
                    size_t split = res.find('/',1);
                    // npos-aware: if there is no second '/', substr takes
                    // the rest of the string as the job name
                    std::string job = res.substr(1,split-1);
                    if(!job.empty()) {
                        c->lc->scope.job = job;
                        c->lc->scope.type = MonitorScope::JOB;
                    }
                    if(split != std::string::npos) {
                        size_t split2 = res.find('/', split+1);
                        // NOTE(review): when split2 != npos this span includes
                        // the trailing '/', but atoi stops at it anyway
                        std::string run = res.substr(split+1, split2-split);
                        if(!run.empty()) {
                            c->lc->scope.num = static_cast<uint>(atoi(run.c_str()));
                            c->lc->scope.type = MonitorScope::RUN;
                        }
                        if(split2 != std::string::npos && res.compare(split2, 4, "/log") == 0) {
                            c->lc->scope.type = MonitorScope::LOG;
                        }
                    }
                }
            }
            // registerClient can happen after a successful websocket handshake.
            // However, the connection might not be closed gracefully, so the
            // corresponding deregister operation happens in the connection
            // destructor rather than the close handler
            laminar.registerClient(c->lc);
            laminar.sendStatus(c->lc);
        });
    }

    // Return a new connection object linked with the context defined below.
    // This is a bit untidy, it would be better to make them a single object,
    // but I didn't yet figure it out
    websocket::connection_ptr newConnection(LaminarClient* lc) {
        websocket::connection_ptr c = wss.get_connection();
        c->lc = lc;
        return c;
    }

    void connectionDestroyed(LaminarClient* lc) {
        // This will be called for all connections, not just websockets, so
        // the laminar instance should silently ignore unknown clients
        laminar.deregisterClient(lc);
    }

private:
    Resources resources;
    LaminarInterface& laminar;
    websocket wss;
};
|
|
|
|
|
|
|
|
// Context for an RPC connection
|
|
|
|
// Bundles the stream, vat network and RPC system for one client so their
// lifetimes are tied together; destroyed when the client disconnects.
struct RpcConnection {
    RpcConnection(kj::Own<kj::AsyncIoStream>&& stream,
                  capnp::Capability::Client bootstrap,
                  capnp::ReaderOptions readerOpts) :
        stream(kj::mv(stream)),
        // note: uses this->stream (the member), which has already been
        // move-initialized at this point in the init list
        network(*this->stream, capnp::rpc::twoparty::Side::SERVER, readerOpts),
        rpcSystem(capnp::makeRpcServer(network, bootstrap))
    {
    }
    // Member order matters: network references stream, rpcSystem
    // references network; destruction runs in reverse order.
    kj::Own<kj::AsyncIoStream> stream;
    capnp::TwoPartyVatNetwork network;
    capnp::RpcSystem<capnp::rpc::twoparty::VatId> rpcSystem;
};
|
|
|
|
|
|
|
|
// Context for a WebsocketConnection (implements LaminarClient)
|
2017-12-16 16:21:33 +00:00
|
|
|
// This object maps read and write handlers between the websocketpp library
|
|
|
|
// and the corresponding kj async methods
|
|
|
|
struct Server::WebsocketConnection : public LaminarClient {
    WebsocketConnection(kj::Own<kj::AsyncIoStream>&& stream, Server::HttpImpl& http) :
        http(http),
        stream(kj::mv(stream)),
        cn(http.newConnection(this)),
        writePaf(kj::newPromiseAndFulfiller<void>())
    {
        // websocketpp produces output by invoking this handler: buffer the
        // bytes and wake writeTask() via the fulfiller so they get flushed
        // to the kj stream
        cn->set_write_handler([this](websocketpp::connection_hdl, const char* s, size_t sz) {
            outputBuffer.append(std::string(s, sz));
            writePaf.fulfiller->fulfill();
            return std::error_code();
        });
        cn->start();
    }

    virtual ~WebsocketConnection() noexcept(true) override {
        // Removes the connection from the list of registered clients. Must be
        // here rather than in the websocket closing handshake because connections
        // might be unexpectedly/aggressively closed and any references must be
        // removed.
        http.connectionDestroyed(this);
    }

    // Read task: feed bytes from the kj stream into the websocketpp state
    // machine until EOF (sz == 0) or the websocket session is closed.
    kj::Promise<void> pend() {
        return stream->tryRead(ibuf, 1, sizeof(ibuf)).then([this](size_t sz){
            cn->read_some(ibuf, sz);
            if(sz == 0 || cn->get_state() == websocketpp::session::state::closed) {
                return kj::Promise<void>(kj::READY_NOW);
            }
            return pend();
        });
    }

    // Write task: wait for the write handler to buffer some output, then
    // flush it to the kj stream; repeats until the session is closed.
    kj::Promise<void> writeTask() {
        return writePaf.promise.then([this]() {
            std::string payload;
            // swap out the accumulated output so more can be buffered
            // while this chunk is being sent
            payload.swap(outputBuffer);
            // re-arm the promise/fulfiller pair for the next batch
            writePaf = kj::newPromiseAndFulfiller<void>();
            KJ_ASSERT(!payload.empty());
            // attach keeps payload alive until the async write completes
            return stream->write(payload.data(), payload.size()).then([this](){
                if(cn->get_state() == websocketpp::session::state::closed) {
                    return kj::Promise<void>(kj::READY_NOW);
                }
                return writeTask();
            }).attach(kj::mv(payload));
        });
    }

    // Implements LaminarClient::sendMessage: frames the payload as a
    // websocket text message (output then arrives via the write handler)
    void sendMessage(std::string payload) override {
        cn->send(payload, websocketpp::frame::opcode::text);
    }

    HttpImpl& http;
    kj::Own<kj::AsyncIoStream> stream;
    websocket::connection_ptr cn;
    // Data produced by websocketpp awaiting flush by writeTask()
    std::string outputBuffer;
    // Signals writeTask() that outputBuffer has content
    kj::PromiseFulfillerPair<void> writePaf;
    // Input scratch buffer for pend()
    char ibuf[131072];
};
|
|
|
|
|
|
|
|
// Sets up the RPC and HTTP listeners and the signalfd/inotify event
// sources. Addresses may be "unix:<path>" or host:port forms accepted by
// kj's parseAddress.
Server::Server(LaminarInterface& li, kj::StringPtr rpcBindAddress,
               kj::StringPtr httpBindAddress) :
    rpcInterface(kj::heap<RpcImpl>(li)),
    laminarInterface(li),
    httpInterface(kj::heap<HttpImpl>(li)),
    ioContext(kj::setupAsyncIo()),
    listeners(kj::heap<kj::TaskSet>(*this)),
    childTasks(*this),
    httpConnections(*this),
    httpReady(kj::newPromiseAndFulfiller<void>())
{
    // RPC task
    // Remove a stale unix socket file first, otherwise bind would fail
    if(rpcBindAddress.startsWith("unix:"))
        unlink(rpcBindAddress.slice(strlen("unix:")).cStr());
    listeners->add(ioContext.provider->getNetwork().parseAddress(rpcBindAddress)
            .then([this](kj::Own<kj::NetworkAddress>&& addr) {
        return acceptRpcClient(addr->listen());
    }));

    // HTTP task
    if(httpBindAddress.startsWith("unix:"))
        unlink(httpBindAddress.slice(strlen("unix:")).cStr());
    listeners->add(ioContext.provider->getNetwork().parseAddress(httpBindAddress)
            .then([this](kj::Own<kj::NetworkAddress>&& addr) {
        // TODO: a better way? Currently used only for testing
        httpReady.fulfiller->fulfill();
        return acceptHttpClient(addr->listen());
    }));

    // handle SIGCHLD
    {
        // Block SIGCHLD for normal delivery and receive it through a
        // signalfd instead, so child exits are handled on the kj loop
        sigset_t mask;
        sigemptyset(&mask);
        sigaddset(&mask, SIGCHLD);
        sigprocmask(SIG_BLOCK, &mask, nullptr);
        int sigchld = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        auto event = ioContext.lowLevelProvider->wrapInputFd(sigchld, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP);
        auto buffer = kj::heapArrayBuilder<char>(PROC_IO_BUFSIZE);
        reapWatch = handleFdRead(event, buffer.asPtr().begin(), [this](const char* buf, size_t){
            // Buffer contains one or more signalfd_siginfo records; only
            // the first is checked since SIGCHLD is the sole signal in
            // the mask and reapChildren handles all exited children
            const struct signalfd_siginfo* siginfo = reinterpret_cast<const struct signalfd_siginfo*>(buf);
            KJ_ASSERT(siginfo->ssi_signo == SIGCHLD);
            laminarInterface.reapChildren();
        }).attach(std::move(event)).attach(std::move(buffer));
    }

    // handle watched paths
    {
        inotify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
        auto event = ioContext.lowLevelProvider->wrapInputFd(inotify_fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP);
        auto buffer = kj::heapArrayBuilder<char>(PROC_IO_BUFSIZE);
        pathWatch = handleFdRead(event, buffer.asPtr().begin(), [this](const char*, size_t){
            // The specific inotify events are ignored: any activity on a
            // watched directory triggers a full config re-read
            laminarInterface.notifyConfigChanged();
        }).attach(std::move(event)).attach(std::move(buffer));
    }
}
|
|
|
|
|
|
|
|
// Defaulted out-of-line rather than in the header; NOTE(review): this
// appears necessary so kj::Own members of types incomplete at the point
// of the class declaration can be destroyed here — confirm against server.h
Server::~Server() = default;
|
|
|
|
|
|
|
|
// Runs the event loop until stop() is called, then performs an orderly
// shutdown. Blocks the calling thread.
void Server::start() {
    // The eventfd is used to quit the server later since we need to trigger
    // a reaction from the event loop
    efd_quit = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK);
    kj::evalLater([this](){
        // The value read is irrelevant; static so the storage outlives
        // this frame while the async read is pending.
        static uint64_t _;
        // NOTE(review): efd_quit is wrapped without TAKE_OWNERSHIP, so the
        // fd is not closed by the wrapper — verify it is closed elsewhere
        auto wakeEvent = ioContext.lowLevelProvider->wrapInputFd(efd_quit);
        return wakeEvent->read(&_, sizeof(uint64_t)).attach(std::move(wakeEvent));
    }).wait(ioContext.waitScope);
    // Execution arrives here when the eventfd is triggered (in stop())

    // Shutdown sequence:
    // 1. stop accepting new connections
    listeners = nullptr;
    // 2. abort current jobs. Most of the time this isn't necessary since
    // systemd stop or other kill mechanism will send SIGTERM to the whole
    // process group.
    laminarInterface.abortAll();
    // 3. wait for all children to close
    childTasks.onEmpty().wait(ioContext.waitScope);
    // 4. run the loop once more to send any pending output to websocket clients
    ioContext.waitScope.poll();
    // 5. return: websockets will be destructed
}
|
|
|
|
|
|
|
|
void Server::stop() {
|
2018-02-24 16:53:11 +00:00
|
|
|
// This method is expected to be called in signal context, so an eventfd
|
|
|
|
// is used to get the main loop to react. See run()
|
2016-07-25 11:59:45 +00:00
|
|
|
eventfd_write(efd_quit, 1);
|
2015-09-13 20:25:26 +00:00
|
|
|
}
|
|
|
|
|
2017-12-04 18:29:19 +00:00
|
|
|
void Server::addDescriptor(int fd, std::function<void(const char*,size_t)> cb) {
|
2017-07-31 05:53:50 +00:00
|
|
|
auto event = this->ioContext.lowLevelProvider->wrapInputFd(fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP);
|
2017-12-07 16:28:12 +00:00
|
|
|
auto buffer = kj::heapArrayBuilder<char>(PROC_IO_BUFSIZE);
|
2018-02-24 16:53:11 +00:00
|
|
|
childTasks.add(handleFdRead(event, buffer.asPtr().begin(), cb).attach(std::move(event)).attach(std::move(buffer)));
|
2015-09-13 20:25:26 +00:00
|
|
|
}
|
|
|
|
|
2018-04-06 15:04:50 +00:00
|
|
|
// Adds a directory to the inotify watch set; writes, creations and
// deletions beneath it will trigger notifyConfigChanged (see the
// pathWatch handler installed in the constructor).
void Server::addWatchPath(const char* dpath) {
    constexpr uint32_t watchMask = IN_ONLYDIR | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE;
    inotify_add_watch(inotify_fd, dpath, watchMask);
}
|
|
|
|
|
2018-02-24 16:53:11 +00:00
|
|
|
// Accepts one incoming HTTP/websocket connection, wraps it in a
// WebsocketConnection, then recursively waits for the next one.
kj::Promise<void> Server::acceptHttpClient(kj::Own<kj::ConnectionReceiver>&& listener) {
    // Take a reference before the listener is moved into the capture,
    // so accept() can still be called on it
    kj::ConnectionReceiver& cr = *listener.get();
    return cr.accept().then(kj::mvCapture(kj::mv(listener),
        [this](kj::Own<kj::ConnectionReceiver>&& listener, kj::Own<kj::AsyncIoStream>&& connection) {
            auto conn = kj::heap<WebsocketConnection>(kj::mv(connection), *httpInterface);
            // delete the connection when either the read or write task completes
            httpConnections.add(conn->pend().exclusiveJoin(conn->writeTask()).attach(kj::mv(conn)));
            return acceptHttpClient(kj::mv(listener));
        }));
}
|
|
|
|
|
2018-02-24 16:53:11 +00:00
|
|
|
// Accepts one incoming Cap'n Proto RPC connection and recursively waits
// for the next. Each connection gets its own RpcConnection context.
kj::Promise<void> Server::acceptRpcClient(kj::Own<kj::ConnectionReceiver>&& listener) {
    // Take a reference before the listener is moved into the capture,
    // so accept() can still be called on it
    kj::ConnectionReceiver& cr = *listener.get();
    return cr.accept().then(kj::mvCapture(kj::mv(listener),
        [this](kj::Own<kj::ConnectionReceiver>&& listener, kj::Own<kj::AsyncIoStream>&& connection) {
            auto server = kj::heap<RpcConnection>(kj::mv(connection), rpcInterface, capnp::ReaderOptions());
            // the RpcConnection lives until the peer disconnects
            childTasks.add(server->network.onDisconnect().attach(kj::mv(server)));
            return acceptRpcClient(kj::mv(listener));
        }));
}
|
|
|
|
|
2016-07-25 11:59:45 +00:00
|
|
|
// returns a promise which will read a chunk of data from the file descriptor
|
|
|
|
// wrapped by stream and invoke the provided callback with the read data.
|
|
|
|
// Repeats until ::read returns <= 0
|
2017-12-07 16:28:12 +00:00
|
|
|
// Reads one chunk (1..PROC_IO_BUFSIZE bytes) from the stream, passes it
// to cb, and recurses for the next chunk. Completes when tryRead reports
// EOF (sz == 0). The caller owns buffer and must keep it (and stream)
// alive for the lifetime of the returned promise, typically via attach().
kj::Promise<void> Server::handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function<void(const char*,size_t)> cb) {
    return stream->tryRead(buffer, 1, PROC_IO_BUFSIZE).then([this,stream,buffer,cb](size_t sz) {
        if(sz > 0) {
            cb(buffer, sz);
            // buffer is a raw non-owning pointer: pass it plainly
            // (the previous kj::mv() on it was a misleading no-op)
            return handleFdRead(stream, buffer, cb);
        }
        return kj::Promise<void>(kj::READY_NOW);
    });
}
|
2018-01-27 11:11:40 +00:00
|
|
|
|
|
|
|
// kj::TaskSet::ErrorHandler callback: Server serves as the error handler
// for the listeners, childTasks and httpConnections task sets.
void Server::taskFailed(kj::Exception &&exception) {
    // An unexpected http connection close can cause an exception, so don't re-throw.
    // TODO: consider re-throwing selected exceptions
    LLOG(INFO, exception);
    //kj::throwFatalException(kj::mv(exception));
}
|