1
0
mirror of https://github.com/ohwgiles/laminar.git synced 2024-10-27 20:34:20 +00:00

Compare commits

..

No commits in common. "master" and "0.1" have entirely different histories.
master ... 0.1

72 changed files with 2031 additions and 7635 deletions

View File

@ -1,5 +1,5 @@
### ###
### Copyright 2015-2024 Oliver Giles ### Copyright 2015 Oliver Giles
### ###
### This file is part of Laminar ### This file is part of Laminar
### ###
@ -16,65 +16,14 @@
### You should have received a copy of the GNU General Public License ### You should have received a copy of the GNU General Public License
### along with Laminar. If not, see <http://www.gnu.org/licenses/> ### along with Laminar. If not, see <http://www.gnu.org/licenses/>
### ###
cmake_minimum_required(VERSION 3.6)
project(laminar) project(laminar)
cmake_minimum_required(VERSION 2.8)
if (${CMAKE_SYSTEM_NAME} STREQUAL "FreeBSD")
# ld.lld is a default option on FreeBSD
set(LLVM_LINKER_IS_LLD ON)
endif()
# ld.lld specific options. There is no sane way in cmake
# to detect if toolchain is actually using ld.lld
if (LLVM_LINKER_IS_LLD)
if (NOT DEFINED LINKER_EMULATION_FLAGS)
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "amd64")
set(LINKER_EMULATION_FLAGS "-melf_x86_64")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(LINKER_EMULATION_FLAGS "-melf_x86_64")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
set(LINKER_EMULATION_FLAGS "-maarch64elf")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "powerpc64le")
set(LINKER_EMULATION_FLAGS "-melf64lppc")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "powerpc64")
set(LINKER_EMULATION_FLAGS "-melf64ppc")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "riscv64")
# llvm17 & riscv64 requires extra step, it is necessary to
# patch 'Elf64.e_flags' (48-th byte) in binary-blob object files
# with value 0x5 - to change soft_float ABI to hard_float ABI
# so they can link with rest of the object files.
set(LINKER_EMULATION_FLAGS "-melf64lriscv")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "arm")
set(LINKER_EMULATION_FLAGS "-marmelf")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7")
set(LINKER_EMULATION_FLAGS "-marmelf")
else()
message(FATAL_ERROR
"Unsupported '${CMAKE_SYSTEM_PROCESSOR}' translation to emulation flag. "
"Please set it explicitly 'cmake -DLINKER_EMULATION_FLAGS=\"-melf_your_arch\" ...'")
endif()
endif()
endif()
set(CMAKE_INCLUDE_CURRENT_DIR ON) set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare") add_definitions("-std=c++11 -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Werror -DDEBUG") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Werror -DDEBUG")
# Allow passing in the version string, for e.g. patched/packaged versions
if(NOT LAMINAR_VERSION AND EXISTS ${CMAKE_SOURCE_DIR}/.git)
execute_process(COMMAND git describe --tags --abbrev=8 --dirty
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE LAMINAR_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if(NOT LAMINAR_VERSION)
set(LAMINAR_VERSION xx-unversioned)
endif()
set_source_files_properties(src/version.cpp PROPERTIES COMPILE_DEFINITIONS
LAMINAR_VERSION=${LAMINAR_VERSION})
# This macro takes a list of files, gzips them and converts the output into # This macro takes a list of files, gzips them and converts the output into
# object files so they can be linked directly into the application. # object files so they can be linked directly into the application.
# ld generates symbols based on the string argument given to its executable, # ld generates symbols based on the string argument given to its executable,
@ -93,11 +42,7 @@ macro(generate_compressed_bins BASEDIR)
DEPENDS ${BASEDIR}/${FILE} DEPENDS ${BASEDIR}/${FILE}
) )
add_custom_command(OUTPUT ${OUTPUT_FILE} add_custom_command(OUTPUT ${OUTPUT_FILE}
COMMAND ${CMAKE_LINKER} ${LINKER_EMULATION_FLAGS} -r -b binary -o ${OUTPUT_FILE} ${COMPRESSED_FILE} COMMAND ld -r -b binary -o ${OUTPUT_FILE} ${COMPRESSED_FILE}
COMMAND ${CMAKE_OBJCOPY}
--rename-section .data=.rodata.alloc,load,readonly,data,contents
--add-section .note.GNU-stack=/dev/null
--set-section-flags .note.GNU-stack=contents,readonly ${OUTPUT_FILE}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${COMPRESSED_FILE} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${COMPRESSED_FILE}
) )
list(APPEND COMPRESSED_BINS ${OUTPUT_FILE}) list(APPEND COMPRESSED_BINS ${OUTPUT_FILE})
@ -112,98 +57,39 @@ add_custom_command(OUTPUT laminar.capnp.c++ laminar.capnp.h
# Zip and compile statically served resources # Zip and compile statically served resources
generate_compressed_bins(${CMAKE_SOURCE_DIR}/src/resources index.html js/app.js generate_compressed_bins(${CMAKE_SOURCE_DIR}/src/resources index.html js/app.js
style.css manifest.webmanifest favicon.ico favicon-152.png icon.png) tpl/home.html tpl/job.html tpl/run.html tpl/browse.html
favicon.ico favicon-152.png icon.png progress.png)
# The code that allows dynamic modifying of index.html requires knowing its original size
add_custom_command(OUTPUT index_html_size.h
COMMAND sh -c '( echo -n "\\#define INDEX_HTML_UNCOMPRESSED_SIZE " && wc -c < "${CMAKE_SOURCE_DIR}/src/resources/index.html" ) > index_html_size.h'
DEPENDS src/resources/index.html)
# Download 3rd-party frontend JS libs... # Download 3rd-party frontend JS libs...
file(DOWNLOAD https://cdnjs.cloudflare.com/ajax/libs/vue/2.6.12/vue.min.js file(DOWNLOAD https://ajax.googleapis.com/ajax/libs/angularjs/1.3.14/angular.min.js
${CMAKE_BINARY_DIR}/js/vue.min.js EXPECTED_MD5 fb192338844efe86ec759a40152fcb8e) js/angular.min.js EXPECTED_MD5 b1137641dbb512a60e83d673f7e2d98f)
file(DOWNLOAD https://raw.githubusercontent.com/drudru/ansi_up/v4.0.4/ansi_up.js file(DOWNLOAD https://ajax.googleapis.com/ajax/libs/angularjs/1.3.14/angular-route.min.js
${CMAKE_BINARY_DIR}/js/ansi_up.js EXPECTED_MD5 b31968e1a8fed0fa82305e978161f7f5) js/angular-route.min.js EXPECTED_MD5 28ef7d7b4349ae0dce602748185ef32a)
file(DOWNLOAD https://cdnjs.cloudflare.com/ajax/libs/Chart.js/3.9.1/chart.min.js file(DOWNLOAD https://ajax.googleapis.com/ajax/libs/angularjs/1.3.14/angular-sanitize.min.js
${CMAKE_BINARY_DIR}/js/Chart.min.js EXPECTED_MD5 7dd5ea7d2cf22a1c42b43c40093d2669) js/angular-sanitize.min.js EXPECTED_MD5 0854eae86bcdf5f92b1ab2b458d8d054)
file(DOWNLOAD https://raw.githubusercontent.com/drudru/ansi_up/v1.3.0/ansi_up.js
js/ansi_up.js EXPECTED_MD5 158566dc1ff8f2804de972f7e841e2f6)
file(DOWNLOAD https://cdnjs.cloudflare.com/ajax/libs/Chart.js/1.0.2/Chart.min.js
js/Chart.min.js EXPECTED_MD5 0d3004601c1a855a3d203502549528a7)
file(DOWNLOAD https://raw.githubusercontent.com/tomsouthall/Chart.HorizontalBar.js/v1.04/Chart.HorizontalBar.js
js/Chart.HorizontalBar.js EXPECTED_MD5 95070a38e69bc56534e1b2086d985270)
file(DOWNLOAD https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css
css/bootstrap.min.css EXPECTED_MD5 5d5357cb3704e1f43a1f5bfed2aebf42)
# ...and compile them # ...and compile them
generate_compressed_bins(${CMAKE_BINARY_DIR} js/vue.min.js generate_compressed_bins(${CMAKE_BINARY_DIR} js/angular.min.js js/angular-route.min.js
js/ansi_up.js js/Chart.min.js) js/angular-sanitize.min.js js/ansi_up.js js/Chart.min.js js/Chart.HorizontalBar.js
css/bootstrap.min.css)
# (see resources.cpp where these are fetched) # (see resources.cpp where these are fetched)
set(LAMINARD_CORE_SOURCES
src/conf.cpp
src/database.cpp
src/laminar.cpp
src/leader.cpp
src/http.cpp
src/resources.cpp
src/rpc.cpp
src/run.cpp
src/server.cpp
src/version.cpp
laminar.capnp.c++
index_html_size.h
)
find_package(CapnProto REQUIRED)
include_directories(${CAPNP_INCLUDE_DIRS})
find_package(SQLite3 REQUIRED)
include_directories(${SQLite3_INCLUDE_DIRS})
find_package(ZLIB REQUIRED)
include_directories(${ZLIB_INCLUDE_DIRS})
find_package(Threads REQUIRED)
include_directories(${Threads_INCLUDE_DIRS})
## Server ## Server
add_executable(laminard ${LAMINARD_CORE_SOURCES} src/main.cpp ${COMPRESSED_BINS}) add_executable(laminard src/database.cpp src/main.cpp src/server.cpp src/laminar.cpp
target_link_libraries(laminard CapnProto::capnp-rpc CapnProto::capnp CapnProto::kj-http CapnProto::kj-async src/conf.cpp src/resources.cpp src/run.cpp laminar.capnp.c++ ${COMPRESSED_BINS})
CapnProto::kj Threads::Threads SQLite::SQLite3 ZLIB::ZLIB) # TODO: some alternative to boost::filesystem?
target_link_libraries(laminard capnp-rpc capnp kj-async kj pthread boost_filesystem boost_system sqlite3)
if (${CMAKE_SYSTEM_NAME} STREQUAL "FreeBSD")
pkg_check_modules(INOTIFY REQUIRED libinotify)
target_link_libraries(laminard ${INOTIFY_LINK_LIBRARIES})
endif()
## Client ## Client
add_executable(laminarc src/client.cpp src/version.cpp laminar.capnp.c++) add_executable(laminarc src/client.cpp laminar.capnp.c++)
target_link_libraries(laminarc CapnProto::capnp-rpc CapnProto::capnp CapnProto::kj-async CapnProto::kj Threads::Threads) target_link_libraries(laminarc capnp-rpc capnp kj-async kj pthread)
## Manpages install(TARGETS laminard laminarc RUNTIME DESTINATION usr/bin)
macro(gzip SOURCE) install(FILES laminar.service DESTINATION usr/lib/systemd/system)
get_filename_component(OUT_FILE ${SOURCE} NAME) install(FILES laminar.conf DESTINATION etc)
add_custom_command(OUTPUT ${OUT_FILE}.gz
COMMAND gzip < ${CMAKE_CURRENT_SOURCE_DIR}/${SOURCE} > ${OUT_FILE}.gz
DEPENDS ${SOURCE})
endmacro()
add_custom_target(laminar-manpages ALL DEPENDS laminard.8.gz laminarc.1.gz)
gzip(etc/laminard.8)
gzip(etc/laminarc.1)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminard.8.gz DESTINATION share/man/man8)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminarc.1.gz DESTINATION share/man/man1)
## Tests
set(BUILD_TESTS FALSE CACHE BOOL "Build tests")
if(BUILD_TESTS)
find_package(GTest REQUIRED)
include_directories(${GTEST_INCLUDE_DIRS} src)
add_executable(laminar-tests ${LAMINARD_CORE_SOURCES} ${COMPRESSED_BINS} test/main.cpp test/laminar-functional.cpp test/unit-conf.cpp test/unit-database.cpp)
target_link_libraries(laminar-tests ${GTEST_LIBRARIES} capnp-rpc capnp kj-http kj-async kj pthread sqlite3 z)
endif()
set(BASH_COMPLETIONS_DIR /usr/share/bash-completion/completions CACHE PATH "Path to bash completions directory")
set(ZSH_COMPLETIONS_DIR /usr/share/zsh/site-functions CACHE PATH "Path to zsh completions directory")
install(TARGETS laminard RUNTIME DESTINATION sbin)
install(TARGETS laminarc RUNTIME DESTINATION bin)
install(FILES etc/laminar.conf DESTINATION /etc)
install(FILES etc/laminarc-completion.bash DESTINATION ${BASH_COMPLETIONS_DIR} RENAME laminarc)
install(FILES etc/laminarc-completion.zsh DESTINATION ${ZSH_COMPLETIONS_DIR} RENAME _laminarc)
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
set(SYSTEMD_UNITDIR /lib/systemd/system CACHE PATH "Path to systemd unit files")
configure_file(etc/laminar.service.in laminar.service @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminar.service DESTINATION ${SYSTEMD_UNITDIR})
endif()

674
COPYING
View File

@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@ -1,41 +1,11 @@
# Laminar CI [![status](https://ci.ohwg.net/badge/laminar.svg)](https://ci.ohwg.net/jobs/laminar) ## laminar
Laminar (https://laminar.ohwg.net) is a lightweight and modular Continuous Integration service for Linux. It is self-hosted and developer-friendly, eschewing a configuration UI in favour of simple version-controllable configuration files and scripts. Laminar is continuous integration with a focus on simplicity and flexibility. Minimal wheels are reinvented. See the [wiki page](https://github.com/ohwgiles/laminar/wiki) for more information
Laminar encourages the use of existing GNU/Linux tools such as `bash` and `cron` instead of reinventing them. ### Features
Although the status and progress front-end is very user-friendly, administering a Laminar instance requires writing shell scripts and manually editing configuration files. That being said, there is nothing esoteric here and the [guide](http://laminar.ohwg.net/docs.html) should be straightforward for anyone with even very basic Linux server administration experience. * Pure C++ backend with few dependencies
* Highly scriptable
* Filesystem based configuration
* Simple and responsive web frontend based on bootstrap
See [the website](https://laminar.ohwg.net) and the [documentation](https://laminar.ohwg.net/docs.html) for more information.
## Building from source
First install development packages for `capnproto (version 0.7.0 or newer)`, `rapidjson`, `sqlite` and `boost` (for the header-only `multi_index_container` library) from your distribution's repository or other source.
On Debian Bookworm, this can be done with:
```bash
sudo apt install capnproto cmake g++ libboost-dev libcapnp-dev libsqlite3-dev \
make rapidjson-dev zlib1g-dev pkg-config
```
Then compile and install laminar with:
```bash
git clone https://github.com/ohwgiles/laminar.git
cd laminar
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
make -j "$(nproc)"
# Warning: the following will overwrite an existing /etc/laminar.conf
sudo make install
```
`make install` includes a systemd unit file. If you intend to use it, consider creating a new user `laminar` or modifying the user specified in the unit file.
## Packaging for distributions
The `pkg` directory contains shell scripts which use docker to build native packages (deb,rpm) for common Linux distributions. Note that these are very simple packages which may not completely conform to the distribution's packaging guidelines, however they may serve as a starting point for creating an official package, or may be useful if the official package lags.
## Contributing
Issues and pull requests via GitHub are most welcome. All pull requests must adhere to the [Developer Certificate of Origin](https://developercertificate.org/).

View File

@ -1,725 +0,0 @@
# Introduction
[Laminar](http://laminar.ohwg.net) is a lightweight and modular Continuous Integration service for Linux. It is self-hosted and developer-friendly, eschewing a configuration web UI in favor of simple version-controllable configuration files and scripts.
Laminar encourages the use of existing GNU/Linux tools such as `bash` and `cron` instead of reinventing them.
Although the status and progress front-end is very user-friendly, administering a Laminar instance requires writing shell scripts and manually editing configuration files. That being said, there is nothing esoteric here and the tutorial below should be straightforward for anyone with even very basic Linux server administration experience.
Throughout this document, the fixed base path `/var/lib/laminar` is used. This is the default path and can be changed by setting `LAMINAR_HOME` in `/etc/laminar.conf` as desired.
## Terminology
- *job*: a task, identified by a name, comprising of one or more executable scripts.
- *run*: a numbered execution of a *job*
---
# Installing Laminar
Since Debian Bullseye, Laminar is available in [the official repositories](https://packages.debian.org/search?searchon=sourcenames&keywords=laminar).
Alternatively, pre-built upstream packages are available for Debian 10 (Buster) on x86_64 and armhf, and for Rocky/CentOS/RHEL 7 and 8 on x86_64.
Finally, Laminar may be built from source for any Linux distribution.
## Installation from upstream packages
Under Debian:
```bash
wget https://github.com/ohwgiles/laminar/releases/download/1.1/laminar_1.1-1.upstream-debian10_amd64.deb
sudo apt install ./laminar_1.1-1.upstream-debian10_amd64.deb
```
Under Rocky/CentOS/RHEL:
```bash
wget https://github.com/ohwgiles/laminar/releases/download/1.1/laminar-1.1.upstream_rocky8-1.x86_64.rpm
sudo dnf install ./laminar-1.1.upstream_rocky8-1.x86_64.rpm
```
Both install packages will create a new `laminar` user and install (but not activate) a systemd service for launching the laminar daemon.
## Building from source
See the [development README](https://github.com/ohwgiles/laminar) for instructions for installing from source.
## Building for Docker
You can build an image that runs `laminard` by default, and contains `laminarc` for use based on `alpine:edge` using the `Dockerfile` in the `docker/` directory.
```bash
# from the repository root:
docker build [-t image:tag] -f docker/Dockerfile .
```
Keep in mind that this is meant to be used as a base image to build from, so it contains only the minimum packages required to run laminar. The only shell available by default is sh (so scripts with `#!/bin/bash` will fail to execute) and it does not have `ssh` or `git`. You can use this image to run a basic build server, but it is recommended that you build a custom image from this base to better suit your needs.
The container will execute `laminard` by default. To start a laminar server with docker you can simply run the image as a daemon, for example:
```bash
docker run -d --name laminar_server -p 8080:8080 -v path/to/laminardir:/var/lib/laminar --env-file path/to/laminar.conf laminar:latest
```
The [`-v` flag](https://docs.docker.com/storage/volumes/#choose-the--v-or---mount-flag) is necessary to persist job scripts and artefacts beyond the container lifetime.
The [`--env-file` flag](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) is necessary to pass configuration from `laminar.conf` to `laminard` because `laminard` does not read `/etc/laminar.conf` directly but expects variables within to be exported by `systemd` or other process supervisor.
Executing `laminarc` may be done in any of the usual ways, for example:
```bash
docker exec -i laminar_server laminarc queue example_task
```
Alternatively, you might [use an external `laminarc`](#Triggering-on-a-remote-laminar-instance).
---
# Service configuration
Use `systemctl start laminar` to start the laminar system service and `systemctl enable laminar` to launch it automatically on system boot.
After starting the service, an empty laminar dashboard should be available at http://localhost:8080
Laminar's configuration file may be found at `/etc/laminar.conf`. Laminar will start with reasonable defaults if no configuration can be found.
## Running on a different HTTP port or Unix socket
Edit `/etc/laminar.conf` and change `LAMINAR_BIND_HTTP` to `IPADDR:PORT`, `unix:PATH/TO/SOCKET` or `unix-abstract:SOCKETNAME`. `IPADDR` may be `*` to bind on all interfaces. The default is `*:8080`.
Do not attempt to run laminar on port 80. This requires running as `root`, and Laminar will not drop privileges when executing job scripts! For a more complete integrated solution (including SSL), run laminar behind a regular webserver acting as a reverse proxy.
## Running behind a reverse proxy
A reverse proxy is required if you want Laminar to share a port with other web services. It is also recommended to improve performance by serving artefacts directly or providing a caching layer for static assets.
If you use [artefacts](#Archiving-artefacts), note that Laminar is not designed as a file server, and better performance will be achieved by allowing the frontend web server to serve the archive directory directly (e.g. using a `Location` directive).
Laminar uses Server Sent Events to provide a responsive, auto-updating display without polling. Most frontend webservers should handle this without any extra configuration.
If you use a reverse proxy to host Laminar at a subfolder instead of a subdomain root, the `<base href>` needs to be updated to ensure all links point to their proper targets. This can be done by setting `LAMINAR_BASE_URL` in `/etc/laminar.conf`.
See [this example configuration file for nginx](https://github.com/ohwgiles/laminar/blob/master/examples/nginx-ssl-reverse-proxy.conf).
## More configuration options
See the [reference section](#Service-configuration-file)
---
# Defining a job
To create a job that downloads and compiles [GNU Hello](https://www.gnu.org/software/hello/), create the file `/var/lib/laminar/cfg/jobs/hello.run` with the following content:
```bash
#!/bin/bash -ex
wget ftp://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz
tar xzf hello-2.10.tar.gz
cd hello-2.10
./configure
make
```
Laminar uses your script's exit code to determine whether to mark the run as successful or failed. If your script is written in bash, the [`-e` option](http://tldp.org/LDP/abs/html/options.html) is helpful for this. See also [Exit and Exit Status](http://tldp.org/LDP/abs/html/exit-status.html).
Don't forget to mark the script executable:
```bash
chmod +x /var/lib/laminar/cfg/jobs/hello.run
```
---
# Triggering a run
To queue execution of the `hello` job, run
```bash
laminarc queue hello
```
In this case, `laminarc` returns immediately, with its error code indicating whether adding the job to the queue was successful. The run number will be printed to standard output.
If the server is busy, a run may wait in the queue for some time. To have `laminarc` instead block until the run leaves the queue and starts executing, use
```bash
laminarc start hello
```
In this case, `laminarc` blocks until the job starts executing, or returns immediately if queueing failed. The run number will be printed to standard output.
Finally, to launch and run the `hello` job to completion, execute
```bash
laminarc run hello
```
In this case, laminarc's return value indicates whether the run completed successfully.
In all cases, a started run means the `/var/lib/laminar/cfg/jobs/hello.run` script will be executed, with a working directory of `/var/lib/laminar/run/hello/1` (or current run number)
The result and log output should be visible in the Web UI at http://localhost:8080/jobs/hello/1
Also note that all the above commands can simultaneously trigger multiple different jobs:
```bash
laminarc queue test-host test-target
```
## Isn't there a "Build Now" button I can click?
This is against the design principles of Laminar and was deliberately excluded. Laminar's web UI is strictly read-only, making it simple to deploy in mixed-permission or public environments without an authentication layer. Furthermore, Laminar tries to encourage ideal continuous integration, where manual triggering is an anti-pattern. Want to make a release? Push a git tag and implement a post-receive hook. Want to re-run a build due to sporadic failure/flaky tests? Fix the tests locally and push a patch. Experience shows that a manual trigger such as a "Build Now" button is often used as a crutch to avoid doing the correct thing, negatively impacting traceability and quality.
## Listing jobs from the command line
`laminarc` may be used to inspect the server state:
- `laminarc show-jobs`: Lists all files matching `/var/lib/laminar/cfg/jobs/*.run` on the server side.
- `laminarc show-running`: Lists all currently running jobs and their run numbers.
- `laminarc show-queued`: Lists all jobs waiting in the queue.
## Triggering a job at a certain time
This is what `cron` is for. To trigger a build of `hello` every day at 0300, add
```
0 3 * * * LAMINAR_REASON="Nightly build" laminarc queue hello
```
to `laminar`'s crontab. For more information about `cron`, see `man crontab`.
`LAMINAR_REASON` is an optional human-readable string that will be displayed in the web UI as the cause of the build.
## Triggering on a git commit
This is what [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) are for. To create a hook that triggers the `example-build` job when a push is made to the `example` repository, create the file `hooks/post-receive` in the `example.git` bare repository.
```bash
#!/bin/bash
LAMINAR_REASON="Push to git repository" laminarc queue example-build
```
For a more advanced example, see [examples/git-post-receive-hook-notes](https://github.com/ohwgiles/laminar/blob/master/examples/git-post-receive-hook-notes)
What if your git server is not the same machine as the laminar instance?
## Triggering on a remote laminar instance
`laminarc` and `laminard` communicate by default over an [abstract unix socket](http://man7.org/linux/man-pages/man7/unix.7.html). This means that any user **on the same machine** can send commands to the laminar service.
On a trusted network, you might want `laminard` to listen for commands on a TCP port instead. To achieve this, in `/etc/laminar.conf`, set
```
LAMINAR_BIND_RPC=*:9997
```
or any interface/port combination you like. This option uses the same syntax as `LAMINAR_BIND_HTTP`.
Then, point `laminarc` to the new location using an environment variable:
```bash
LAMINAR_HOST=192.168.1.1:9997 laminarc queue example
```
If you need more flexibility, consider running the communication channel as a regular unix socket. Setting
```
LAMINAR_BIND_RPC=unix:/var/run/laminar.sock
```
or similar path in `/etc/laminar.conf` will result in a socket with group read/write permissions (`660`), so any user in the `laminar` group can queue a job.
This can be securely and flexibly combined with remote triggering using `ssh`. There is no need to allow the client full shell access to the server machine, the ssh server can restrict certain users to certain commands (in this case `laminarc`). See [the authorized_keys section of the sshd man page](https://man.openbsd.org/sshd#AUTHORIZED_KEYS_FILE_FORMAT) for further information.
## Triggering on a push to GitHub
Consider using [webhook](https://github.com/adnanh/webhook) or a similar application to call `laminarc`.
## Viewing job logs
A job's console output can be viewed on the Web UI at http://localhost:8080/jobs/$NAME/$NUMBER.
Additionally, the raw log output may be fetched over a plain HTTP request to http://localhost:8080/log/$NAME/$NUMBER. The response will be chunked, allowing this mechanism to also be used for in-progress jobs. Furthermore, the special endpoint http://localhost:8080/log/$NAME/latest will redirect to the most recent log output. Be aware that the use of this endpoint may be subject to races when new jobs start.
---
# Job chains
A typical pipeline may involve several steps, such as build, test and deploy. Depending on the project, these may be broken up into separate laminar jobs for maximal flexibility.
The preferred way to accomplish this in Laminar is to use the same method as [regular run triggering](#Triggering-a-run), that is, calling `laminarc` directly in your `example.run` scripts.
```bash
#!/bin/bash -xe
# simultaneously starts example-test-qemu and example-test-target
# and returns a non-zero error code if either of them fail
laminarc run example-test-qemu example-test-target
```
An advantage to using this `laminarc` approach from bash or other scripting language is that it enables highly dynamic pipelines, since you can execute commands like
```bash
if [ ... ]; then
laminarc run example-downstream-special
else
laminarc run example-downstream-regular
fi
laminarc run example-test-$TARGET_PLATFORM
```
`laminarc` reads the `$JOB` and `$RUN` variables set by `laminard` and passes them as part of the queue/start/run request so the dependency chain can always be traced back.
---
# Parameterized runs
Any argument passed to `laminarc` of the form `var=value` will be exposed as an environment variable in the corresponding build scripts. For example:
```bash
laminarc queue example foo=bar
```
In `/var/lib/laminar/cfg/jobs/example.run`:
```bash
#!/bin/bash
if [ "$foo" == "bar" ]; then
...
else
...
fi
```
---
# Pre- and post-build actions
If the script `/var/lib/laminar/cfg/jobs/example.before` exists, it will be executed as part of the `example` job, before the primary `/var/lib/laminar/cfg/jobs/example.run` script.
Similarly, if the script `/var/lib/laminar/cfg/jobs/example.after` exists, it will be executed as part of the `example` job, after the primary `/var/lib/laminar/cfg/jobs/example.run` script. In this script, the `$RESULT` variable will be `success`, `failed`, or `aborted` according to the result of `example.run`.
See also [script execution order](#Script-execution-order)
## Conditionally trigger a downstream job
Often, you may wish to only trigger the `example-test` job if the `example-build` job completed successfully. `example-build.after` might look like this:
```bash
#!/bin/bash -xe
if [ "$RESULT" == "success" ]; then
laminarc queue example-test
fi
```
## Passing data between scripts
Any script can set environment variables that will stay exposed for subsequent scripts of the same run using `laminarc set`. In `example.before`:
```bash
#!/bin/bash
laminarc set foo=bar
```
Then in `example.run`
```bash
#!/bin/bash
echo $foo # prints "bar"
```
---
# Archiving artefacts
Laminar's default behaviour is to remove the run directory `/var/lib/laminar/run/JOB/RUN` after its completion. This prevents the typical CI disk usage explosion and encourages the user to judiciously select artefacts for archive.
Laminar provides an archive directory `/var/lib/laminar/archive/JOB/RUN` and exposes its path in `$ARCHIVE`. `example-build.after` might look like this:
```bash
#!/bin/bash -xe
cp example.out $ARCHIVE/
```
This folder structure has been chosen to make it easy for system administrators to host the archive on a separate partition or network drive.
## Accessing artefacts from an upstream build
Rather than implementing a separate mechanism for this, the path of the upstream's archive should be passed to the downstream run as a parameter. See [Parameterized runs](#Parameterized-runs).
---
# Email and IM Notifications
As well as per-job `.after` scripts, a common use case is to send a notification for every job completion. If the global `after` script at `/var/lib/laminar/cfg/after` exists, it will be executed after every job. One way to use this might be:
```bash
#!/bin/bash -xe
if [ "$RESULT" != "$LAST_RESULT" ]; then
sendmail -t <<EOF
To: engineering@company.com
Subject: Laminar $JOB #$RUN: $RESULT
From: laminar-ci@company.com
Laminar $JOB #$RUN: $RESULT
EOF
fi
```
Of course, you can make this as pretty as you like. A [helper script](#Helper-scripts) can be a good choice here.
If you want to send to different addresses depending on the job, replace `engineering@company.com` above with a variable, e.g. `$RECIPIENTS`, and set `RECIPIENTS=nora@company.com,joe@company.com` in `/var/lib/laminar/cfg/jobs/JOB.env`. See [Environment variables](#Environment-variables).
You could also update the `$RECIPIENTS` variable dynamically based on the build itself. For example, if your run script accepts a parameter `$rev` which is a git commit id, as part of your job's `.after` script you could do the following:
```bash
author_email=$(git show -s --format='%ae' $rev)
laminarc set RECIPIENTS=$author_email
```
See [examples/notify-email-pretty](https://github.com/ohwgiles/laminar/blob/master/examples/notify-email-pretty) and [examples/notify-email-text-log](https://github.com/ohwgiles/laminar/blob/master/examples/notify-email-text-log).
---
# Helper scripts
The directory `/var/lib/laminar/cfg/scripts` is automatically prepended to the `PATH` of all runs. It is a convenient place to drop executables or scripts to help keep individual job scripts clean and concise. A simple example might be `/var/lib/laminar/cfg/scripts/success_trigger`:
```bash
#!/bin/bash -e
if [ "$RESULT" == "success" ]; then
laminarc queue "$@"
fi
```
With this in place, any `.after` script can conditionally trigger a downstream job more succinctly:
```bash
success_trigger example-test
```
Another excellent candidate for helper scripts is automatically sending notifications on job status change.
---
# Data sharing and Workspaces
Often, a job will require a (relatively) large block of (relatively) unchanging data. Examples are a git repository with a long history, or static asset files. Instead of fetching everything from scratch for every run, a job may make use of a *workspace*, a per-job folder that is reused between builds.
For example, the following script creates a tarball containing both compiled output and some static asset files from the workspace:
```bash
#!/bin/bash -ex
git clone /path/to/sources .
make
# Use a hardlink so the arguments to tar will be relative to the CWD
ln $WORKSPACE/StaticAsset.bin ./
tar zc a.out StaticAsset.bin > MyProject.tar.gz
# Archive the artefact (consider moving this to the .after script)
mv MyProject.tar.gz $ARCHIVE/
```
For a project with a large git history, it can be more efficient to store the sources in the workspace:
```bash
#!/bin/bash -ex
cd $WORKSPACE/myproject
git pull
cd -
cmake $WORKSPACE/myproject
make -j4
```
Laminar will automatically create the workspace for a job if it doesn't exist when a job is executed. In this case, the `/var/lib/laminar/cfg/jobs/JOBNAME.init` will be executed if it exists. This is an excellent place to prepare the workspace to a state where subsequent builds can rely on its content:
```bash
#!/bin/bash -e
echo Initializing workspace
git clone git@example.com:company/project.git .
```
**CAUTION**: By default, laminar permits multiple simultaneous runs of the same job. If a job can **modify** the workspace, this might result in inconsistent builds when simultaneous runs access the same content. This is unlikely to be an issue for nightly builds, but for SCM-triggered builds it will be. To solve this, use [contexts](#Contexts) to restrict simultaneous execution of jobs, or consider [flock](https://linux.die.net/man/1/flock).
The following example uses [flock](https://linux.die.net/man/1/flock) to efficiently share a git repository workspace between multiple simultaneous builds:
```bash
#!/bin/bash -xe
# This script expects to be passed the parameter 'rev' which
# should refer to a specific git commit in its source repository.
# The commit ids could have been read from a server-side
# post-commit git hook, where many commits could have been pushed
# at once, but we want to check them all individually. This means
# this job can be executed several times (with different values
# for $rev) simultaneously.
# Locked subshell for modifying the workspace
(
flock 200
cd $WORKSPACE
# Download all the latest commits
git fetch
git checkout $rev
cd -
# Fast copy (hard-link) the source from the specific checkout
# to the build dir. This relies on the fact that git unlinks
# during checkout, effectively implementing copy-on-write.
cp -al $WORKSPACE/src src
) 200>$WORKSPACE/.lock
# run the (much longer) regular build process
make -C src
```
---
# Aborting running jobs
## After a timeout
To configure a maximum execution time in seconds for a job, add a line to `/var/lib/laminar/cfg/jobs/JOBNAME.conf`:
```
TIMEOUT=120
```
## Manually
`laminarc abort $JOBNAME $NUMBER`
---
# Contexts
In Laminar, each run of a job is associated with a context. The context defines an integer number of *executors*, which is the amount of runs which the context will accept simultaneously. A context may also provide additional environment variables.
Uses for this feature include limiting the amount of concurrent CPU-intensive jobs (such as compilation); and controlling access to jobs [executed remotely](#Remote-jobs).
If no contexts are defined, Laminar will behave as if there is a single context named "default", with `6` executors. This is a reasonable default that allows simple setups to work without any consideration of contexts.
## Defining a context
To create a context named "my-env" which only allows a single run at once, create `/var/lib/laminar/cfg/contexts/my-env.conf` with the content:
```
EXECUTORS=1
```
## Associating a job with a context
When trying to start a job, laminar will wait until the job can be matched to a context which has at least one free executor. There are two ways to associate jobs and contexts. You can specify a comma-separated list of patterns `JOBS` in the context configuration file `/var/lib/laminar/cfg/contexts/CONTEXT.conf`:
```
JOBS=amd64-target-*,usage-monitor
```
This approach is often preferred when you have many jobs that need to share limited resources.
Alternatively, you can set
```
CONTEXTS=my-env-*,special_context
```
in `/var/lib/laminar/cfg/jobs/JOB.conf`. This approach is often preferred when you have a small number of jobs that require exclusive access to an environment and you can supply alternative environments (e.g. target devices), because new contexts can be added without modifying the job configuration.
In both cases, Laminar will iterate over the known contexts and associate the run with the first matching context with free executors. Patterns are [glob expressions](http://man7.org/linux/man-pages/man7/glob.7.html).
If `CONTEXTS` is empty or absent (or if `JOB.conf` doesn't exist), laminar will behave as if `CONTEXTS=default` were defined.
## Adding environment to a context
Append desired environment variables to `/var/lib/laminar/cfg/contexts/CONTEXT_NAME.env`:
```
DUT_IP=192.168.3.2
FOO=bar
```
This environment will then be available the run script of jobs associated with this context. Note that these definitions are not expanded by a shell, so `FOO="bar"` would result in a variable `FOO` whose contents *include* double-quotes.
---
# Remote jobs
Laminar provides no specific support, `bash`, `ssh` and possibly NFS are all you need. For example, consider two identical target devices on which test jobs can be run in parallel. You might create a [context](#Contexts) for each, `/var/lib/laminar/cfg/contexts/target{1,2}.conf`:
```
EXECUTORS=1
```
In each context's `.env` file, set the individual device's IP address:
```
TARGET_IP=192.168.0.123
```
And mark the job accordingly in `/var/lib/laminar/cfg/jobs/myproject-test.conf`:
```
CONTEXTS=target*
```
This means the job script `/var/lib/laminar/cfg/jobs/myproject-test.run` can be generic:
```bash
#!/bin/bash -e
ssh root@$TARGET_IP /bin/bash -xe <<"EOF"
uname -a
...
EOF
scp root@$TARGET_IP:result.xml "$ARCHIVE/"
```
Don't forget to add the `laminar` user's public ssh key to the remote's `authorized_keys`.
---
# Docker container jobs
Laminar provides no specific support, but just like [remote jobs](#Remote-jobs) these are easily implementable in plain bash:
```bash
#!/bin/bash
docker run --rm -ti -v $PWD:/root ubuntu /bin/bash -xe <<EOF
git clone http://...
...
EOF
```
For more advanced usage, see [examples/docker-advanced](https://github.com/ohwgiles/laminar/blob/master/examples/docker-advanced)
---
# Colours in log output
Laminar's frontend supports ANSI colours using the [ansi-up library](https://github.com/drudru/ansi_up). Unfortunately, there is no standard way of convincing applications to output colours when not connected to a tty. It is recommended to set [CLICOLOR_FORCE=1](https://bixense.com/clicolors/) in Laminar's [global environment file](#Environment-variables), plus any of the following environment variables that may be relevant (please submit more):
* git: `GIT_CONFIG_PARAMETERS='color.status=always' 'color.ui=always'`
* google test: `GTEST_COLOR=1`
* grep: `GREP_OPTIONS=--color=always`
More intrusive options for other common tools which do not support enabling colours via environment variable:
* gcc and clang: Add `-fdiagnostics-color=always` to compile flags
---
# Customizing the WebUI
## Organising jobs into groups
*Groups* may be used to organise the "Jobs" page into tabs. Edit `/var/lib/laminar/cfg/groups.conf` and define the matched jobs as a [javascript regular expression](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), for example:
```
Builds=compile-\w+
My Fav Jobs=^(target-foo-(build|deploy)|run-benchmarks)$
All=.*
```
Changes to this file are detected immediately and will be visible on next page refresh.
## Adding a description to a job
Edit `/var/lib/laminar/cfg/jobs/$JOBNAME.conf`:
```
DESCRIPTION=Anything here will appear on the job page in the frontend <em>unescaped</em>.
```
## Setting the page title
Change `LAMINAR_TITLE` in `/etc/laminar.conf` to your preferred page title. Laminar must be restarted for this change to take effect.
## Custom HTML template
If it exists, the file `/var/lib/laminar/custom/index.html` will be served by laminar instead of the default markup that is bundled into the Laminar binary. This file can be used to change any aspect of Laminar's WebUI, for example adding menu links or adding a custom stylesheet. Any required assets will need to be served directly from your [HTTP reverse proxy](#Service-configuration) or other HTTP server.
An example customization can be found at [cweagans/semantic-laminar-theme](https://github.com/cweagans/semantic-laminar-theme).
---
# Badges
Laminar will serve a job's current status as a pretty badge at the url `/badge/JOBNAME.svg`. This can be used as a link to your server instance from your Github README.md file or cat blog:
```
<a href="https://my-example-laminar-server.com/jobs/my-project">
<img src="https://my-example-laminar-server.com/badge/my-project.svg">
</a>
```
---
# Reference
## Service configuration file
`laminard` reads the following variables from the environment, which are expected to be sourced by `systemd` from `/etc/laminar.conf`:
- `LAMINAR_HOME`: The directory in which `laminard` should find job configuration and create run directories. Default `/var/lib/laminar`
- `LAMINAR_BIND_HTTP`: The interface/port or unix socket on which `laminard` should listen for incoming connections to the web frontend. Default `*:8080`
- `LAMINAR_BIND_RPC`: The interface/port or unix socket on which `laminard` should listen for incoming commands such as build triggers. Default `unix-abstract:laminar`
- `LAMINAR_TITLE`: The page title to show in the web frontend.
- `LAMINAR_KEEP_RUNDIRS`: Set to an integer defining how many rundirs to keep per job. The lowest-numbered ones will be deleted. The default is 0, meaning all run dirs will be immediately deleted.
- `LAMINAR_ARCHIVE_URL`: If set, the web frontend served by `laminard` will use this URL to form links to artefacts of archived jobs. Must be synchronized with web server configuration.
## Script execution order
When `$JOB` is triggered, the following scripts (relative to `$LAMINAR_HOME/cfg`) may be executed:
- `jobs/$JOB.init` if the [workspace](#Data-sharing-and-Workspaces) did not exist
- `before`
- `jobs/$JOB.before`
- `jobs/$JOB.run`
- `jobs/$JOB.after`
- `after`
## Environment variables
The following variables are available in run scripts:
- `RUN` integer number of this *run*
- `JOB` string name of this *job*
- `RESULT` string run status: "success", "failed", etc.
- `LAST_RESULT` string previous run status
- `WORKSPACE` path to this job's workspace
- `ARCHIVE` path to this run's archive
- `CONTEXT` the context of this run
In addition, `$LAMINAR_HOME/cfg/scripts` is prepended to `$PATH`. See [helper scripts](#Helper-scripts).
Laminar will also export variables in the form `KEY=VALUE` found in these files:
- `env`
- `contexts/$CONTEXT.env`
- `jobs/$JOB.env`
Note that definitions in these files are not expanded by a shell, so `FOO="bar"` would result in a variable `FOO` whose contents *include* double-quotes.
Finally, variables supplied on the command-line call to `laminarc queue`, `laminarc start` or `laminarc run` will be available. See [parameterized runs](#Parameterized-runs)
## laminarc
`laminarc` commands are:
- `queue [JOB [PARAMS...]]...` adds one or more jobs to the queue with optional parameters, returning immediately.
- `start [JOB [PARAMS...]]...` starts one or more jobs with optional parameters, returning when the jobs begin execution.
- `run [JOB [PARAMS...]]...` triggers one or more jobs with optional parameters and waits for the completion of all jobs.
- `--next` may be passed before `JOB` in order to place the job at the front of the queue instead of at the end.
- `set [VARIABLE=VALUE]...` sets one or more variables to be exported in subsequent scripts for the run identified by the `$JOB` and `$RUN` environment variables
- `show-jobs` shows the known jobs on the server (`$LAMINAR_HOME/cfg/jobs/*.run`).
- `show-running` shows the currently running jobs with their numbers.
- `show-queued` shows the names of the jobs waiting in the queue.
- `abort JOB NUMBER` manually aborts a currently running job by name and number.
`laminarc` connects to `laminard` using the address supplied by the `LAMINAR_HOST` environment variable. If it is not set, `laminarc` will first attempt to use `LAMINAR_BIND_RPC`, which will be available if `laminarc` is executed from a script within `laminard`. If neither `LAMINAR_HOST` nor `LAMINAR_BIND_RPC` is set, `laminarc` will assume a default host of `unix-abstract:laminar`.
All commands return zero on success or a non-zero code if the command could not be executed. `laminarc run` will return a non-zero exit status if any executed job failed.

View File

@ -1,45 +0,0 @@
# Minimal Alpine-based image for Laminar CI. Runs laminard by default
# (see ENTRYPOINT/CMD below); laminarc is installed alongside it.
FROM alpine:edge
# Port of the web frontend (laminard's default LAMINAR_BIND_HTTP is *:8080)
EXPOSE 8080
LABEL org.label-schema.name="laminar" \
org.label-schema.description="Fast and lightweight Continuous Integration" \
org.label-schema.usage="/usr/doc/UserManual.md" \
org.label-schema.url="https://laminar.ohwg.net" \
org.label-schema.vcs-url="https://github.com/ohwgiles/laminar" \
org.label-schema.schema-version="1.0" \
org.label-schema.docker.cmd="docker run -d -p 8080:8080 laminar"
# Runtime dependencies only; build tools are installed and removed in the
# build layer below. -X adds the edge/testing repository for this command.
RUN apk add --no-cache -X http://dl-3.alpinelinux.org/alpine/edge/testing/ \
sqlite-dev \
zlib \
capnproto \
tini
ADD UserManual.md /usr/doc/
ADD . /build/laminar
# Build and install from source in a single layer so that the virtual
# ".build" package group and the source tree can be deleted again without
# bloating the final image.
# NOTE(review): cmake is invoked without a source-directory argument --
# presumably "." was intended here; confirm against the build instructions.
RUN apk add --no-cache --virtual .build -X http://dl-3.alpinelinux.org/alpine/edge/testing/ \
build-base \
cmake \
capnproto-dev \
boost-dev \
zlib-dev \
rapidjson-dev && \
cd /build/laminar && \
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr && \
make -j4 && \
make install && \
apk del .build && \
rm -rf /build
# Create laminar system user in "users" group
RUN adduser -SDh /var/lib/laminar -g 'Laminar' -G users laminar
# Set the working directory to the laminar user's home
WORKDIR /var/lib/laminar
# All subsequent instructions and the container process itself run as the
# unprivileged "laminar" user
USER laminar
# tini runs as PID 1 so that zombie child processes are reaped
ENTRYPOINT [ "/sbin/tini", "--" ]
CMD [ "laminard" ]

View File

@ -1,64 +0,0 @@
###
### LAMINAR_HOME
###
### Root location containing laminar configuration, database,
### build workspaces and archive.
###
### Default: /var/lib/laminar
###
#LAMINAR_HOME=/var/lib/laminar
### LAMINAR_BIND_HTTP
###
### Interface on which laminard will bind to serve the Web UI.
### May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME
###
### Default: *:8080
###
#LAMINAR_BIND_HTTP=*:8080
### LAMINAR_BIND_RPC
###
### Interface on which laminard will bind to accept RPC from laminarc.
### May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME
###
### Default: unix-abstract:laminar
#LAMINAR_BIND_RPC=unix-abstract:laminar
###
### LAMINAR_TITLE
###
### Page title to show in web frontend
###
#LAMINAR_TITLE=
###
### LAMINAR_KEEP_RUNDIRS
###
### Setting this prevents the immediate deletion of job rundirs
### $LAMINAR_HOME/run/$JOB/$RUN. Value should be an integer representing
### the number of rundirs to keep.
###
### Default: 0
###
#LAMINAR_KEEP_RUNDIRS=0
###
### LAMINAR_BASE_URL
###
### Base url for the frontend. This affects the <base href> tag and needs
### to be set if Laminar runs behind a reverse-proxy that hosts Laminar
### within a subfolder (rather than at a subdomain root)
###
#LAMINAR_BASE_URL=/
###
### LAMINAR_ARCHIVE_URL
###
### Base url used to request artifacts. Laminar can serve build
### artifacts (and it will if you leave this unset), but it
### uses a very naive and inefficient method. Best to let a real
### webserver handle serving those requests.
###
#LAMINAR_ARCHIVE_URL=http://backbone.example.com/ci/archive/

View File

@ -1,28 +0,0 @@
# Bash completion file for laminarc
# vim: ft=sh
# Completes the sub-command in argument position 1, then job names
# (via `laminarc show-jobs`) or running job names/numbers
# (via `laminarc show-running`) for the commands that take them.
_laminarc() {
local cur prev words cword
_init_completion || return
if [ "$cword" -gt 1 ]; then
# A sub-command has already been typed; complete its arguments
case "${words[1]}" in
queue|start|run)
if [ "$cword" -eq 2 ]; then
COMPREPLY+=($(compgen -W "$(laminarc show-jobs)" -- ${cur}))
fi
;;
abort)
# abort takes JOB then NUMBER; show-running prints colon-separated fields
if [ "$cword" -eq 2 ]; then
COMPREPLY+=($(compgen -W "$(laminarc show-running | cut -d : -f 1)" -- ${cur}))
elif [ "$cword" -eq 3 ]; then
COMPREPLY+=($(compgen -W "$(laminarc show-running | cut -d : -f 2)" -- ${cur}))
fi
;;
esac
else
# Completing the sub-command itself
local cmds="queue start run set show-jobs show-queued show-running abort"
COMPREPLY+=($(compgen -W "${cmds}" -- ${cur}))
fi
}
complete -F _laminarc laminarc

View File

@ -1,34 +0,0 @@
#compdef laminarc
#autoload
# Zsh completion for laminarc: offers the operation name in position 2,
# then job names (via `laminarc show-jobs`) or running job names/numbers
# (via `laminarc show-running`) for the operations that take them.
_laminarc() {
if (( CURRENT == 2 )); then
_values "Operation" \
"queue" \
"start" \
"run" \
"set" \
"show-jobs" \
"show-queued" \
"show-running" \
"abort"
else
case "${words[2]}" in
queue|start|run)
if (( CURRENT == 3 )); then
_values "Jobs" $(laminarc show-jobs)
fi
;;
abort)
# abort takes JOB then NUMBER; show-running prints colon-separated fields
if (( CURRENT == 3 )); then
_values "Jobs" $(laminarc show-running | cut -d : -f 1)
elif (( CURRENT == 4 )); then
_values "Runs" $(laminarc show-running | cut -d : -f 2)
fi
;;
esac
fi
}
_laminarc
# vim: ft=zsh

View File

@ -1,74 +0,0 @@
.Dd Apr 04, 2019
.Dt LAMINARC 1
.Sh NAME
.Nm laminarc
\-
Laminar CI client application
.Sh SYNOPSIS
.Nm laminarc Li queue \fIJOB\fR [\fIPARAM=VALUE...\fR] ...
.Nm laminarc Li start \fIJOB\fR [\fIPARAM=VALUE...\fR] ...
.Nm laminarc Li run \fIJOB\fR [\fIPARAM=VALUE...\fR] ...
.Nm laminarc Li set \fIPARAM=VALUE...\fR
.Nm laminarc Li show-jobs
.Nm laminarc Li show-running
.Nm laminarc Li show-queued
.Nm laminarc Li abort \fIJOB\fR \fINUMBER\fR
.Sh DESCRIPTION
The
.Nm laminarc
program connects to a Laminar server and performs one of the following operations:
.Bl -tag
.It Sy queue
adds job(s) (with optional parameters) to the queue and returns immediately.
.It Sy start
adds job(s) (with optional parameters) to the queue and returns when the jobs
begin execution.
.It Sy run
adds job(s) (with optional parameters) to the queue and returns when the jobs
complete execution. The exit code will be non-zero if any of the runs does
not complete successfully.
.It \t
\fB--next\fR may be passed to \fBqueue\fR, \fBstart\fR or \fBrun\fR in order
to place the job at the front of the queue instead of at the end.
.It Sy set
sets one or more parameters to be exported as environment variables in subsequent
scripts for the run identified by the $JOB and $RUN environment variables.
This is primarily intended for use from within a job execution, where those
variables are already set by the server.
.It Sy show-jobs
list jobs known to the server.
.It Sy show-running
list the currently running jobs with their numbers.
.It Sy show-queued
list the names and numbers of the jobs waiting in the queue.
.It Sy abort
manually abort a currently running job by name and number.
.El
.Pp
The laminar server to connect to is read from the
.Ev LAMINAR_HOST
environment variable. If empty, it falls back to
.Ev LAMINAR_BIND_RPC
and finally defaults to
.Ad unix-abstract:laminar
.Sh ENVIRONMENT
.Bl -tag
.It Ev LAMINAR_HOST
address of server to connect. May be of the form
.Ad IP:PORT,
.Ad unix:PATH/TO/SOCKET
or
.Ad unix-abstract:NAME
.It Ev LAMINAR_BIND_RPC
fallback server address variable. It is set by
.Nm laminard
during execution of scripts.
.El
.Sh SEE ALSO
.Xr laminard 8
.Sh AUTHORS
.An Oliver Giles
created Laminar CI.
.An Dmitry Bogatov
created this manual page for the Debian project (but it can be used
by others).

View File

@ -1,56 +0,0 @@
.Dd Apr 03, 2019
.Dt LAMINARD 8
.Sh NAME
.Nm laminard
\-
Laminar CI server
.Sh SYNOPSIS
.Nm laminard Op Fl v
.Sh DESCRIPTION
Start Laminar CI server in the foreground. If option
.Fl v
is specified, verbose logging is enabled. Other aspects of
operation are controlled by environment variables.
.Sh ENVIRONMENT
.Bl -tag
.It Ev LAMINAR_HOME
Root location containing laminar configuration, database, build
workspaces and archive.
.Pp
Default: /var/lib/laminar
.It Ev LAMINAR_BIND_HTTP
Interface on which laminard will bind to serve the Web UI.
May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME
.Pp
Default: *:8080
.It Ev LAMINAR_BIND_RPC
Interface on which laminard will bind to accept RPC from laminarc.
May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME
.Pp
Default: unix-abstract:laminar
.It Ev LAMINAR_TITLE
Page title to show in web frontend
.It Ev LAMINAR_KEEP_RUNDIRS
Setting this prevents the immediate deletion of job rundirs
$LAMINAR_HOME/run/$JOB/$RUN. Value should be an integer representing
the number of rundirs to keep.
.Pp
Default: 0
.It Ev LAMINAR_ARCHIVE_URL
Base url used to request artifacts. Laminar can serve build artifacts
(and it will if you leave this unset), but it uses a very naive and
inefficient method. Best to let a real webserver handle serving those
requests.
.El
.Sh FILES
.Bl -tag
.It Pa /etc/laminar.conf
Variable assignments in this file are exported by systemd or other
init system before launching the system-wide installation of Laminar.
.El
.Sh AUTHORS
.An Oliver Giles
created Laminar CI.
.An Dmitry Bogatov
created this manual page for the Debian project (but it can be used
by others).

View File

@ -0,0 +1,21 @@
#!/bin/bash
# Emits a complete HTML test report page on stdout: a Bootstrap-styled
# header, then the XML test results transformed by testreport.xsl.
# Usage: testreport.sh path/to/test-results.xml
# $lJobName and $lBuildNum are expanded into the page heading; they are
# expected to be set in the environment by the caller.
cat <<EOF
<!doctype html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Laminar</title>
<link href="/css/bootstrap.min.css" rel="stylesheet">
</head>
<body>
<div class="col-xs-12">
<h2>Test report for $lJobName #$lBuildNum</h2>
EOF
# Transform the XML report given as $1 with the stylesheet that lives
# alongside this script; its output forms the body of the page.
xsltproc "$(dirname ${BASH_SOURCE[0]})/testreport.xsl" "$1";
cat <<EOF
</div>
</body>
</html>
EOF

View File

@ -0,0 +1,42 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Transforms JUnit/gtest-style XML test results into HTML fragments
     using Bootstrap list-group markup; intended to be embedded into a
     full page by a wrapper script such as testreport.sh. -->
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="no"/>
<!-- Per-suite heading, summary stats, the list of testcases, then any
     captured stdout/stderr. -->
<xsl:template match="testsuite">
<h3>Test Suite: <xsl:value-of select="@name" /></h3>
<dl class="dl-horizontal">
<dt>Tests run:</dt><dd><xsl:value-of select="@tests" /></dd>
<dt>Failures:</dt><dd><xsl:value-of select="@failures" /></dd>
<dt>Errors:</dt><dd><xsl:value-of select="@errors" /></dd>
<dt>Elapsed time:</dt><dd><xsl:value-of select="@time" /></dd>
</dl>
<ul class="list-group">
<xsl:apply-templates select="testcase" />
</ul>
<xsl:apply-templates select="system-out" />
<xsl:apply-templates select="system-err" />
</xsl:template>
<!-- A testcase with any child element (i.e. a failure) is rendered as a
     red list item including the failure details; otherwise a green item. -->
<xsl:template match="testcase">
<xsl:choose>
<xsl:when test="*">
<li class="list-group-item list-group-item-danger"><xsl:value-of select="@name" />
<xsl:apply-templates select="failure" />
</li>
</xsl:when>
<xsl:otherwise>
<li class="list-group-item list-group-item-success"><xsl:value-of select="@name" /></li>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="failure">
<pre>
<xsl:value-of select="@message"/>
</pre>
</xsl:template>
<xsl:template match="system-out"><h5>Standard output:</h5><pre><xsl:value-of select="." /></pre></xsl:template>
<xsl:template match="system-err"><h5>Standard error:</h5><pre><xsl:value-of select="." /></pre></xsl:template>
</xsl:stylesheet>

View File

@ -1,53 +0,0 @@
#!/bin/bash -eu
# Example laminar job: build a static "hello world" binary inside a
# throwaway docker container, demonstrating two ways of capturing the
# tag of an image built from an inline Dockerfile.
# Any failing command in a pipe will cause an error, instead
# of just an error in the last command in the pipe
set -o pipefail
# Log commands executed
set -x
# Simple way of getting the docker build tag:
# (<<\EOF quotes the delimiter, so nothing inside the inline Dockerfile
# is expanded by this shell)
tag=$(docker build -q - <<\EOF
FROM debian:bookworm
RUN apt-get update && apt-get install -y build-essential
EOF
)
# But -q suppresses the log output. If you want to keep it,
# you could use the following fancier way:
exec {pfd}<><(:) # get a new pipe
docker build - <<\EOF |
FROM debian:bookworm
RUN apt-get update && apt-get install -y build-essential
EOF
tee >(awk '/Successfully built/{print $3}' >&$pfd) # parse output to pipe
read tag <&$pfd # read tag back from pipe
exec {pfd}<&- # close pipe
# Alternatively, you can use the -t option to docker build
# to give the built image a name to refer to later. But then
# you need to ensure that it does not conflict with any other
# images, and handle cases where multiple instances of the
# job attempt to update the tagged image.
# If you want the image to be cleaned up on exit:
trap "docker rmi $tag" EXIT
# Now use the image to build something:
docker run -i --rm \
-v "$PWD:$PWD" \
-w "$PWD" \
-u $(id -u):$(id -g) \
$tag /bin/bash -eux \
<<EOF
# The passed options mean we keep our current working
# directory and user, so no permission problems on the
# artifacts produced within the container.
echo 'main(){puts("hello world");}' | gcc -x c -static -o hello -
EOF
# Test the result
./hello

View File

@ -1,117 +0,0 @@
#!/bin/bash
# This example script takes the XML output of a gtest run and formats
# it as HTML. It can easily be adapted to other XML test output formats
# such as JUnit or CTest
# Prepare xml with ./path/to/test --gtest_output=xml:path/to/output.xml
# Usage: format-test-results test_result.xml > output.html
# Expects laminar's $JOB and $RUN variables in the environment; they are
# interpolated into the page title and heading.
if [ ! -f "$1" ]; then
echo "File not found: \"$1\""
exit 1
fi
# The XSL stylesheet is embedded below and fed to xsltproc via process
# substitution; <<\EOF quotes the delimiter so the $JOB/$RUN occurrences
# inside remain literal XSLT parameters (bound by --stringparam).
xsltproc --stringparam JOB $JOB --stringparam RUN $RUN <(cat <<\EOF
<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="yes" />
<xsl:template match="/">
<html>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test report for <xsl:value-of select="$JOB" /> #<xsl:value-of select="$RUN" /></title>
<style>
body { font-family: Helvetica Neue, Helvetica, Arial, sans-serif; }
label:hover { cursor: pointer; }
label:before { content: '+ ' }
input[type=checkbox].toggle-collapse { display: none; }
input[type=checkbox].toggle-collapse~.collapsible { display: none; }
input[type=checkbox].toggle-collapse:checked~.collapsible { display: block; }
code { white-space: pre-wrap; color: white; }
.failure { background-color: #c73030; color: white; }
table { min-width: 720px; border-collapse: collapse; }
td, th { padding: 5px; }
tr:nth-child(even) td { padding: 0; }
tr:nth-child(even) { border-bottom: 1px solid #b3abab; }
.testcase { padding: 5px; }
.testcase.success:before { content: '✔ '; color: green; }
</style>
</head>
<body>
<h1><xsl:value-of select="$JOB" /> #<xsl:value-of select="$RUN" /></h1>
<h2>Test Report</h2>
<table>
<thead>
<tr>
<th>Suite</th>
<th>Tests run</th>
<th>Failures</th>
<th>Errors</th>
<th>Elapsed time</th>
</tr>
</thead>
<xsl:apply-templates select="testsuites" />
</table>
</body>
</html>
</xsl:template>
<xsl:template match="testsuite">
<xsl:variable name="result">
<xsl:choose>
<xsl:when test="(@failures &gt; 0) or (@errors &gt; 0)">failure</xsl:when>
<xsl:otherwise>success</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<tr class="{$result}">
<td>
<label for="suite-{@name}"><xsl:value-of select="@name" /></label>
</td>
<td><xsl:value-of select="@tests" /></td>
<td><xsl:value-of select="@failures" /></td>
<td><xsl:value-of select="@errors" /></td>
<td><xsl:value-of select="@time" /></td>
</tr>
<tr class="toggle-target">
<td colspan="5">
<input class="toggle-collapse" id="suite-{@name}" type="checkbox" />
<div class="collapsible" style="padding-left: 15px;">
<xsl:apply-templates select="testcase" />
</div>
</td>
</tr>
</xsl:template>
<xsl:template match="testcase">
<xsl:choose>
<!-- has child nodes? -->
<xsl:when test="*">
<div class="testcase failure">
<xsl:value-of select="@name" /><br />
<xsl:apply-templates select="failure" />
</div>
</xsl:when>
<xsl:otherwise>
<div class="testcase success">
<xsl:value-of select="@name" />
</div>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="failure">
<div style="padding: 5px; background-color: #313235">
<code>
<xsl:value-of select="@message"/>
</code>
</div>
</xsl:template>
</xsl:stylesheet>
EOF
) "$1"

View File

@ -1,42 +0,0 @@
#!/bin/bash -e
# Simple post-receive hook that triggers a laminar run
# for every commit pushed to every branch, and annotates
# the commit with the run number using a git note.
# On the cloned repository, useful config is
# git config --add remote.origin.fetch "+refs/notes/*:refs/notes/*"
# to automatically fetch all notes from the origin, and
# git config --add notes.displayRef "refs/notes/*"
# to display all notes in the git log by default
# The laminar job to trigger
LAMINAR_JOB=my-project
# Default notes ref is refs/notes/commits
NOTES_REF=refs/notes/ci
# For each ref pushed...
while read old new ref; do
# Skip tags, notes, etc. Only do heads.
# Extend this to only trigger on specific branches.
if [[ $ref != refs/heads/* ]]; then
continue
fi
# Otherwise, for each new commit in the ref...
# (to only trigger on the newest, set commit=$new and delete the loop)
git rev-list $([[ $old =~ ^0+$ ]] && echo $new || echo $old..$new) | while read commit; do
# Queue the laminar run
run=$(laminarc queue $LAMINAR_JOB commit=$commit ref=$ref)
echo "Started Laminar $run for commit $commit to ref $ref"
# Add a git note about the run
blob=$(echo -n "Laminar-Run: $run" | git hash-object -w --stdin)
if last_note=$(git show-ref -s $NOTES_REF); then
git read-tree $last_note
p_arg=-p
fi
git update-index --add --cacheinfo 100644 $blob $commit
tree=$(git write-tree)
new_note=$(echo "Notes added by post-receive hook" | git commit-tree $tree $p_arg $last_note)
git update-ref $NOTES_REF $new_note $last_note
done
done

View File

@ -1,52 +0,0 @@
# Example nginx reverse-proxy configuration for serving laminar over TLS.
# Plain-http server: answers ACME challenges, redirects everything else.
server {
listen [::]:80;
listen 80;
server_name laminar.example.com;
# rule for letsencrypt ACME challenge requests
location ^~ /.well-known/acme-challenge/ {
default_type "text/plain";
alias /srv/www/acme-challenge/;
}
# redirect all other http to https
return 301 https://$server_name$request_uri;
}
server {
# http2 is recommended because browsers will only open a small number of concurrent SSE streams over http1
listen [::]:443 ssl http2;
listen 443 ssl http2;
server_name laminar.example.com;
# modern tls only, see https://syslink.pl/cipherlist/ for a more complete example
ssl_protocols TLSv1.3;
ssl_ciphers EECDH+AESGCM:EDH+AESGCM;
# set according to ACME/letsencrypt client
ssl_certificate /path/to/certificate.crt;
ssl_certificate_key /path/to/private.key;
# use "location /" if laminar is to be accessible at the (sub)domain root.
# alternatively, use a subdirectory such as "location /my-laminar/" and ensure that
# LAMINAR_BASE_URL=/my-laminar/ accordingly.
location / {
# set proxy_pass according to LAMINAR_BIND_HTTP.
# note that the laminar default for LAMINAR_BIND_HTTP is *:8080, which binds on all interfaces
# instead of just the loopback device and is almost certainly not what you want if you are using
# a reverse proxy. It should be set to 127.0.0.1:8080 at a minimum, or use unix sockets for more
# fine-grained control of permissions.
# see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass
# and https://laminar.ohwg.net/docs.html#Running-on-a-different-HTTP-port-or-Unix-socket
proxy_pass http://127.0.0.1:8080/;
# required to allow laminar's SSE stream to pass correctly
proxy_http_version 1.1;
proxy_set_header Connection "";
}
# have nginx serve artefacts directly rather than having laminard do it
location /archive/ {
alias /var/lib/laminar/archive/;
}
}

View File

@ -1,55 +0,0 @@
#!/bin/bash -e
# Sends an HTML-formatted notification email for a completed run, showing
# a green tick or red cross depending on the result and linking to the run.
# Expects laminar's $JOB, $RUN and $RESULT variables in the environment.
# IMPORTANT: change these to appropriate values, or fetch them, for example
# from the environment or from $(git show -s --format='%ae' $rev)
TO_EMAIL=engineering@example.com
FROM_EMAIL=laminar@example.com
LAMINAR_URL=${LAMINAR_BASE_URL:-http://localhost:8080}
LAMINAR_TITLE=${LAMINAR_TITLE:-Laminar CI}
# Choose the inline SVG icon: green tick on success, red cross otherwise
if [[ $RESULT = "success" ]]; then
SVGICON=$(cat <<-EOF
<svg viewBox="0 0 100 100" width="24px">
<path fill="#74af77" d="m 23,46 c -6,0 -17,3 -17,11 0,8 9,30 12,32 3,2 14,5 20,-2 6,-6 24,-36
56,-71 5,-3 -9,-8 -23,-2 -13,6 -33,42 -41,47 -6,-3 -5,-12 -8,-15 z" />
</svg>
EOF
)
else
SVGICON=$(cat <<-EOF
<svg viewBox="0 0 100 100" width="24px">
<path fill="#883d3d" d="m 19,20 c 2,8 12,29 15,32 -5,5 -18,21 -21,26 2,3 8,15 11,18 4,-6 17,-21
21,-26 5,5 11,15 15,20 8,-2 15,-9 20,-15 -3,-3 -17,-18 -20,-24 3,-5 23,-26 30,-33 -3,-5 -8,-9
-12,-12 -6,5 -26,26 -29,30 -6,-8 -11,-15 -15,-23 -3,0 -12,5 -15,7 z" />
</svg>
EOF
)
fi
# sendmail -t reads the recipients from the To: header of the message
sendmail -t <<EOF
From: $FROM_EMAIL
To: $TO_EMAIL
Subject: $JOB #$RUN: $RESULT
Mime-Version: 1.0
Content-Type: text/html; charset=utf-8
<html lang="en">
<body bgcolor="#efefef" style="margin: 0; font-family: Helvetica Neue, Helvetica, Arial, sans-serif">
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr><td align="center">
<table border="0" cellspacing="0" cellpadding="15" bgcolor="#ffffff">
<tr bgcolor="#2f3340">
<td style="font-size: 28px; color: #ffffff;">$LAMINAR_TITLE</td></tr>
<tr>
<td style="font-size: 26px">
$SVGICON
<a href="$LAMINAR_URL/jobs/$JOB/$RUN">$JOB #$RUN</a>
</td>
</tr>
</table>
</td></tr>
</table>
</body>
</html>
EOF

View File

@ -1,18 +0,0 @@
#!/bin/bash -e
# Sends a plain-text notification email for a completed run; the body is
# the run's log, fetched from the laminar web server.
# Expects laminar's $JOB, $RUN and $RESULT variables in the environment.
# IMPORTANT: change these to appropriate values, or fetch them, for example
# from the environment or from $(git show -s --format='%ae' $rev)
TO_EMAIL=engineering@example.com
FROM_EMAIL=laminar@example.com
LAMINAR_URL=${LAMINAR_BASE_URL:-http://localhost:8080}
# sendmail -t reads the recipients from the To: header of the message
sendmail -t <<EOF
From: $FROM_EMAIL
To: $TO_EMAIL
Subject: $JOB #$RUN: $RESULT
Mime-Version: 1.0
Content-Type: text/plain; charset=utf-8
$(curl -s $LAMINAR_URL/log/$JOB/$RUN)
EOF

View File

@ -1,18 +0,0 @@
#!/bin/bash -e
# Sends a message from a specified bot to a specific telegram chat ID.
# See https://core.telegram.org/bots
# Expects laminar's $JOB, $RUN and $RESULT variables in the environment.
# IMPORTANT: modify this to your real bot token and chat ID
BOT_TOKEN=123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
CHAT_ID=10000000
LAMINAR_URL=${LAMINAR_BASE_URL:-http://localhost:8080}
# The [[ ... ]] test makes the script's exit status reflect whether the
# Telegram API responded with "ok": true
[[ $(curl -sS https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
-d chat_id=$CHAT_ID \
-d parse_mode=HTML \
-d text="<a href=\"$LAMINAR_URL/jobs/$JOB/$RUN\">$JOB #$RUN</a> $RESULT" \
| jq .ok) == true ]]

View File

@ -1,46 +0,0 @@
#!/usr/bin/env gnuplot
# Deeper insights can be obtained by querying Laminar's database directly.
# This example uses gnuplot to create a graph of the distribution of the
# average run time of jobs.
# Requires $LAMINAR_HOME to be set in the environment.
# The following will output a png...
set terminal pngcairo size 800,580 enhanced font 'Helvetica,10'
set output 'build-time-distribution.png'
# ..comment it out to use an interactive widget
# plot style
set tics font "Helvetica,10"
set title font "Helvetica,11"
set xtics nomirror
set ytics nomirror
set border 3 back lt 1 lc rgb "#808080"
set grid back lt 0 lc rgb "#d0d0d0" lw 0.5
set style line 1 lt 1 lc rgb "#7483af" lw 2
# Fetch the path to Laminar's sqlite database
db = system("echo $LAMINAR_HOME") . '/laminar.sqlite'
# Label the axes
set xtics ("<30s" 0, "30s-1m" 1, "1m-5m" 2, "5m-10m" 3, "10m-20m" 4, "20m-40m" 5, "40m-60m" 6, ">60m" 7)
set ylabel "Number of jobs"
set xlabel "Average run time"
set title "Distribution of average run times"
# A single sqlite query buckets each job's average runtime (in seconds,
# completedAt-startedAt) into the eight ranges labelled above; sqlite
# prints one count per line, which gnuplot plots against the row index.
plot '< sqlite3 -separator $''\n'' ' . db . ' \
"WITH ba AS (SELECT name,AVG(completedAt-startedAt) a FROM builds GROUP BY name) SELECT \
COUNT(CASE WHEN a < 30 THEN 1 END), \
COUNT(CASE WHEN a >= 30 AND a < 60 THEN 1 END), \
COUNT(CASE WHEN a >= 60 AND a < 300 THEN 1 END), \
COUNT(CASE WHEN a >= 300 AND a < 600 THEN 1 END), \
COUNT(CASE WHEN a >= 600 AND a < 1200 THEN 1 END), \
COUNT(CASE WHEN a >= 1200 AND a < 2400 THEN 1 END), \
COUNT(CASE WHEN a >= 2400 AND a < 3600 THEN 1 END), \
COUNT(CASE WHEN a >= 3600 THEN 1 END) FROM ba;"' \
using 0:1 with linespoints title '' ls 1
# uncomment this if using an interactive window
#pause mouse close
# Release the output
set output

34
laminar.conf Normal file
View File

@ -0,0 +1,34 @@
###
### LAMINAR_HOME
###
### Root location containing laminar configuration, database,
### build workspaces and archive.
###
### Default: /var/lib/laminar
###
#LAMINAR_HOME=/var/lib/laminar
###
### LAMINAR_TITLE
###
### Page title to show in web frontend
###
#LAMINAR_TITLE=
###
### LAMINAR_KEEP_WORKDIR
###
### If set (to anything), the job workdir $LAMINAR_HOME/run/$JOB/$NUM
### will not be deleted after the run has completed
###
#LAMINAR_KEEP_WORKDIR=1
###
### LAMINAR_ARCHIVE_URL
###
### Base url used to request artifacts. Laminar can serve build
### artifacts (and it will if you leave this unset), but it
### uses a very naive and inefficient method. Best to let a real
### webserver handle serving those requests.
###
#LAMINAR_ARCHIVE_URL=http://backbone.example.com/ci/archive

View File

@ -1,13 +1,10 @@
[Unit] [Unit]
Description=Laminar continuous integration service Description=Laminar continuous integration service
After=network.target
Documentation=man:laminard(8)
Documentation=https://laminar.ohwg.net/docs.html
[Service] [Service]
User=laminar User=laminar
EnvironmentFile=-/etc/laminar.conf EnvironmentFile=-/etc/laminar.conf
ExecStart=@CMAKE_INSTALL_PREFIX@/sbin/laminard -v ExecStart=/usr/bin/laminard
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target

View File

@ -1,83 +0,0 @@
#!/bin/bash -e
# Builds a laminar RPM for CentOS 7 inside a disposable docker container.
# The resulting .rpm is written to the current working directory.
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
# "-" is not a valid character in an rpm version string, hence tr - .
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty | tr - .)~upstream_centos7
# Build (and implicitly cache) the toolchain image; -q prints only its id
DOCKER_TAG=$(docker build -q - <<EOS
FROM centos:7
RUN yum -y install epel-release centos-release-scl && yum-config-manager --enable rhel-server-rhscl-7-rpms && yum -y install rpm-build cmake3 make devtoolset-7-gcc-c++ wget sqlite-devel boost-devel zlib-devel
EOS
)
# The script below runs inside the container: it builds capnproto and
# rapidjson from source (presumably the EL7 packages are too old), then
# rpmbuild's laminar from the bind-mounted source tree.
docker run --rm -i -v $SOURCE_DIR:/root/rpmbuild/SOURCES/laminar-$VERSION:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
# for new gcc
export PATH=/opt/rh/devtoolset-7/root/usr/bin:\$PATH
mkdir /build
cd /build
wget -O capnproto.tar.gz https://github.com/capnproto/capnproto/archive/v0.7.0.tar.gz
wget -O rapidjson.tar.gz https://github.com/miloyip/rapidjson/archive/v1.1.0.tar.gz
md5sum -c <<EOF
a9de5f042f4cf05515c2d7dfc7f5df21 capnproto.tar.gz
badd12c511e081fec6c89c43a7027bce rapidjson.tar.gz
EOF
tar xzf capnproto.tar.gz
tar xzf rapidjson.tar.gz
cd /build/capnproto-0.7.0/c++/
cmake3 -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=off .
make -j4
make install
cd /build/rapidjson-1.1.0/
cmake3 -DRAPIDJSON_BUILD_EXAMPLES=off .
make install
cd
cat <<EOF > laminar.spec
Summary: Lightweight Continuous Integration Service
Name: laminar
Version: $VERSION
Release: 1
License: GPL
BuildRequires: systemd-units
Requires: sqlite zlib
%description
Lightweight Continuous Integration Service
%prep
%build
cmake3 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DSYSTEMD_UNITDIR=%{_unitdir} %{_sourcedir}/laminar-$VERSION
pwd
make
%install
%make_install
%files
%{_bindir}/laminarc
%{_sbindir}/laminard
%{_unitdir}/laminar.service
%config(noreplace) %{_sysconfdir}/laminar.conf
%{_datarootdir}/bash-completion/completions/laminarc
%{_datarootdir}/zsh/site-functions/_laminarc
%{_mandir}/man8/laminard.8.gz
%{_mandir}/man1/laminarc.1.gz
%post
echo Creating laminar user with home in %{_sharedstatedir}/laminar
useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar
mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: %{_sharedstatedir}/laminar
EOF
rpmbuild -ba laminar.spec
mv rpmbuild/RPMS/x86_64/laminar-$VERSION-1.x86_64.rpm /output/
EOS

View File

@ -1,48 +0,0 @@
#!/bin/bash -e
# Builds a laminar .deb for Debian 11 (bullseye) inside a disposable
# docker container. The package is written to the current directory.
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian11
# Build (and implicitly cache) the toolchain image; -q prints only its id
DOCKER_TAG=$(docker build -q - <<EOS
FROM debian:11-slim
RUN apt-get update && apt-get install -y wget cmake g++ capnproto libcapnp-dev rapidjson-dev libsqlite3-dev libboost-dev zlib1g-dev
EOS
)
# The script below runs inside the container: cmake build, staged install,
# then dpkg-deb to assemble the package from the bind-mounted source tree.
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix /laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: amd64
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-0.7.0, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_amd64.deb
EOS

View File

@ -1,64 +0,0 @@
#!/bin/bash -e
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian11
DOCKER_TAG=$(docker build -q - <<EOS
FROM debian:11-slim
RUN dpkg --add-architecture armhf && apt-get update && apt-get install -y wget cmake crossbuild-essential-armhf capnproto libcapnp-dev:armhf rapidjson-dev libsqlite3-dev:armhf libboost-dev:armhf zlib1g-dev:armhf
EOS
)
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cat > toolchain.cmake <<EOF
SET(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
SET(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
set(CMAKE_LIBRARY_ARCHITECTURE arm-linux-gnueabihf)
EOF
cd /build
cmake \
-DCMAKE_TOOLCHAIN_FILE=toolchain.cmake \
-DCMAKE_LINKER=/usr/bin/arm-linux-gnueabihf-ld \
-DCMAKE_OBJCOPY=/usr/bin/arm-linux-gnueabihf-objcopy \
-DCMAKE_STRIP=/usr/bin/arm-linux-gnueabihf-strip \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLAMINAR_VERSION=$VERSION \
-DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix \
/laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: armhf
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-0.7.0, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_armhf.deb
EOS

View File

@ -1,50 +0,0 @@
#!/bin/bash -e
# Builds a laminar .deb for Debian 12 (bookworm) inside a disposable
# docker container. The package is written to the current directory.
set -ex
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian12
# Build (and implicitly cache) the toolchain image; -q prints only its id
DOCKER_TAG=$(docker build -q - <<EOS
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y wget cmake g++ capnproto libcapnp-dev rapidjson-dev libsqlite3-dev libboost-dev zlib1g-dev pkg-config
EOS
)
# The script below runs inside the container: cmake build, staged install,
# then dpkg-deb to assemble the package from the bind-mounted source tree.
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix /laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: amd64
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-1.0.1, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_amd64.deb
EOS

View File

@ -1,50 +0,0 @@
#!/bin/bash -e
set -ex
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian13
DOCKER_TAG=$(docker build -q - <<EOS
FROM debian:trixie-slim
RUN apt-get update && apt-get install -y wget cmake g++ capnproto libcapnp-dev rapidjson-dev libsqlite3-dev libboost-dev zlib1g-dev pkg-config
EOS
)
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix /laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: amd64
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-1.0.1, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_amd64.deb
EOS

View File

@ -1,80 +0,0 @@
#!/bin/bash -e
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty | tr - .)~upstream_rocky8
DOCKER_TAG=$(docker build -q - <<EOS
FROM rockylinux/rockylinux:8
RUN dnf -y update && dnf -y install rpm-build cmake make gcc-c++ wget sqlite-devel boost-devel zlib-devel
EOS
)
docker run --rm -i -v $SOURCE_DIR:/root/rpmbuild/SOURCES/laminar-$VERSION:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
wget -O capnproto.tar.gz https://github.com/capnproto/capnproto/archive/v0.7.0.tar.gz
wget -O rapidjson.tar.gz https://github.com/miloyip/rapidjson/archive/v1.1.0.tar.gz
md5sum -c <<EOF
a9de5f042f4cf05515c2d7dfc7f5df21 capnproto.tar.gz
badd12c511e081fec6c89c43a7027bce rapidjson.tar.gz
EOF
tar xzf capnproto.tar.gz
tar xzf rapidjson.tar.gz
cd /build/capnproto-0.7.0/c++/
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=off .
make -j4
make install
cd /build/rapidjson-1.1.0/
cmake -DRAPIDJSON_BUILD_EXAMPLES=off .
make install
cd
cat <<EOF > laminar.spec
Summary: Lightweight Continuous Integration Service
Name: laminar
Version: $VERSION
Release: 1
License: GPL
BuildRequires: systemd-units
Requires: sqlite-libs zlib
%description
Lightweight Continuous Integration Service
%prep
%build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DSYSTEMD_UNITDIR=%{_unitdir} %{_sourcedir}/laminar-$VERSION
pwd
make
%install
%make_install
%files
%{_bindir}/laminarc
%{_sbindir}/laminard
%{_unitdir}/laminar.service
%config(noreplace) %{_sysconfdir}/laminar.conf
%{_datarootdir}/bash-completion/completions/laminarc
%{_datarootdir}/zsh/site-functions/_laminarc
%{_mandir}/man8/laminard.8.gz
%{_mandir}/man1/laminarc.1.gz
%post
echo Creating laminar user with home in %{_sharedstatedir}/laminar
useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar
mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: %{_sharedstatedir}/laminar
EOF
rpmbuild -ba laminar.spec
mv rpmbuild/RPMS/x86_64/laminar-$VERSION-1.x86_64.rpm /output/
EOS

View File

@ -1,49 +0,0 @@
#!/bin/bash -e
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-ubuntu2204
DOCKER_TAG=$(docker build -q - <<EOS
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y wget cmake g++ capnproto libcapnp-dev rapidjson-dev libsqlite3-dev libboost-dev zlib1g-dev pkg-config
EOS
)
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix /laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: amd64
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-0.8.0, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_amd64.deb
EOS

View File

@ -1,49 +0,0 @@
#!/bin/bash -e
OUTPUT_DIR=$PWD
SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..)
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-ubuntu2404
DOCKER_TAG=$(docker build -q - <<EOS
FROM ubuntu:24.04
RUN apt-get update && apt-get install -y wget cmake g++ capnproto libcapnp-dev rapidjson-dev libsqlite3-dev libboost-dev zlib1g-dev pkg-config
EOS
)
docker run --rm -i -v $SOURCE_DIR:/laminar:ro -v $OUTPUT_DIR:/output $DOCKER_TAG bash -xe <<EOS
mkdir /build
cd /build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DZSH_COMPLETIONS_DIR=/usr/share/zsh/functions/Completion/Unix /laminar
make -j4
mkdir laminar
make DESTDIR=laminar install/strip
mkdir laminar/DEBIAN
cat <<EOF > laminar/DEBIAN/control
Package: laminar
Version: $VERSION
Section:
Priority: optional
Architecture: amd64
Maintainer: Oliver Giles <web ohwg net>
Depends: libcapnp-1.0.1, libsqlite3-0, zlib1g
Description: Lightweight Continuous Integration Service
EOF
echo /etc/laminar.conf > laminar/DEBIAN/conffiles
cat <<EOF > laminar/DEBIAN/postinst
#!/bin/bash
echo Creating laminar user with home in /var/lib/laminar
useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar
mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts}
chown -R laminar: /var/lib/laminar
EOF
chmod +x laminar/DEBIAN/postinst
dpkg-deb --build laminar
mv laminar.deb /output/laminar_${VERSION}_amd64.deb
EOS

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2022 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -17,22 +17,15 @@
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#include "laminar.capnp.h" #include "laminar.capnp.h"
#include "log.h"
#include <capnp/ez-rpc.h> #include <capnp/ez-rpc.h>
#include <kj/vector.h> #include <kj/vector.h>
#include <iostream>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <unistd.h> #include <errno.h>
#define EXIT_BAD_ARGUMENT 1 #define EFAILED 55
#define EXIT_OPERATION_FAILED 2
#define EXIT_RUN_FAILED 3
// Definition needed for musl
typedef unsigned int uint;
template<typename T> template<typename T>
static int setParams(int argc, char** argv, T& request) { static int setParams(int argc, char** argv, T& request) {
@ -43,13 +36,20 @@ static int setParams(int argc, char** argv, T& request) {
n++; n++;
} }
char* job = getenv("JOB"); int argsConsumed = n;
char* num = getenv("RUN");
char* job = getenv("lJobName");
char* num = getenv("lBuildNum");
char* reason = getenv("LAMINAR_REASON"); char* reason = getenv("LAMINAR_REASON");
auto params = request.initParams(n + (job&&num?2:0) + (reason?1:0)); if(job && num) n+=2;
else if(reason) n++;
for(int i = 0; i < n; ++i) { if(n == 0) return argsConsumed;
auto params = request.initParams(n);
for(int i = 0; i < argsConsumed; ++i) {
char* name = argv[i]; char* name = argv[i];
char* val = strchr(name, '='); char* val = strchr(name, '=');
*val++ = '\0'; *val++ = '\0';
@ -57,199 +57,111 @@ static int setParams(int argc, char** argv, T& request) {
params[i].setValue(val); params[i].setValue(val);
} }
int argsConsumed = n;
if(job && num) { if(job && num) {
params[n].setName("=parentJob"); params[argsConsumed].setName("=parentJob");
params[n++].setValue(job); params[argsConsumed].setValue(job);
params[n].setName("=parentBuild"); params[argsConsumed+1].setName("=parentBuild");
params[n++].setValue(num); params[argsConsumed+1].setValue(num);
} } else if(reason) {
if(reason) { params[argsConsumed].setName("=reason");
params[n].setName("=reason"); params[argsConsumed].setValue(reason);
params[n].setValue(reason);
} }
return argsConsumed; return argsConsumed;
} }
static void printTriggerLink(const char* job, uint run) {
if(getenv("__LAMINAR_SETENV_PIPE")) {
// use a private ANSI CSI sequence to mark the JOB:NUM so the
// frontend can recognise it and generate a hyperlink.
printf("\033[{%s:%d\033\\\n", job, run);
} else {
// not called from within a laminar job, let's not confuse
// scripts with ANSI sequences.
printf("%s:%d\n", job, run);
}
}
static void usage(std::ostream& out) {
out << "laminarc version " << laminar_version() << "\n";
out << "Usage: laminarc [-h|--help] COMMAND\n";
out << " -h|--help show this help message\n";
out << "where COMMAND is:\n";
out << " queue JOB_LIST... queues one or more jobs for execution and returns immediately.\n";
out << " start JOB_LIST... queues one or more jobs for execution and blocks until it starts.\n";
out << " run JOB_LIST... queues one or more jobs for execution and blocks until it finishes.\n";
out << " JOB_LIST may be prepended with --next, in this case the job will\n";
out << " be pushed to the front of the queue instead of the end.\n";
out << " set PARAMETER_LIST... sets the given parameters as environment variables in the currently\n";
out << " running job. Fails if run outside of a job context.\n";
out << " abort NAME NUMBER aborts the run identified by NAME and NUMBER.\n";
out << " show-jobs lists all known jobs.\n";
out << " show-queued lists currently queued jobs.\n";
out << " show-running lists currently running jobs.\n";
out << "JOB_LIST is of the form:\n";
out << " [JOB_NAME [PARAMETER_LIST...]]...\n";
out << "PARAMETER_LIST is of the form:\n";
out << " [KEY=VALUE]...\n";
out << "Example:\n";
out << " laminarc start \\\n";
out << " nightly-build branch=master type=release \\\n";
out << " nightly-build branch=master type=debug\n";
}
int main(int argc, char** argv) { int main(int argc, char** argv) {
if(argc < 2) // TODO: pass this through an enviroment variable set by laminard
return usage(std::cerr), EXIT_BAD_ARGUMENT; const char* address = "unix:\0laminar";
else if(strcmp("-h", argv[1]) == 0 || strcmp("--help", argv[1]) == 0)
return usage(std::cout), EXIT_SUCCESS;
struct: public kj::TaskSet::ErrorHandler { if(argc < 2) {
void taskFailed(kj::Exception&& e) override { fprintf(stderr, "Usage: %s <command> [parameters...]\n", argv[0]);
fprintf(stderr, "%s\n", e.getDescription().cStr()); return EINVAL;
ret = EXIT_OPERATION_FAILED; }
}
int ret = 0;
} errorHandler;
kj::TaskSet ts(errorHandler);
int& ret = errorHandler.ret;
const char* address = getenv("LAMINAR_HOST") ?: getenv("LAMINAR_BIND_RPC") ?: "unix-abstract:laminar"; int ret = 0;
capnp::EzRpcClient client(address); capnp::EzRpcClient client(address);
LaminarCi::Client laminar = client.getMain<LaminarCi>(); LaminarCi::Client laminar = client.getMain<LaminarCi>();
auto& waitScope = client.getWaitScope(); auto& waitScope = client.getWaitScope();
int jobNameIndex = 2; if(strcmp(argv[1], "trigger") == 0) {
bool frontOfQueue = false; if(argc < 3) {
fprintf(stderr, "Usage %s trigger <jobName>\n", argv[0]);
if(strcmp(argv[1], "queue") == 0 || strcmp(argv[1], "start") == 0 || strcmp(argv[1], "run") == 0) { return EINVAL;
if(argc < 3 || (strcmp(argv[2], "--next") == 0 && argc < 4)) {
fprintf(stderr, "Usage %s %s JOB_LIST...\n", argv[0], argv[1]);
return EXIT_BAD_ARGUMENT;
} }
if(strcmp(argv[2], "--next") == 0) { kj::Vector<capnp::RemotePromise<LaminarCi::TriggerResults>> promises;
frontOfQueue = true; int jobNameIndex = 2;
jobNameIndex++; // make a request for each job specified on the commandline
}
}
if(strcmp(argv[1], "queue") == 0) {
do { do {
auto req = laminar.queueRequest(); auto req = laminar.triggerRequest();
req.setJobName(argv[jobNameIndex]); req.setJobName(argv[jobNameIndex]);
req.setFrontOfQueue(frontOfQueue);
int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req); int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req);
ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response<LaminarCi::QueueResults> resp){ promises.add(req.send());
if(resp.getResult() != LaminarCi::MethodResult::SUCCESS) {
fprintf(stderr, "Failed to queue job '%s'\n", argv[jobNameIndex]);
ret = EXIT_OPERATION_FAILED;
} else
printTriggerLink(argv[jobNameIndex], resp.getBuildNum());
}));
jobNameIndex += n + 1; jobNameIndex += n + 1;
} while(jobNameIndex < argc); } while(jobNameIndex < argc);
// pend on the promises
for(auto& p : promises) {
if(p.wait(waitScope).getResult() != LaminarCi::MethodResult::SUCCESS) {
fprintf(stderr, "Failed to queue job '%s'\n", argv[2]);
return ENOENT;
}
}
} else if(strcmp(argv[1], "start") == 0) { } else if(strcmp(argv[1], "start") == 0) {
if(argc < 3) {
fprintf(stderr, "Usage %s start <jobName>\n", argv[0]);
return EINVAL;
}
kj::Vector<capnp::RemotePromise<LaminarCi::StartResults>> promises;
int jobNameIndex = 2;
// make a request for each job specified on the commandline
do { do {
auto req = laminar.startRequest(); auto req = laminar.startRequest();
req.setJobName(argv[jobNameIndex]); req.setJobName(argv[jobNameIndex]);
req.setFrontOfQueue(frontOfQueue);
int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req); int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req);
ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response<LaminarCi::StartResults> resp){ promises.add(req.send());
if(resp.getResult() != LaminarCi::MethodResult::SUCCESS) {
fprintf(stderr, "Failed to start job '%s'\n", argv[2]);
ret = EXIT_OPERATION_FAILED;
} else
printTriggerLink(argv[jobNameIndex], resp.getBuildNum());
}));
jobNameIndex += n + 1;
} while(jobNameIndex < argc);
} else if(strcmp(argv[1], "run") == 0) {
do {
auto req = laminar.runRequest();
req.setJobName(argv[jobNameIndex]);
req.setFrontOfQueue(frontOfQueue);
int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req);
ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response<LaminarCi::RunResults> resp){
if(resp.getResult() == LaminarCi::JobResult::UNKNOWN)
fprintf(stderr, "Failed to start job '%s'\n", argv[2]);
else
printTriggerLink(argv[jobNameIndex], resp.getBuildNum());
if(resp.getResult() != LaminarCi::JobResult::SUCCESS)
ret = EXIT_RUN_FAILED;
}));
jobNameIndex += n + 1; jobNameIndex += n + 1;
} while(jobNameIndex < argc); } while(jobNameIndex < argc);
// pend on the promises
for(auto& p : promises) {
if(p.wait(waitScope).getResult() != LaminarCi::JobResult::SUCCESS) {
ret = EFAILED;
}
}
} else if(strcmp(argv[1], "set") == 0) { } else if(strcmp(argv[1], "set") == 0) {
if(argc < 3) { if(argc < 3) {
fprintf(stderr, "Usage %s set param=value\n", argv[0]); fprintf(stderr, "Usage %s set param=value\n", argv[0]);
return EXIT_BAD_ARGUMENT; return EINVAL;
} }
if(char* pipeNum = getenv("__LAMINAR_SETENV_PIPE")) { auto req = laminar.setRequest();
LSYSCALL(write(atoi(pipeNum), argv[2], strlen(argv[2]))); char* eq = strchr(argv[2], '=');
char* job = getenv("lJobName");
char* num = getenv("lBuildNum");
if(job && num && eq) {
char* name = argv[2];
*eq++ = '\0';
char* val = eq;
req.setJobName(job);
req.setBuildNum(atoi(num));
req.getParam().setName(name);
req.getParam().setValue(val);
req.send().wait(waitScope);
} else { } else {
fprintf(stderr, "Must be run from within a laminar job\n"); fprintf(stderr, "Missing lJobName and lBuildNum or param is not in the format key=value\n");
return EXIT_BAD_ARGUMENT; return EINVAL;
}
} else if(strcmp(argv[1], "abort") == 0) {
if(argc != 4) {
fprintf(stderr, "Usage %s abort <jobName> <jobNumber>\n", argv[0]);
return EXIT_BAD_ARGUMENT;
}
auto req = laminar.abortRequest();
req.getRun().setJob(argv[2]);
req.getRun().setBuildNum(atoi(argv[3]));
ts.add(req.send().then([&ret](capnp::Response<LaminarCi::AbortResults> resp){
if(resp.getResult() != LaminarCi::MethodResult::SUCCESS)
ret = EXIT_OPERATION_FAILED;
}));
} else if(strcmp(argv[1], "show-jobs") == 0) {
if(argc != 2) {
fprintf(stderr, "Usage: %s show-jobs\n", argv[0]);
return EXIT_BAD_ARGUMENT;
}
auto jobs = laminar.listKnownRequest().send().wait(waitScope);
for(auto it : jobs.getResult()) {
printf("%s\n", it.cStr());
}
} else if(strcmp(argv[1], "show-queued") == 0) {
if(argc != 2) {
fprintf(stderr, "Usage: %s show-queued\n", argv[0]);
return EXIT_BAD_ARGUMENT;
}
auto queued = laminar.listQueuedRequest().send().wait(waitScope);
for(auto it : queued.getResult()) {
printf("%s:%d\n", it.getJob().cStr(), it.getBuildNum());
}
} else if(strcmp(argv[1], "show-running") == 0) {
if(argc != 2) {
fprintf(stderr, "Usage: %s show-running\n", argv[0]);
return EXIT_BAD_ARGUMENT;
}
auto running = laminar.listRunningRequest().send().wait(waitScope);
for(auto it : running.getResult()) {
printf("%s:%d\n", it.getJob().cStr(), it.getBuildNum());
} }
} else if(strcmp(argv[1], "wait") == 0) {
auto req = laminar.pendRequest();
req.setJobName(argv[2]);
req.setBuildNum(atoi(argv[3]));
auto response = req.send().wait(waitScope);
if(response.getResult() != LaminarCi::JobResult::SUCCESS)
return EFAILED;
} else { } else {
fprintf(stderr, "Unknown command %s\n", argv[1]); fprintf(stderr, "Unknown comand %s\n", argv[1]);
return EXIT_BAD_ARGUMENT; return EINVAL;
} }
ts.onEmpty().wait(waitScope);
return ret; return ret;
} }

View File

@ -25,13 +25,13 @@ int StringMap::convert(std::string e) { return atoi(e.c_str()); }
StringMap parseConfFile(const char* path) { StringMap parseConfFile(const char* path) {
StringMap result; StringMap result;
std::ifstream f(path); std::fstream f(path);
std::string line; std::string line;
while(std::getline(f, line)) { while(std::getline(f, line)) {
if(line[0] == '#') if(line[0] == '#')
continue; continue;
size_t p = line.find('='); int p = line.find('=');
if(p != std::string::npos) { if(p > 0) {
result.emplace(line.substr(0, p), line.substr(p+1)); result.emplace(line.substr(0, p), line.substr(p+1));
} }
} }

View File

@ -16,10 +16,9 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_CONF_H_ #ifndef _LAMINAR_CONF_H_
#define LAMINAR_CONF_H_ #define _LAMINAR_CONF_H_
#include <string>
#include <unordered_map> #include <unordered_map>
class StringMap : public std::unordered_map<std::string, std::string> { class StringMap : public std::unordered_map<std::string, std::string> {
@ -42,4 +41,4 @@ int StringMap::convert(std::string e);
StringMap parseConfFile(const char* path); StringMap parseConfFile(const char* path);
#endif // LAMINAR_CONF_H_ #endif // _LAMINAR_CONF_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2018 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -19,54 +19,17 @@
#include "database.h" #include "database.h"
#include <sqlite3.h> #include <sqlite3.h>
#include <string.h>
#include <math.h>
#include <cstdint>
struct StdevCtx {
double mean;
double M2;
int64_t count;
};
static void stdevStep(sqlite3_context *ctx, int, sqlite3_value **args)
{
StdevCtx* p = static_cast<StdevCtx*>(sqlite3_aggregate_context(ctx, sizeof(StdevCtx)));
// Welford's Online Algorithm
if(sqlite3_value_numeric_type(args[0]) != SQLITE_NULL) {
p->count++;
double val = sqlite3_value_double(args[0]);
double delta = val - p->mean;
p->mean += delta / p->count;
p->M2 += delta * (val - p->mean);
}
}
static void stdevFinalize(sqlite3_context *context){
StdevCtx* p = static_cast<StdevCtx*>(sqlite3_aggregate_context(context, 0));
if(p && p->count > 1)
sqlite3_result_double(context, sqrt(p->M2 / (p->count-1)));
else
sqlite3_result_null(context);
}
Database::Database(const char *path) { Database::Database(const char *path) {
sqlite3_open(path, &hdl); sqlite3_open(path, &hdl);
int create_func_flags = SQLITE_UTF8;
#if SQLITE_VERSION_NUMBER >= 3008003
create_func_flags |= SQLITE_DETERMINISTIC;
#endif
sqlite3_create_function(hdl, "STDEV", 1, create_func_flags, NULL, NULL, stdevStep, stdevFinalize);
} }
Database::~Database() { Database::~Database() {
sqlite3_close(hdl); sqlite3_close(hdl);
} }
Database::Statement::Statement(sqlite3 *db, const char *query) : Database::Statement::Statement(sqlite3 *db, const char *query) {
stmt(nullptr) sqlite3_prepare_v2(db, query, -1, &stmt, NULL);
{
sqlite3_prepare_v2(db, query, -1, &stmt, nullptr);
} }
Database::Statement::~Statement() { Database::Statement::~Statement() {
@ -75,63 +38,36 @@ Database::Statement::~Statement() {
bool Database::Statement::exec() { bool Database::Statement::exec() {
return sqlite3_step(stmt) == SQLITE_DONE; return sqlite3_step(stmt) == SQLITE_OK;
} }
void Database::Statement::bindValue(int i, int e) { void Database::Statement::bindValue(int i, int e) {
sqlite3_bind_int(stmt, i, e); sqlite3_bind_int(stmt, i, e);
} }
void Database::Statement::bindValue(int i, uint e) {
sqlite3_bind_int(stmt, i, static_cast<int32_t>(e));
}
void Database::Statement::bindValue(int i, long e) {
sqlite3_bind_int64(stmt, i, e);
}
void Database::Statement::bindValue(int i, ulong e) {
sqlite3_bind_int64(stmt, i, static_cast<int64_t>(e));
}
void Database::Statement::bindValue(int i, const char* e) { void Database::Statement::bindValue(int i, const char* e) {
sqlite3_bind_text(stmt, i, e, -1, nullptr); sqlite3_bind_text(stmt, i, e, -1, NULL);
} }
void Database::Statement::bindValue(int i, const std::string& e) { void Database::Statement::bindValue(int i, std::string e) {
sqlite3_bind_text(stmt, i, e.data(), static_cast<int>(e.size()), nullptr); sqlite3_bind_text(stmt, i, e.c_str(), e.length(), NULL);
} }
template<> std::string Database::Statement::fetchColumn(int col) { template<> std::string Database::Statement::fetchColumn(int col) {
uint sz = static_cast<uint>(sqlite3_column_bytes(stmt, col)); // according to documentation will never be negative return (char*)sqlite3_column_text(stmt, col);
std::string res(sz, '\0');
memcpy(&res[0], sqlite3_column_text(stmt, col), sz);
return res;
} }
template<> const char* Database::Statement::fetchColumn(int col) { template<> const char* Database::Statement::fetchColumn(int col) {
// while sqlite3_column_text maybe more correctly returns an unsigned const char*, signed const char* is more consistent return (char*)sqlite3_column_text(stmt, col);
return reinterpret_cast<const char*>(sqlite3_column_text(stmt, col));
} }
template<> int Database::Statement::fetchColumn(int col) { template<> int Database::Statement::fetchColumn(int col) {
return sqlite3_column_int(stmt, col); return sqlite3_column_int(stmt, col);
} }
template<> uint Database::Statement::fetchColumn(int col) { template<> time_t Database::Statement::fetchColumn(int col) {
return static_cast<uint>(sqlite3_column_int(stmt, col)); return sqlite3_column_int64(stmt, col);
}
template<> long Database::Statement::fetchColumn(int col) {
return static_cast<long>(sqlite3_column_int64(stmt, col));
}
template<> ulong Database::Statement::fetchColumn(int col) {
return static_cast<ulong>(sqlite3_column_int64(stmt, col));
}
template<> double Database::Statement::fetchColumn(int col) {
return sqlite3_column_double(stmt, col);
} }
bool Database::Statement::row() { bool Database::Statement::row() {

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2018 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,16 +16,12 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_DATABASE_H_ #ifndef _LAMINAR_DATABASE_H_
#define LAMINAR_DATABASE_H_ #define _LAMINAR_DATABASE_H_
#include <string> #include <string>
#include <functional> #include <functional>
// Definition needed for musl
typedef unsigned int uint;
typedef unsigned long ulong;
struct sqlite3; struct sqlite3;
struct sqlite3_stmt; struct sqlite3_stmt;
@ -56,20 +52,12 @@ private:
public: public:
Statement(sqlite3* db, const char* query); Statement(sqlite3* db, const char* query);
Statement(const Statement&) =delete;
Statement(Statement&& other) {
stmt = other.stmt;
other.stmt = nullptr;
}
~Statement(); ~Statement();
// Bind several parameters in a single call. They are bound // Bind several parameters in a single call. They are bound
// by index in the order passed into this function. Must be // by index in the order passed into this function
// passed by reference because arguments may be std::strings,
// which must be passed by reference because sqlite requires
// the bound string's lifetime to exist until sqlite3_step
template<typename...Args> template<typename...Args>
Statement& bind(const Args&...args) { Statement& bind(Args...args) {
return bindRecursive<Args...>(1, args...); return bindRecursive<Args...>(1, args...);
} }
// Fetch columns. Supply a callback that will be executed for // Fetch columns. Supply a callback that will be executed for
@ -107,12 +95,12 @@ private:
} }
}; };
template<typename...Args> template<typename...Args>
friend struct FetchMarshaller; friend class FetchMarshaller;
bool row(); bool row();
template<typename T, typename...Args> template<typename T, typename...Args>
Statement& bindRecursive(int i, const T& v, const Args&...args) { Statement& bindRecursive(int i, T v, Args...args) {
bindValue(i, v); // specialization must exist for T bindValue(i, v); // specialization must exist for T
return bindRecursive(i + 1, args...); return bindRecursive(i + 1, args...);
} }
@ -123,11 +111,8 @@ private:
// Bind value specializations // Bind value specializations
void bindValue(int i, int e); void bindValue(int i, int e);
void bindValue(int i, uint e);
void bindValue(int i, long e);
void bindValue(int i, unsigned long e);
void bindValue(int i, const char* e); void bindValue(int i, const char* e);
void bindValue(int i, const std::string& e); void bindValue(int i, std::string e);
// Declaration for fetch column interface, // Declaration for fetch column interface,
// intentionally missing definition // intentionally missing definition
@ -152,9 +137,6 @@ private:
template<> std::string Database::Statement::fetchColumn(int col); template<> std::string Database::Statement::fetchColumn(int col);
template<> const char* Database::Statement::fetchColumn(int col); template<> const char* Database::Statement::fetchColumn(int col);
template<> int Database::Statement::fetchColumn(int col); template<> int Database::Statement::fetchColumn(int col);
template<> uint Database::Statement::fetchColumn(int col); template<> time_t Database::Statement::fetchColumn(int col);
template<> long Database::Statement::fetchColumn(int col);
template<> ulong Database::Statement::fetchColumn(int col);
template<> double Database::Statement::fetchColumn(int col);
#endif // LAMINAR_DATABASE_H_ #endif // _LAMINAR_DATABASE_H_

View File

@ -1,309 +0,0 @@
///
/// Copyright 2015-2019 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include "http.h"
#include "resources.h"
#include "monitorscope.h"
#include "log.h"
#include "laminar.h"
// Helper class which wraps another class with calls to
// adding and removing a pointer to itself from a passed
// std::set reference. Used to keep track of currently
// connected clients
template<typename T, typename ...Args>
struct WithSetRef : public T {
WithSetRef(std::set<T*>& set, Args&& ...args) :
T(std::forward(args)...),
_set(set)
{
_set.insert(this);
}
~WithSetRef() {
_set.erase(this);
}
private:
std::set<T*>& _set;
};
struct EventPeer {
MonitorScope scope;
std::list<std::string> pendingOutput;
kj::Own<kj::PromiseFulfiller<void>> fulfiller;
};
struct LogWatcher {
std::string job;
uint run;
std::list<std::string> pendingOutput;
kj::Own<kj::PromiseFulfiller<bool>> fulfiller;
};
kj::Maybe<MonitorScope> fromUrl(std::string resource, char* query) {
MonitorScope scope;
if(query) {
char *sk;
for(char* k = strtok_r(query, "&", &sk); k; k = strtok_r(nullptr, "&", &sk)) {
if(char* v = strchr(k, '=')) {
*v++ = '\0';
if(strcmp(k, "page") == 0)
scope.page = atoi(v);
else if(strcmp(k, "field") == 0)
scope.field = v;
else if(strcmp(k, "order") == 0)
scope.order_desc = (strcmp(v, "dsc") == 0);
}
}
}
if(resource == "/") {
scope.type = MonitorScope::HOME;
return kj::mv(scope);
}
if(resource == "/jobs" || resource == "/wallboard") {
scope.type = MonitorScope::ALL;
return kj::mv(scope);
}
if(resource.substr(0, 5) != "/jobs")
return nullptr;
resource = resource.substr(5);
size_t split = resource.find('/',1);
std::string job = resource.substr(1,split-1);
if(job.empty())
return nullptr;
scope.job = job;
scope.type = MonitorScope::JOB;
if(split == std::string::npos)
return kj::mv(scope);
size_t split2 = resource.find('/', split+1);
std::string run = resource.substr(split+1, split2-split);
if(run.empty())
return nullptr;
scope.num = static_cast<uint>(atoi(run.c_str()));
scope.type = MonitorScope::RUN;
return kj::mv(scope);
}
// Parses the url of the form /log/NAME/NUMBER, filling in the passed
// references and returning true if successful. /log/NAME/latest is
// also allowed, in which case the num reference is set to 0
bool Http::parseLogEndpoint(kj::StringPtr url, std::string& name, uint& num) {
if(url.startsWith("/log/")) {
kj::StringPtr path = url.slice(5);
KJ_IF_MAYBE(sep, path.findFirst('/')) {
name = path.slice(0, *sep).begin();
kj::StringPtr tail = path.slice(*sep+1);
num = static_cast<uint>(atoi(tail.begin()));
name.erase(*sep);
if(tail == "latest")
num = laminar.latestRun(name);
if(num > 0)
return true;
}
}
return false;
}
kj::Promise<void> Http::cleanupPeers(kj::Timer& timer)
{
return timer.afterDelay(15 * kj::SECONDS).then([&]{
for(EventPeer* p : eventPeers) {
// Even single threaded, if load causes this timeout to be serviced
// before writeEvents has created a fulfiller, or if an exception
// caused the destruction of the promise but attach(peer) hasn't yet
// removed it from the eventPeers list, we will see a null fulfiller
// here
if(p->fulfiller) {
// an empty SSE message is a colon followed by two newlines
p->pendingOutput.push_back(":\n\n");
p->fulfiller->fulfill();
}
}
return cleanupPeers(timer);
}).eagerlyEvaluate(nullptr);
}
kj::Promise<void> writeEvents(EventPeer* peer, kj::AsyncOutputStream* stream) {
auto paf = kj::newPromiseAndFulfiller<void>();
peer->fulfiller = kj::mv(paf.fulfiller);
return paf.promise.then([=]{
kj::Promise<void> p = kj::READY_NOW;
std::list<std::string> chunks = kj::mv(peer->pendingOutput);
for(std::string& s : chunks) {
p = p.then([=,&s]{
return stream->write(s.data(), s.size());
});
}
return p.attach(kj::mv(chunks)).then([=]{
return writeEvents(peer, stream);
});
});
}
kj::Promise<void> writeLogChunk(LogWatcher* client, kj::AsyncOutputStream* stream) {
auto paf = kj::newPromiseAndFulfiller<bool>();
client->fulfiller = kj::mv(paf.fulfiller);
return paf.promise.then([=](bool done){
kj::Promise<void> p = kj::READY_NOW;
std::list<std::string> chunks = kj::mv(client->pendingOutput);
for(std::string& s : chunks) {
p = p.then([=,&s]{
return stream->write(s.data(), s.size());
});
}
return p.attach(kj::mv(chunks)).then([=]{
return done ? kj::Promise<void>(kj::READY_NOW) : writeLogChunk(client, stream);
});
});
}
// Top-level HTTP dispatcher. Routes, in this order:
//   - SSE event streams (Accept: text/event-stream)
//   - /archive/...          archived build artifacts
//   - log endpoints         live or completed run logs
//   - static frontend resources (served gzip-compressed)
//   - /badge/<job>.svg      status badges
// Anything unmatched gets a 404.
kj::Promise<void> Http::request(kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders &headers, kj::AsyncInputStream &requestBody, HttpService::Response &response)
{
    const char* start, *end, *content_type;
    std::string badge;
    // for log requests
    std::string name;
    uint num;
    kj::HttpHeaders responseHeaders(*headerTable);
    responseHeaders.clear();
    bool is_sse = false;
    char* queryString = nullptr;
    // Clients usually expect that http servers will ignore unknown query parameters,
    // and expect to use this feature to work around browser limitations like there
    // being no way to programmatically force a resource to be reloaded from the server
    // (without "Cache-Control: no-store", which is overkill). See issue #89.
    // So first parse any query parameters we *are* interested in, then simply remove
    // them from the URL, to make comparisons easier.
    KJ_IF_MAYBE(queryIdx, url.findFirst('?')) {
        // Truncate the url in place at the '?'. The const_cast is needed
        // because kj::StringPtr exposes no mutable access, but the buffer
        // belongs to this request.
        const_cast<char*>(url.begin())[*queryIdx] = '\0';
        queryString = const_cast<char*>(url.begin() + *queryIdx + 1);
        url = url.begin();
    }
    KJ_IF_MAYBE(accept, headers.get(ACCEPT)) {
        is_sse = (*accept == "text/event-stream");
    }
    if(is_sse) {
        KJ_IF_MAYBE(s, fromUrl(url.cStr(), queryString)) {
            responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "text/event-stream");
            // Disables nginx reverse-proxy's buffering. Necessary for streamed events.
            responseHeaders.add("X-Accel-Buffering", "no");
            auto peer = kj::heap<WithSetRef<EventPeer>>(eventPeers);
            peer->scope = *s;
            // Send an initial status snapshot, then stream updates until the
            // client disconnects.
            std::string st = "data: " + laminar.getStatus(peer->scope) + "\n\n";
            auto stream = response.send(200, "OK", responseHeaders);
            return stream->write(st.data(), st.size()).attach(kj::mv(st)).then([=,s=stream.get(),p=peer.get()]{
                return writeEvents(p,s);
            }).attach(kj::mv(stream)).attach(kj::mv(peer));
        }
    } else if(url.startsWith("/archive/")) {
        KJ_IF_MAYBE(file, laminar.getArtefact(url.slice(strlen("/archive/")))) {
            // Serve the artifact by memory-mapping the whole file.
            auto array = (*file)->mmap(0, (*file)->stat().size);
            responseHeaders.add("Content-Transfer-Encoding", "binary");
            auto stream = response.send(200, "OK", responseHeaders, array.size());
            return stream->write(array.begin(), array.size()).attach(kj::mv(array)).attach(kj::mv(file)).attach(kj::mv(stream));
        }
    } else if(parseLogEndpoint(url, name, num)) {
        bool complete;
        std::string output;
        if(laminar.handleLogRequest(name, num, output, complete)) {
            responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "text/plain; charset=utf-8");
            responseHeaders.add("Content-Transfer-Encoding", "binary");
            // Disables nginx reverse-proxy's buffering. Necessary for dynamic log output.
            responseHeaders.add("X-Accel-Buffering", "no");
            auto stream = response.send(200, "OK", responseHeaders, nullptr);
            auto s = stream.get();
            auto lw = kj::heap<WithSetRef<LogWatcher>>(logWatchers);
            lw->job = name;
            lw->run = num;
            // Write what we already have; if the run is still ongoing, keep
            // streaming chunks via writeLogChunk.
            auto promise = writeLogChunk(lw.get(), stream.get()).attach(kj::mv(stream)).attach(kj::mv(lw));
            return s->write(output.data(), output.size()).attach(kj::mv(output)).then([p=kj::mv(promise),complete]() mutable {
                if(complete)
                    return kj::Promise<void>(kj::READY_NOW);
                return kj::mv(p);
            });
        }
    } else if(resources->handleRequest(url.cStr(), &start, &end, &content_type)) {
        // Static frontend resource, served pre-compressed (Content-Encoding: gzip).
        responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, content_type);
        responseHeaders.add("Content-Encoding", "gzip");
        responseHeaders.add("Content-Transfer-Encoding", "binary");
        auto stream = response.send(200, "OK", responseHeaders, end-start);
        return stream->write(start, end-start).attach(kj::mv(stream));
    } else if(url.startsWith("/badge/") && url.endsWith(".svg") && laminar.handleBadgeRequest(std::string(url.begin()+7, url.size()-11), badge)) {
        // "/badge/" is 7 chars, ".svg" is 4: slice out the job name between them.
        responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "image/svg+xml");
        responseHeaders.add("Cache-Control", "no-cache");
        auto stream = response.send(200, "OK", responseHeaders, badge.size());
        return stream->write(badge.data(), badge.size()).attach(kj::mv(badge)).attach(kj::mv(stream));
    }
    return response.sendError(404, "Not Found", responseHeaders);
}
// Constructs the HTTP frontend. The header table is built once up front;
// "Accept" is the only header we need to look up later (to detect SSE
// requests in request()).
Http::Http(Laminar &li) :
    laminar(li),
    resources(kj::heap<Resources>())
{
    kj::HttpHeaderTable::Builder headerTableBuilder;
    ACCEPT = headerTableBuilder.add("Accept");
    headerTable = headerTableBuilder.build();
}
// All SSE peers and log watchers hold raw pointers into this object's
// sets; by the time it is destroyed they must all have been torn down.
Http::~Http()
{
    LASSERT(logWatchers.empty());
    LASSERT(eventPeers.empty());
}
// Creates a kj HTTP server dispatching to this object's request() and
// begins accepting connections on the given listener. The SSE
// keepalive/reaper loop (cleanupPeers) is attached so it runs for the
// lifetime of the server.
kj::Promise<void> Http::startServer(kj::Timer& timer, kj::Own<kj::ConnectionReceiver>&& listener)
{
    kj::Own<kj::HttpServer> server = kj::heap<kj::HttpServer>(timer, *headerTable, *this);
    return server->listenHttp(*listener).attach(cleanupPeers(timer)).attach(kj::mv(listener)).attach(kj::mv(server));
}
void Http::notifyEvent(const char *data, std::string job)
{
for(EventPeer* c : eventPeers) {
if(c->scope.wantsStatus(job)) {
c->pendingOutput.push_back("data: " + std::string(data) + "\n\n");
c->fulfiller->fulfill();
}
}
}
// Queues a chunk of log output for every client watching the given
// job/run and wakes its writer. eot=true tells writeLogChunk this is the
// final chunk.
void Http::notifyLog(std::string job, uint run, std::string log_chunk, bool eot)
{
    for(LogWatcher* watcher : logWatchers) {
        if(watcher->job != job || watcher->run != run)
            continue;
        watcher->pendingOutput.push_back(log_chunk);
        watcher->fulfiller->fulfill(kj::mv(eot));
    }
}
// Supplies a custom HTML template for the frontend (an empty string
// restores the built-in default). Simply forwards to Resources.
void Http::setHtmlTemplate(std::string tmpl)
{
    resources->setHtmlTemplate(tmpl);
}

View File

@ -1,69 +0,0 @@
///
/// Copyright 2019-2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_HTTP_H_
#define LAMINAR_HTTP_H_
#include <kj/memory.h>
#include <kj/compat/http.h>
#include <string>
#include <set>
// Definition needed for musl
typedef unsigned int uint;
typedef unsigned long ulong;
class Laminar;
class Resources;
struct LogWatcher;
struct EventPeer;
// HTTP frontend: serves the web UI, archived artifacts, live logs and
// status badges, and streams real-time status updates over SSE.
class Http : public kj::HttpService {
public:
    Http(Laminar&li);
    virtual ~Http();
    // Begins serving HTTP on the given listener; the returned promise
    // remains pending while the server runs.
    kj::Promise<void> startServer(kj::Timer &timer, kj::Own<kj::ConnectionReceiver> &&listener);
    // Queues an SSE message for all clients whose scope matches job.
    // Fix: the default argument was `nullptr`, and constructing a
    // std::string from a null pointer is undefined behaviour; default to
    // an empty string instead.
    void notifyEvent(const char* data, std::string job = std::string());
    // Queues a chunk of log output for clients watching job/run; eot marks
    // the end of the log.
    void notifyLog(std::string job, uint run, std::string log_chunk, bool eot);
    // Allows supplying a custom HTML template. Pass an empty string to use the default.
    void setHtmlTemplate(std::string tmpl = std::string());
private:
    virtual kj::Promise<void> request(kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers,
                                      kj::AsyncInputStream& requestBody, Response& response) override;
    // Extracts the job name and run number from a log URL; returns false
    // if the URL is not a log endpoint.
    bool parseLogEndpoint(kj::StringPtr url, std::string &name, uint &num);
    // With SSE, there is no notification if a client disappears. Also, an idle
    // client must be kept alive if there is no activity in their MonitorScope.
    // Deal with these by sending a periodic keepalive and reaping the client if
    // the write fails.
    kj::Promise<void> cleanupPeers(kj::Timer &timer);
    Laminar& laminar;
    std::set<EventPeer*> eventPeers;
    kj::Own<kj::HttpHeaderTable> headerTable;
    kj::Own<Resources> resources;
    std::set<LogWatcher*> logWatchers;
    // Header id for "Accept", registered in the constructor.
    kj::HttpHeaderId ACCEPT;
};
#endif //LAMINAR_HTTP_H_

119
src/interface.h Normal file
View File

@ -0,0 +1,119 @@
///
/// Copyright 2015 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef INTERFACE_H
#define INTERFACE_H
#include "run.h"
#include <kj/async.h>
#include <string>
#include <memory>
#include <unordered_map>
typedef std::unordered_map<std::string, std::string> ParamMap;
// Simple struct to define which information a frontend client is interested
// in, both in initial request phase and real-time updates. It corresponds
// loosely to frontend URLs
struct MonitorScope {
    // Which frontend page the client is viewing.
    enum Type {
        HOME, // home page: recent builds and statistics
        ALL,  // browse jobs
        JOB,  // a specific job page
        RUN,  // a specific run page
        LOG   // a run's log page
    };
    MonitorScope(Type type = HOME, std::string job = std::string(), int num = 0) :
        type(type),
        job(job),
        num(num)
    {}
    // True if a client with this scope should receive status updates
    // concerning the given job (and, for RUN scopes, the given run number).
    bool wantsStatus(std::string ajob, int anum = 0) const {
        switch(type) {
        case HOME:
        case ALL:
            return true;
        case JOB:
            return ajob == job;
        case RUN:
            return ajob == job && anum == num;
        default:
            return false;
        }
    }
    // True if a client with this scope should receive log output for the
    // given job and run number.
    bool wantsLog(std::string ajob, int anum) const {
        return type == LOG && ajob == job && anum == num;
    }
    Type type;
    std::string job;
    int num = 0;
};
// Represents a (websocket) client that wants to be notified about events
// matching the supplied scope. Pass instances of this to LaminarInterface
// registerClient and deregisterClient
struct LaminarClient {
    // Virtual destructor: this type is used polymorphically, so deleting an
    // implementation through a LaminarClient* must be well-defined.
    virtual ~LaminarClient() = default;
    // Delivers a JSON event payload to the client.
    virtual void sendMessage(std::string payload) = 0;
    // Closes the connection. NOTE(review): the exact semantics of `now`
    // are implementation-defined; confirm against concrete client classes.
    virtual void close(bool now = true) = 0;
    // Which job/run information this client wants (see MonitorScope).
    MonitorScope scope;
};
// The interface connecting the network layer to the application business
// logic. These methods fulfil the requirements of both the HTTP/Websocket
// and RPC interfaces.
struct LaminarInterface {
    // Virtual destructor: implementations (e.g. Laminar) may be destroyed
    // through a base pointer; without this that would be undefined behaviour.
    virtual ~LaminarInterface() = default;
    // Queues a job, returns immediately. Return value will be nullptr if
    // the supplied name is not a known job.
    virtual std::shared_ptr<Run> queueJob(std::string name, ParamMap params = ParamMap()) = 0;
    // Returns a promise that will wait for a run matching the given name
    // and build number to complete. The promise will resolve to the result
    // of the run. If no such run exists, the status will be RunState::UNKNOWN
    virtual kj::Promise<RunState> waitForRun(std::string name, int buildNum) = 0;
    // Specialization of above for an existing Run object (for example returned
    // from queueJob). Returned promise will never resolve to RunState::UNKNOWN
    virtual kj::Promise<RunState> waitForRun(const Run*) = 0;
    // Register a client (but don't give up ownership). The client will be
    // notified with a JSON message of any events matching its scope
    // (see LaminarClient and MonitorScope above)
    virtual void registerClient(LaminarClient* client) = 0;
    // Call this before destroying a client so that Laminar doesn't try
    // to call LaminarClient::sendMessage on invalid data
    virtual void deregisterClient(LaminarClient* client) = 0;
    // Synchronously send a snapshot of the current status to the given
    // client (as governed by the client's MonitorScope). This is called on
    // initial websocket connect.
    virtual void sendStatus(LaminarClient* client) = 0;
    // Implements the laminar client interface allowing the setting of
    // arbitrary parameters on a run (usually itself) to be available in
    // the environment of subsequent scripts.
    virtual bool setParam(std::string job, int buildNum, std::string param, std::string value) = 0;
    // Fetches the content of an artifact given its filename relative to
    // $LAMINAR_HOME/archive. This shouldn't be used, because the sysadmin
    // should have configured a real webserver to serve these things.
    virtual bool getArtefact(std::string path, std::string& result) = 0;
};
#endif // INTERFACE_H

View File

@ -2,18 +2,10 @@
interface LaminarCi { interface LaminarCi {
queue @0 (jobName :Text, params :List(JobParam), frontOfQueue :Bool) -> (result :MethodResult, buildNum :UInt32); trigger @0 (jobName :Text, params :List(JobParam)) -> (result :MethodResult);
start @1 (jobName :Text, params :List(JobParam), frontOfQueue :Bool) -> (result :MethodResult, buildNum :UInt32); start @1 (jobName :Text, params :List(JobParam)) -> (result :JobResult);
run @2 (jobName :Text, params :List(JobParam), frontOfQueue :Bool) -> (result :JobResult, buildNum :UInt32); pend @2 (jobName :Text, buildNum :UInt32) -> (result :JobResult);
listQueued @3 () -> (result :List(Run)); set @3 (jobName :Text, buildNum :UInt32, param :JobParam) -> (result :MethodResult);
listRunning @4 () -> (result :List(Run));
listKnown @5 () -> (result :List(Text));
abort @6 (run :Run) -> (result :MethodResult);
struct Run {
job @0 :Text;
buildNum @1 :UInt32;
}
struct JobParam { struct JobParam {
name @0 :Text; name @0 :Text;

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2022 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,122 +16,88 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_LAMINAR_H_ #ifndef _LAMINAR_LAMINAR_H_
#define LAMINAR_LAMINAR_H_ #define _LAMINAR_LAMINAR_H_
#include "interface.h"
#include "run.h" #include "run.h"
#include "monitorscope.h" #include "node.h"
#include "context.h"
#include "database.h" #include "database.h"
#include <unordered_map> #include <unordered_map>
#include <kj/filesystem.h>
#include <kj/async-io.h>
// Context name to context object map // Node name to node object map
typedef std::unordered_map<std::string, std::shared_ptr<Context>> ContextMap; typedef std::unordered_map<std::string,Node> NodeMap;
class Server; struct Server;
class Json; class Json;
class Http;
class Rpc;
struct Settings {
const char* home;
const char* bind_rpc;
const char* bind_http;
const char* archive_url;
};
// The main class implementing the application's business logic. // The main class implementing the application's business logic.
class Laminar final { // It owns a Server to manage the HTTP/websocket and Cap'n Proto RPC
// interfaces and communicates via the LaminarInterface methods and
// the LaminarClient objects (see interface.h)
class Laminar : public LaminarInterface {
public: public:
Laminar(Server& server, Settings settings); Laminar();
~Laminar() noexcept; ~Laminar();
// Queues a job, returns immediately. Return value will be nullptr if // Runs the application forever
// the supplied name is not a known job. void run();
std::shared_ptr<Run> queueJob(std::string name, ParamMap params = ParamMap(), bool frontOfQueue = false); // Call this in a signal handler to make run() return
void stop();
// Return the latest known number of the named job // Implementations of LaminarInterface
uint latestRun(std::string job); std::shared_ptr<Run> queueJob(std::string name, ParamMap params = ParamMap()) override;
kj::Promise<RunState> waitForRun(std::string name, int buildNum) override;
// Given a job name and number, return existence and (via reference params) kj::Promise<RunState> waitForRun(const Run* run) override;
// its current log output and whether the job is ongoing void registerClient(LaminarClient* client) override;
bool handleLogRequest(std::string name, uint num, std::string& output, bool& complete); void deregisterClient(LaminarClient* client) override;
void sendStatus(LaminarClient* client) override;
// Given a relevant scope, returns a JSON string describing the current bool setParam(std::string job, int buildNum, std::string param, std::string value) override;
// server status. Content differs depending on the page viewed by the user, bool getArtefact(std::string path, std::string& result) override;
// which should be provided as part of the scope.
std::string getStatus(MonitorScope scope);
// Implements the laminarc function of setting arbitrary parameters on a run,
// (typically the current run) which will be made available in the environment
// of subsequent scripts.
bool setParam(std::string job, uint buildNum, std::string param, std::string value);
// Gets the list of jobs currently waiting in the execution queue
const std::list<std::shared_ptr<Run>>& listQueuedJobs();
// Gets the list of currently executing jobs
const RunSet& listRunningJobs();
// Gets the list of known jobs - scans cfg/jobs for *.run files
std::list<std::string> listKnownJobs();
// Fetches the content of an artifact given its filename relative to
// $LAMINAR_HOME/archive. Ideally, this would instead be served by a
// proper web server which handles this url.
kj::Maybe<kj::Own<const kj::ReadableFile>> getArtefact(std::string path);
// Given the name of a job, populate the provided string reference with
// SVG content describing the last known state of the job. Returns false
// if the job is unknown.
bool handleBadgeRequest(std::string job, std::string& badge);
// Aborts a single job
bool abort(std::string job, uint buildNum);
// Abort all running jobs
void abortAll();
private: private:
bool loadConfiguration(); bool loadConfiguration();
void loadCustomizations(); void reapAdvance();
void assignNewJobs(); void assignNewJobs();
bool canQueue(const Context& ctx, const Run& run) const; bool stepRun(std::shared_ptr<Run> run);
bool tryStartRun(std::shared_ptr<Run> run, int queueIndex); void runFinished(const Run*);
void handleRunFinished(Run*); bool nodeCanQueue(const Node&, const Run&) const;
// expects that Json has started an array // expects that Json has started an array
void populateArtifacts(Json& out, std::string job, uint num, kj::Path subdir = kj::Path::parse(".")) const; void populateArtifacts(Json& out, std::string job, int num) const;
Run* activeRun(const std::string name, uint num) { Run* activeRun(std::string name, int num) {
auto it = activeJobs.byNameNumber().find(boost::make_tuple(name, num)); auto it = activeJobs.get<1>().find(boost::make_tuple(name, num));
return it == activeJobs.byNameNumber().end() ? nullptr : it->get(); return it == activeJobs.get<1>().end() ? nullptr : it->get();
} }
std::list<std::shared_ptr<Run>> queuedJobs; std::list<std::shared_ptr<Run>> queuedJobs;
// Implements the waitForRun API.
// TODO: refactor
struct Waiter {
Waiter() : paf(kj::newPromiseAndFulfiller<RunState>()) {}
void release(RunState state) {
paf.fulfiller->fulfill(RunState(state));
}
kj::Promise<RunState> takePromise() { return std::move(paf.promise); }
private:
kj::PromiseFulfillerPair<RunState> paf;
};
std::unordered_map<const Run*,std::list<Waiter>> waiters;
std::unordered_map<std::string, uint> buildNums; std::unordered_map<std::string, uint> buildNums;
std::unordered_map<std::string, std::set<std::string>> jobContexts; std::unordered_map<std::string, std::set<std::string>> jobTags;
std::unordered_map<std::string, std::string> jobDescriptions;
std::unordered_map<std::string, std::string> jobGroups;
RunSet activeJobs; RunSet activeJobs;
Database* db; Database* db;
Server& srv; Server* srv;
ContextMap contexts; NodeMap nodes;
kj::Path homePath; std::string homeDir;
kj::Own<const kj::Directory> fsHome; std::set<LaminarClient*> clients;
uint numKeepRunDirs; bool eraseWorkdir;
std::string archiveUrl; std::string archiveUrl;
kj::Own<Http> http;
kj::Own<Rpc> rpc;
}; };
#endif // LAMINAR_LAMINAR_H_ #endif // _LAMINAR_LAMINAR_H_

View File

@ -1,346 +0,0 @@
///
/// Copyright 2019-2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include "log.h"
#include <string>
#include <unistd.h>
#include <queue>
#include <dirent.h>
#if defined(__FreeBSD__)
#include <sys/procctl.h>
#else
#include <sys/prctl.h>
#endif
#include <sys/types.h>
#include <sys/wait.h>
#include <kj/async-io.h>
#include <kj/async-unix.h>
#include <kj/filesystem.h>
#include "run.h"
// short syntax helpers for kj::Path: allow writing `path / "segment"`
// instead of `path.append("segment")`.
template<typename T>
inline kj::Path operator/(const kj::Path& p, const T& ext) {
    return p.append(ext);
}
// Same helper for the non-owning kj::PathPtr variant.
template<typename T>
inline kj::Path operator/(const kj::PathPtr& p, const T& ext) {
    return p.append(ext);
}
// One entry in the leader's execution queue.
struct Script {
    // Path of the script to execute (relative; resolved against rootPath
    // before exec).
    kj::Path path;
    // Directory to chdir into before executing the script.
    kj::Path cwd;
    // If true, this script still runs when the job is aborted (used for
    // the after-run scripts).
    bool runOnAbort;
};
// Recursively SIGKILLs all descendant processes of the given pid, deepest
// first, by scanning /proc for processes whose PPid matches `parent`.
// Linux-specific (relies on /proc/<pid>/status and memmem).
static void aggressive_recursive_kill(pid_t parent) {
    DIR* proc = opendir("/proc");
    if(!proc)
        return;
    while(struct dirent* de = readdir(proc)) {
        // only numeric entries in /proc are processes; cast to unsigned
        // char because isdigit on a negative char is undefined behaviour
        if(!isdigit((unsigned char)*de->d_name))
            continue;
        char status_file[640];
        snprintf(status_file, sizeof(status_file), "/proc/%s/status", de->d_name);
        FILE* status_fp = fopen(status_file, "rb");
        if(!status_fp)
            continue;
        // Read the head of the status file (the PPid line appears near the
        // top). Reserve one byte for a nul terminator so strtol below
        // cannot run past the end of the buffer.
        char status_buffer[512];
        size_t n = fread(status_buffer, 1, sizeof(status_buffer) - 1, status_fp);
        status_buffer[n] = '\0';
        if(char* p = (char*)memmem(status_buffer, n, "PPid:\t", 6)) {
            pid_t ppid = strtol(p + 6, NULL, 10);
            if(ppid == parent) {
                pid_t pid = atoi(de->d_name);
                // recurse first: kill the deepest descendants before their
                // parents
                aggressive_recursive_kill(pid);
                fprintf(stderr, "[laminar] sending SIGKILL to pid %d\n", pid);
                kill(pid, SIGKILL);
            }
        }
        fclose(status_fp);
    }
    closedir(proc);
}
// Drives the execution of one run: maintains the queue of scripts to
// execute, forks each one in turn, and handles abort signals and child
// process reaping. Instantiated once per run in the leader process
// (see leader_main).
class Leader final : public kj::TaskSet::ErrorHandler {
public:
    Leader(kj::AsyncIoContext& ioContext, kj::Filesystem& fs, const char* jobName, uint runNumber);
    // Prepares directories and the script queue, executes all scripts and
    // returns the aggregated result.
    RunState run();
private:
    // kj::TaskSet::ErrorHandler interface: logs the exception.
    void taskFailed(kj::Exception&& exception) override;
    // Executes the script at the front of the queue, then recurses.
    kj::Promise<void> step(std::queue<Script>& scripts);
    // Handles SIGCHLD: reaps exited children and deals with orphans.
    kj::Promise<void> reapChildProcesses();
    // Reads KEY=VALUE strings sent by child scripts over setEnvPipe.
    kj::Promise<void> readEnvPipe(kj::AsyncInputStream* stream, char* buffer);
    kj::TaskSet tasks;
    // Aggregated result of the scripts executed so far.
    RunState result;
    kj::AsyncIoContext& ioContext;
    const kj::Directory& home;
    kj::PathPtr rootPath;
    std::string jobName;
    uint runNumber;
    // Process group of the currently executing script (negated for kill()).
    pid_t currentGroupId;
    // Pid of the currently executing script; 0 when none is running.
    pid_t currentScriptPid;
    std::queue<Script> scripts;
    // Pipe over which child scripts (via laminarc) send environment updates.
    int setEnvPipe[2];
    // Set once the abort grace period has expired and SIGKILLs were sent.
    bool aborting;
};
// Sets up the abort (SIGTERM) handler and the environment-update pipe.
Leader::Leader(kj::AsyncIoContext &ioContext, kj::Filesystem &fs, const char *jobName, uint runNumber) :
    tasks(*this),
    result(RunState::SUCCESS),
    ioContext(ioContext),
    home(fs.getCurrent()),
    rootPath(fs.getCurrentPath()),
    jobName(jobName),
    runNumber(runNumber),
    aborting(false)
{
    // On SIGTERM (abort request): drop every queued script that is not
    // flagged runOnAbort, ask the current script's process group to
    // terminate, then after a 2 second grace period SIGKILL anything that
    // is still alive.
    tasks.add(ioContext.unixEventPort.onSignal(SIGTERM).then([this](siginfo_t) {
        while(scripts.size() && (!scripts.front().runOnAbort))
            scripts.pop();
        // TODO: probably shouldn't do this if we are already in a runOnAbort script
        kill(-currentGroupId, SIGTERM);
        return this->ioContext.provider->getTimer().afterDelay(2*kj::SECONDS).then([this]{
            aborting = true;
            aggressive_recursive_kill(getpid());
        });
    }));
    // Create the pipe over which laminarc (running in child scripts) sends
    // KEY=VALUE environment updates; the read end is serviced by
    // readEnvPipe for the lifetime of this object.
    // NOTE(review): readEnvPipe writes a nul at buffer[sz] after each read;
    // its read limit must stay strictly below this 1024-byte allocation.
    LSYSCALL(pipe(setEnvPipe));
    auto event = ioContext.lowLevelProvider->wrapInputFd(setEnvPipe[0], kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP);
    auto buffer = kj::heapArrayBuilder<char>(1024);
    tasks.add(readEnvPipe(event, buffer.asPtr().begin()).attach(kj::mv(event), kj::mv(buffer)));
}
// Prepares the run, archive and workspace directories, queues the
// configured before/run/after scripts in order, then executes them.
// Returns the aggregated result of the run.
RunState Leader::run()
{
    kj::Path cfgDir{"cfg"};
    // create the run directory
    kj::Path rd{"run",jobName,std::to_string(runNumber)};
    bool createWorkdir = true;
    KJ_IF_MAYBE(ls, home.tryLstat(rd)) {
        LASSERT(ls->type == kj::FsNode::Type::DIRECTORY);
        LLOG(WARNING, "Working directory already exists, removing", rd.toString());
        if(home.tryRemove(rd) == false) {
            LLOG(WARNING, "Failed to remove working directory");
            // re-use the stale directory rather than failing the run
            createWorkdir = false;
        }
    }
    if(createWorkdir && home.tryOpenSubdir(rd, kj::WriteMode::CREATE|kj::WriteMode::CREATE_PARENT) == nullptr) {
        LLOG(ERROR, "Could not create working directory", rd.toString());
        return RunState::FAILED;
    }
    // create an archive directory
    kj::Path archive = kj::Path{"archive",jobName,std::to_string(runNumber)};
    if(home.exists(archive)) {
        LLOG(WARNING, "Archive directory already exists", archive.toString());
    } else if(home.tryOpenSubdir(archive, kj::WriteMode::CREATE|kj::WriteMode::CREATE_PARENT) == nullptr) {
        LLOG(ERROR, "Could not create archive directory", archive.toString());
        return RunState::FAILED;
    }
    // create a workspace for this job if it doesn't exist
    kj::Path ws{"run",jobName,"workspace"};
    if(!home.exists(ws)) {
        home.openSubdir(ws, kj::WriteMode::CREATE|kj::WriteMode::CREATE_PARENT);
        // prepend the workspace init script, run only on first creation
        if(home.exists(cfgDir/"jobs"/(jobName+".init")))
            scripts.push({cfgDir/"jobs"/(jobName+".init"), kj::mv(ws), false});
    }
    // add scripts; runOnAbort=true on the after-run scripts means they
    // still execute when the run is aborted (see the SIGTERM handler)
    // global before-run script
    if(home.exists(cfgDir/"before"))
        scripts.push({cfgDir/"before", rd.clone(), false});
    // job before-run script
    if(home.exists(cfgDir/"jobs"/(jobName+".before")))
        scripts.push({cfgDir/"jobs"/(jobName+".before"), rd.clone(), false});
    // main run script. must exist.
    scripts.push({cfgDir/"jobs"/(jobName+".run"), rd.clone(), false});
    // job after-run script
    if(home.exists(cfgDir/"jobs"/(jobName+".after")))
        scripts.push({cfgDir/"jobs"/(jobName+".after"), rd.clone(), true});
    // global after-run script
    if(home.exists(cfgDir/"after"))
        scripts.push({cfgDir/"after", rd.clone(), true});
    // Start executing scripts
    return step(scripts).then([this](){
        return result;
    }).wait(ioContext.waitScope);
}
// kj::TaskSet::ErrorHandler callback: a failure in a background task
// (signal handling, env pipe reading) is logged but does not abort the run.
void Leader::taskFailed(kj::Exception &&exception)
{
    LLOG(ERROR, exception);
}
// Pops the script at the front of the queue, forks and executes it in its
// own process group, then recurses once the script (and everything it
// spawned) has been reaped.
kj::Promise<void> Leader::step(std::queue<Script> &scripts)
{
    // all scripts done
    if(scripts.empty())
        return kj::READY_NOW;
    Script currentScript = kj::mv(scripts.front());
    scripts.pop();
    pid_t pid = fork();
    if(pid == 0) { // child
        // unblock all signals
        sigset_t mask;
        sigfillset(&mask);
        sigprocmask(SIG_UNBLOCK, &mask, nullptr);
        // create a new process group to help us deal with any wayward forks
        setpgid(0, 0);
        LSYSCALL(chdir(currentScript.cwd.toString(false).cStr()));
        // expose the result of the run so far to the script
        setenv("RESULT", to_string(result).c_str(), true);
        // pass the pipe through a variable to allow laminarc to send new env back.
        // Fix: the original buffer was char[4], which overflows for file
        // descriptors >= 1000; use a buffer wide enough for any int and
        // snprintf for safety.
        char pipeNum[16];
        snprintf(pipeNum, sizeof(pipeNum), "%d", setEnvPipe[1]);
        setenv("__LAMINAR_SETENV_PIPE", pipeNum, 1);
        fprintf(stderr, "[laminar] Executing %s\n", currentScript.path.toString().cStr());
        kj::String execPath = (rootPath/currentScript.path).toString(true);
        execl(execPath.cStr(), execPath.cStr(), NULL);
        // only reached if the script could not be executed
        fprintf(stderr, "[laminar] Failed to execute %s\n", currentScript.path.toString().cStr());
        _exit(1);
    }
    // parent: remember who we are waiting for, then continue with the next
    // script once this one is fully reaped
    currentScriptPid = pid;
    currentGroupId = pid;
    return reapChildProcesses().then([&](){
        return step(scripts);
    });
}
// Waits for SIGCHLD, then reaps every child that can be reaped without
// blocking. Resolves when the current script and everything it spawned
// have exited. Orphans reparented to us (we are a subreaper, see
// leader_main) are given SIGHUP and a 5 second grace period before being
// SIGKILLed.
kj::Promise<void> Leader::reapChildProcesses()
{
    return ioContext.unixEventPort.onSignal(SIGCHLD).then([this](siginfo_t) -> kj::Promise<void> {
        while(true) {
            int status;
            errno = 0;
            pid_t pid = waitpid(-1, &status, WNOHANG);
            if(pid == -1 && errno == ECHILD) {
                // all children exited
                return kj::READY_NOW;
            } else if(pid == 0) {
                // child processes are still running
                if(currentScriptPid) {
                    // We could get here if a more deeply nested process was reparented to us
                    // before the primary script executed. Quietly wait until the process we're
                    // waiting for is done
                    return reapChildProcesses();
                }
                // we were aborted by the primary process already, just wait until all
                // SIGKILLs are processed
                if(aborting) {
                    return reapChildProcesses();
                }
                // Otherwise, reparented orphans are on borrowed time
                // TODO list wayward processes?
                fprintf(stderr, "[laminar] sending SIGHUP to adopted child processes\n");
                kill(-currentGroupId, SIGHUP);
                // Whichever happens first wins: either all orphans exit and
                // are reaped, or the 5s timer fires and they are SIGKILLed.
                return ioContext.provider->getTimer().afterDelay(5*kj::SECONDS).then([this]{
                    // TODO: should we mark the job as failed if we had to kill reparented processes?
                    aggressive_recursive_kill(getpid());
                    return reapChildProcesses();
                }).exclusiveJoin(reapChildProcesses());
            } else if(pid == currentScriptPid) {
                // the script we were waiting for is done
                // if we already marked as failed, preserve that
                if(result == RunState::SUCCESS) {
                    // death by SIGTERM/SIGKILL counts as an abort; any other
                    // nonzero exit status counts as a failure
                    if(WIFSIGNALED(status) && (WTERMSIG(status) == SIGTERM || WTERMSIG(status) == SIGKILL))
                        result = RunState::ABORTED;
                    else if(WEXITSTATUS(status) != 0)
                        result = RunState::FAILED;
                }
                currentScriptPid = 0;
            } else {
                // some reparented process was reaped; nothing to record
            }
        }
    });
}
// Continuously reads KEY=VALUE strings sent by child scripts (via
// laminarc) over the env pipe and applies them to our own environment so
// that subsequent scripts inherit them.
// `buffer` is the 1024-byte scratch area allocated by the constructor.
kj::Promise<void> Leader::readEnvPipe(kj::AsyncInputStream *stream, char *buffer) {
    // Fix: read at most 1023 bytes so the nul terminator written below
    // stays inside the 1024-byte buffer (the original read up to 1024
    // bytes and then wrote buffer[1024], one past the end).
    return stream->tryRead(buffer, 1, 1023).then([this,stream,buffer](size_t sz) {
        if(sz > 0) {
            buffer[sz] = '\0';
            if(char* eq = strchr(buffer, '=')) {
                *eq++ = '\0';
                setenv(buffer, eq, 1);
            }
            // NOTE(review): an assignment split across pipe reads would be
            // mis-parsed; this assumes each KEY=VALUE arrives in one read.
            return readEnvPipe(stream, kj::mv(buffer));
        }
        // EOF: the write end was closed
        return kj::Promise<void>(kj::READY_NOW);
    });
}
// Entry point of the per-run leader process, forked+exec'd from the main
// laminard daemon (distinguished via argv[0], see leader.h). Expects JOB
// and RUN in the environment; returns the final RunState encoded as the
// process exit code.
int leader_main(void) {
    auto ioContext = kj::setupAsyncIo();
    auto fs = kj::newDiskFilesystem();
    kj::UnixEventPort::captureSignal(SIGTERM);
    // Don't use captureChildExit or onChildExit because they don't provide a way to
    // reap orphaned child processes. Stick with the more fundamental onSignal.
    kj::UnixEventPort::captureSignal(SIGCHLD);
    // Becoming a subreaper means any descendent process whose parent process disappears
    // will be reparented to this one instead of init (or higher layer subreaper).
    // We do this so that the run will wait until all descendants exit before executing
    // the next step.
#if defined(__FreeBSD__)
    procctl(P_PID, 0, PROC_REAP_ACQUIRE, NULL);
#else
    prctl(PR_SET_CHILD_SUBREAPER, 1, NULL, NULL, NULL);
#endif
    // Become the leader of a new process group. This is so that all child processes
    // will also get a kill signal when the run is aborted
    setpgid(0, 0);
    // Environment inherited from main laminard process. Fix: check both
    // variables for null *before* using them — the original constructed a
    // std::string from a possibly-null pointer and passed a possibly-null
    // pointer to atoi, both undefined behaviour.
    const char* jobName = getenv("JOB");
    const char* runNumStr = getenv("RUN");
    if(!jobName || !runNumStr)
        return EXIT_FAILURE;
    uint runNumber = atoi(runNumStr);
    if(!runNumber)
        return EXIT_FAILURE;
    Leader leader(ioContext, *fs, jobName, runNumber);
    // Parent process will cast back to RunState
    return int(leader.run());
}

View File

@ -1,36 +0,0 @@
///
/// Copyright 2019 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_LEADER_H_
#define LAMINAR_LEADER_H_
// Main function for the leader process which is responsible for
// executing all the scripts which make up a Run. Separating this
// into its own process allows for a cleaner process tree view,
// where it's obvious which script belongs to which run of which
// job, and allows this leader process to act as a subreaper for
// any wayward child processes.
// This could have been implemented as a separate process, but
// instead we just fork & exec /proc/self/exe from the main laminar
// daemon, and distinguish based on argv[0]. This saves installing
// another binary and avoids some associated pitfalls.
// Expects JOB and RUN in the environment; the return value encodes the
// final RunState, which the parent process casts back.
int leader_main(void);
#endif // LAMINAR_LEADER_H_

View File

@ -1,73 +0,0 @@
///
/// Copyright 2015-2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_LOG_H_
#define LAMINAR_LOG_H_
#include <kj/debug.h>
#include <utility>
// Prevent full file paths from appearing in log messages. With compiler
// optimization, this compile-time method should completely prevent the
// paths from being encoded into the binary at all. Assumes / is the
// path separator.
namespace _ {
// Recursive worker: b tracks the best basename candidate seen so far,
// t walks the string; each '/' resets the candidate to the next char.
constexpr const char* static_basename_impl(const char* b, const char* t) {
    return *t == '\0' ? b : static_basename_impl(*t == '/' ? t+1 : b, t+1);
}
// Compile-time basename(): pointer to the part of p after the last '/'.
constexpr const char* static_basename(const char* p) {
    return static_basename_impl(p, p);
}
// Compile-time strlen().
constexpr int static_strlen(const char* s) {
    return *s == '\0' ? 0 : static_strlen(s + 1) + 1;
}
// Copies the first N characters of str (plus a nul) into a struct-wrapped
// char array so the truncated string becomes its own compile-time
// constant, independent of the original string's storage.
template<int N, int...I>
static constexpr decltype(auto) static_alloc_str_impl(const char* str, std::integer_sequence<int, I...>) {
    typedef struct {char buf[N+1];} static_null_terminated;
    return (static_null_terminated) {str[I]..., '\0'};
}
// Public wrapper: generates the 0..N-1 index sequence for the copy above.
template<int N>
static constexpr decltype(auto) static_alloc_str(const char* str) {
    return static_alloc_str_impl<N>(str, std::make_integer_sequence<int, N>());
}
}
#define __FILE_BASE__ (::_::static_alloc_str<::_::static_strlen(::_::static_basename(__FILE__))>\
(::_::static_basename(__FILE__)).buf)
// Provide alternative implementations to those from kj/debug.h which
// use __FILE__ directly and thus cause the full path to be encoded in
// the final binary
#define LLOG(severity, ...) \
if (!::kj::_::Debug::shouldLog(::kj::_::Debug::Severity::severity)) {} else \
::kj::_::Debug::log(__FILE_BASE__, __LINE__, \
::kj::_::Debug::Severity::severity, #__VA_ARGS__, __VA_ARGS__)
#define LASSERT(cond, ...) \
if (KJ_LIKELY(cond)) {} else \
for (::kj::_::Debug::Fault f(__FILE_BASE__, __LINE__, \
::kj::Exception::Type::FAILED, #cond, #__VA_ARGS__, ##__VA_ARGS__);; f.fatal())
#define LSYSCALL(call, ...) \
if (auto _kjSyscallResult = ::kj::_::Debug::syscall([&](){return (call);}, false)) {} else \
for (::kj::_::Debug::Fault f(__FILE_BASE__, __LINE__, \
_kjSyscallResult.getErrorNumber(), #call, #__VA_ARGS__, ##__VA_ARGS__);; f.fatal())
const char* laminar_version();
#endif // LAMINAR_LOG_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2020 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -17,98 +17,33 @@
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#include "laminar.h" #include "laminar.h"
#include "leader.h"
#include "server.h"
#include "log.h"
#include <fcntl.h>
#include <iostream>
#include <kj/async-unix.h>
#include <kj/filesystem.h>
#include <signal.h> #include <signal.h>
#include <unistd.h> #include <kj/debug.h>
#include <sys/types.h>
#include <sys/stat.h>
static Laminar* laminar; std::function<void()> sigHandler;
static Server* server; static void __sigHandler(int) { sigHandler(); }
static void laminar_quit(int) {
// Abort current jobs. Most of the time this isn't necessary since
// systemd stop or other kill mechanism will send SIGTERM to the whole
// process group.
laminar->abortAll();
server->stop();
}
namespace {
constexpr const char* INTADDR_RPC_DEFAULT = "unix-abstract:laminar";
constexpr const char* INTADDR_HTTP_DEFAULT = "*:8080";
constexpr const char* ARCHIVE_URL_DEFAULT = "/archive/";
}
static void usage(std::ostream& out) {
out << "laminard version " << laminar_version() << "\n";
out << "Usage:\n";
out << " -h|--help show this help message\n";
out << " -v enable verbose output\n";
}
static void on_sighup(int)
{
constexpr const char msg[] = "Laminar received and ignored SIGHUP\n";
// write(2) is safe to call inside signal handler.
write(STDERR_FILENO, msg, sizeof(msg) - 1);
}
int main(int argc, char** argv) { int main(int argc, char** argv) {
if(argv[0][0] == '{')
return leader_main();
for(int i = 1; i < argc; ++i) { for(int i = 1; i < argc; ++i) {
if(strcmp(argv[i], "-v") == 0) { if(strcmp(argv[i], "-v") == 0) {
kj::_::Debug::setLogLevel(kj::_::Debug::Severity::INFO); kj::_::Debug::setLogLevel(kj::_::Debug::Severity::INFO);
} else if(strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
return usage(std::cout), EXIT_SUCCESS;
} else {
std::cerr << "Unknown argument " << argv[i] << "\n";
return usage(std::cerr), EXIT_FAILURE;
} }
} }
// The parent process hopefully connected stdin to /dev/null, but do {
// do it again here just in case. This is important because stdin Laminar laminar;
// is inherited to job runs via the leader process, and some sigHandler = [&](){
// processes misbehave if they can successfully block on reading KJ_LOG(INFO, "Received SIGINT");
// from stdin. laminar.stop();
close(STDIN_FILENO); };
LASSERT(open("/dev/null", O_RDONLY) == STDIN_FILENO); signal(SIGINT, &__sigHandler);
signal(SIGTERM, &__sigHandler);
auto ioContext = kj::setupAsyncIo(); laminar.run();
} while(false);
Settings settings; KJ_DBG("end of main");
// Default values when none were supplied in $LAMINAR_CONF_FILE (/etc/laminar.conf)
settings.home = getenv("LAMINAR_HOME") ?: "/var/lib/laminar";
settings.bind_rpc = getenv("LAMINAR_BIND_RPC") ?: INTADDR_RPC_DEFAULT;
settings.bind_http = getenv("LAMINAR_BIND_HTTP") ?: INTADDR_HTTP_DEFAULT;
settings.archive_url = getenv("LAMINAR_ARCHIVE_URL") ?: ARCHIVE_URL_DEFAULT;
server = new Server(ioContext);
laminar = new Laminar(*server, settings);
kj::UnixEventPort::captureChildExit();
signal(SIGINT, &laminar_quit);
signal(SIGTERM, &laminar_quit);
signal(SIGHUP, &on_sighup);
printf("laminard version %s started\n", laminar_version());
server->start();
delete laminar;
delete server;
LLOG(INFO, "Clean exit");
return 0; return 0;
} }

View File

@ -1,63 +0,0 @@
///
/// Copyright 2015-2019 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_MONITORSCOPE_H_
#define LAMINAR_MONITORSCOPE_H_
#include <string>
// Simple struct to define which information a frontend client is interested
// in, both in initial request phase and real-time updates. It corresponds
// loosely to frontend URLs
struct MonitorScope {
    enum Type {
        HOME, // home page: recent builds and statistics
        ALL,  // browse jobs
        JOB,  // a specific job page
        RUN,  // a specific run page
    };
    // Defaults produce a HOME scope sorted by run number, descending,
    // starting on the first page.
    MonitorScope(Type type = HOME, std::string job = std::string(), uint num = 0) :
        type(type),
        job(job),
        num(num),
        page(0),
        field("number"),
        order_desc(true)
    {}
    // whether this scope wants status information for the specified job.
    // Takes the job name by const reference: the original passed
    // std::string by value, copying the string on every call.
    bool wantsStatus(const std::string& ajob, uint anum = 0) const {
        if(type == HOME || type == ALL) return true;
        else return ajob == job;
        // we could have checked that the run number matches, but actually the
        // run page needs to know about a non-matching run number in order to
        // know whether to display the "next" arrow.
    }
    Type type;
    std::string job;
    uint num;
    // sorting
    uint page;
    std::string field;
    bool order_desc;
};
#endif // LAMINAR_MONITORSCOPE_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2020 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,25 +16,28 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_CONTEXT_H_ #ifndef _LAMINAR_NODE_H_
#define LAMINAR_CONTEXT_H_ #define _LAMINAR_NODE_H_
#include <string> #include <string>
#include <set> #include <set>
class Run; class Run;
// Represents a context within which a Run will be executed. Allows applying // Represents a group of executors. Currently almost unnecessary POD
// a certain environment to a set of Jobs, or setting a limit on the number // abstraction, but may be enhanced in the future to support e.g. tags
// of parallel Runs class Node {
class Context {
public: public:
Context() {} Node() {}
std::string name; std::string name;
int numExecutors; int numExecutors;
int busyExecutors = 0; int busyExecutors = 0;
std::set<std::string> jobPatterns; std::set<std::string> tags;
// Attempts to queue the given run to this node. Returns true if succeeded.
bool queue(const Run& run);
}; };
#endif // LAMINAR_CONTEXT_H_ #endif // _LAMINAR_NODE_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2020 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -17,118 +17,51 @@
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#include "resources.h" #include "resources.h"
#include "log.h"
#include "index_html_size.h"
#include <string.h> #include <string.h>
#include <zlib.h>
#define INIT_RESOURCE(route, name, content_type) \ #define INIT_RESOURCE(route, name) \
extern const char _binary_##name##_z_start[];\ extern const char _binary_##name##_z_start[];\
extern const char _binary_##name##_z_end[]; \ extern const char _binary_##name##_z_end[]; \
resources.emplace(route, Resource{_binary_ ## name ## _z_start, _binary_ ## name ## _z_end, content_type}) resources[route] = std::make_pair(_binary_ ## name ## _z_start, _binary_ ## name ## _z_end)
#define CONTENT_TYPE_HTML "text/html; charset=utf-8"
#define CONTENT_TYPE_ICO "image/x-icon"
#define CONTENT_TYPE_PNG "image/png"
#define CONTENT_TYPE_JS "application/javascript; charset=utf-8"
#define CONTENT_TYPE_CSS "text/css; charset=utf-8"
#define CONTENT_TYPE_MANIFEST "application/manifest+json; charset=utf-8"
#define GZIP_FORMAT 16
Resources::Resources() Resources::Resources()
{ {
INIT_RESOURCE("/favicon.ico", favicon_ico, CONTENT_TYPE_ICO); // TODO: Content-type
INIT_RESOURCE("/favicon-152.png", favicon_152_png, CONTENT_TYPE_PNG); INIT_RESOURCE("/", index_html);
INIT_RESOURCE("/icon.png", icon_png, CONTENT_TYPE_PNG); INIT_RESOURCE("/favicon.ico", favicon_ico);
INIT_RESOURCE("/js/app.js", js_app_js, CONTENT_TYPE_JS); INIT_RESOURCE("/favicon-152.png", favicon_152_png);
INIT_RESOURCE("/js/ansi_up.js", js_ansi_up_js, CONTENT_TYPE_JS); INIT_RESOURCE("/progress.png", progress_png);
INIT_RESOURCE("/js/vue.min.js", js_vue_min_js, CONTENT_TYPE_JS); INIT_RESOURCE("/icon.png", icon_png);
INIT_RESOURCE("/js/ansi_up.js", js_ansi_up_js, CONTENT_TYPE_JS); INIT_RESOURCE("/js/app.js", js_app_js);
INIT_RESOURCE("/js/Chart.min.js", js_Chart_min_js, CONTENT_TYPE_JS); INIT_RESOURCE("/js/Chart.HorizontalBar.js", js_Chart_HorizontalBar_js);
INIT_RESOURCE("/style.css", style_css, CONTENT_TYPE_CSS); INIT_RESOURCE("/js/ansi_up.js", js_ansi_up_js);
INIT_RESOURCE("/manifest.webmanifest", manifest_webmanifest, CONTENT_TYPE_MANIFEST); INIT_RESOURCE("/tpl/home.html", tpl_home_html);
// Configure the default template INIT_RESOURCE("/tpl/job.html", tpl_job_html);
setHtmlTemplate(std::string()); INIT_RESOURCE("/tpl/run.html", tpl_run_html);
} INIT_RESOURCE("/tpl/browse.html", tpl_browse_html);
INIT_RESOURCE("/js/angular.min.js", js_angular_min_js);
void Resources::setHtmlTemplate(std::string tmpl) { INIT_RESOURCE("/js/angular-route.min.js", js_angular_route_min_js);
extern const char _binary_index_html_z_start[]; INIT_RESOURCE("/js/angular-sanitize.min.js", js_angular_sanitize_min_js);
extern const char _binary_index_html_z_end[]; INIT_RESOURCE("/js/ansi_up.js", js_ansi_up_js);
INIT_RESOURCE("/js/Chart.min.js", js_Chart_min_js);
z_stream strm; INIT_RESOURCE("/js/Chart.HorizontalBar.js", js_Chart_HorizontalBar_js);
memset(&strm, 0, sizeof(z_stream)); INIT_RESOURCE("/css/bootstrap.min.css", css_bootstrap_min_css);
if(!tmpl.empty()) {
// deflate
index_html.resize(tmpl.size());
deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WBITS|GZIP_FORMAT, 8, Z_DEFAULT_STRATEGY);
strm.next_in = (unsigned char*) tmpl.data();
strm.avail_in = tmpl.size();
strm.next_out = (unsigned char*) index_html.data();
strm.avail_out = tmpl.size();
if(deflate(&strm, Z_FINISH) != Z_STREAM_END) {
LLOG(FATAL, "Failed to compress index.html");
}
index_html.resize(strm.total_out);
} else {
// use the default template from compile-time asset
if(const char* baseUrl = getenv("LAMINAR_BASE_URL")) {
// The administrator needs to customize the <base href>. Unfortunately this seems
// to be the only thing that needs to be customizable but cannot be done via dynamic
// DOM manipulation without heavy compromises. So replace the static char array with
// a modified buffer accordingly.
std::string tmp;
tmp.resize(INDEX_HTML_UNCOMPRESSED_SIZE);
// inflate
inflateInit2(&strm, MAX_WBITS|GZIP_FORMAT);
strm.next_in = (unsigned char*) _binary_index_html_z_start;
strm.avail_in = _binary_index_html_z_end - _binary_index_html_z_start;
strm.next_out = (unsigned char*) tmp.data();
strm.avail_out = INDEX_HTML_UNCOMPRESSED_SIZE;
if(inflate(&strm, Z_FINISH) != Z_STREAM_END) {
LLOG(FATAL, "Failed to uncompress index_html");
}
// replace
// There's no validation on the replacement string, so you can completely mangle
// the html if you like. This isn't really an issue because if you can modify laminar's
// environment you already have elevated permissions
if(auto it = tmp.find("base href=\"/"))
tmp.replace(it+11, 1, baseUrl);
// deflate
index_html.resize(tmp.size());
deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WBITS|GZIP_FORMAT, 8, Z_DEFAULT_STRATEGY);
strm.next_in = (unsigned char*) tmp.data();
strm.avail_in = tmp.size();
strm.next_out = (unsigned char*) index_html.data();
strm.avail_out = tmp.size();
if(deflate(&strm, Z_FINISH) != Z_STREAM_END) {
LLOG(FATAL, "Failed to compress index.html");
}
index_html.resize(strm.total_out);
} else {
index_html = std::string(_binary_index_html_z_start, _binary_index_html_z_end);
}
}
// update resource map
resources["/"] = Resource{index_html.data(), index_html.data() + index_html.size(), CONTENT_TYPE_HTML};
} }
inline bool beginsWith(std::string haystack, const char* needle) { inline bool beginsWith(std::string haystack, const char* needle) {
return strncmp(haystack.c_str(), needle, strlen(needle)) == 0; return strncmp(haystack.c_str(), needle, strlen(needle)) == 0;
} }
bool Resources::handleRequest(std::string path, const char** start, const char** end, const char** content_type) { bool Resources::handleRequest(std::string path, const char **start, const char **end) {
// need to keep the list of "application links" synchronised with the angular // need to keep the list of "application links" synchronised with the angular
// application. We cannot return a 404 for any of these // application. We cannot return a 404 for any of these
auto it = beginsWith(path,"/jobs") || path == "/wallboard" auto it = beginsWith(path,"/jobs")
? resources.find("/") ? resources.find("/")
: resources.find(path); : resources.find(path);
if(it != resources.end()) { if(it != resources.end()) {
*start = it->second.start; *start = it->second.first;
*end = it->second.end; *end = it->second.second;
*content_type = it->second.content_type;
return true; return true;
} }

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2019 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,8 +16,8 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_RESOURCES_H_ #ifndef _LAMINAR_RESOURCES_H_
#define LAMINAR_RESOURCES_H_ #define _LAMINAR_RESOURCES_H_
#include <unordered_map> #include <unordered_map>
#include <utility> #include <utility>
@ -30,21 +30,12 @@ public:
Resources(); Resources();
// If a resource is known for the given path, set start and end to the // If a resource is known for the given path, set start and end to the
// binary data to send to the client, and content_type to its MIME // binary data to send to the client. Function returns false if no resource
// type. Function returns false if no resource for the given path exists // for the given path is known (404)
bool handleRequest(std::string path, const char** start, const char** end, const char** content_type); bool handleRequest(std::string path, const char** start, const char** end);
// Allows providing a custom HTML template. Pass an empty string to use the default.
void setHtmlTemplate(std::string templ = std::string());
private: private:
struct Resource { std::unordered_map<std::string, std::pair<const char*, const char*>> resources;
const char* start;
const char* end;
const char* content_type;
};
std::unordered_map<std::string, Resource> resources;
std::string index_html;
}; };
#endif // LAMINAR_RESOURCES_H_ #endif // _LAMINAR_RESOURCES_H_

View File

@ -1,224 +1,77 @@
<!doctype html> <!doctype html>
<html lang="en"> <html ng-app="laminar">
<head> <head>
<base href="/"> <base href="/">
<meta charset="utf-8"> <meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-capable" content="yes" />
<link rel="apple-touch-icon-precomposed" href="favicon-152.png"> <link rel="apple-touch-icon-precomposed" href="/favicon-152.png">
<link rel="icon" href="favicon.ico">
<link rel="manifest" href="/manifest.webmanifest">
<title>Laminar</title> <title>Laminar</title>
<script src="js/vue.min.js"></script> <script src="/js/angular.min.js"></script>
<script src="js/ansi_up.js"></script> <script src="/js/angular-route.min.js"></script>
<script src="js/Chart.min.js"></script> <script src="/js/angular-sanitize.min.js"></script>
<script src="js/app.js" defer></script> <script src="/js/ansi_up.js" type="text/javascript"></script>
<script src="/js/Chart.min.js"></script>
<script src="/js/Chart.HorizontalBar.js"></script>
<link href="/css/bootstrap.min.css" rel="stylesheet">
<script src="/js/app.js"></script>
<style>
body, html { height: 100%; }
.navbar { margin-bottom: 0; }
.navbar-brand { margin: 0 -15px; padding: 7px 15px }
.navbar-brand>img { display: inline; }
a.navbar-btn { color: #9d9d9d; }
a.navbar-btn.active { color: #fff; }
a.navbar-btn:hover { color: #fff; text-decoration: none; }
a.navbar-btn:focus { color: #fff; }
dt,dd { line-height: 2; }
canvas {
width: 100% !important;
max-width: 800px;
height: auto !important;
}
.progress {
height: 10px;
margin-top: 5px;
margin-bottom: 0;
}
.spin {
-webkit-animation: rotation 2s infinite linear;
}
@-webkit-keyframes rotation {
from {-webkit-transform: rotate(0deg);}
to {-webkit-transform: rotate(359deg);}
}
img.spin.small {
width: 11px;
height: 11px;
}
img.spin {
-webkit-animation:spin 4s linear infinite;
-moz-animation:spin 4s linear infinite;
animation:spin 4s linear infinite;
}
@-moz-keyframes spin { 100% { -moz-transform: rotate(360deg); } }
@-webkit-keyframes spin { 100% { -webkit-transform: rotate(360deg); } }
@keyframes spin { 100% { -webkit-transform: rotate(360deg); transform:rotate(360deg); } }
<link href="style.css" rel="stylesheet"> </style>
</head> </head>
<body> <body>
<template id="home"><div id="page-home-main"> <nav class="navbar navbar-inverse">
<nav> <div class="container-fluid">
<table class="table striped"> <div>
<tr v-for="job in jobsQueued"> <a class="navbar-brand" href="/"><img src="/icon.png">{{title}}</a>
<td> <a class="btn navbar-btn pull-right" href="/jobs">Jobs</a>
<span v-html="runIcon(job.result)"></span>
<router-link :to="'jobs/'+job.name">{{job.name}}</router-link>
<router-link :to="'jobs/'+job.name+'/'+job.number">#{{job.number}}</router-link>
<i>queued</i>
</td>
</tr>
<tr v-for="job in jobsRunning">
<td>
<span v-html="runIcon(job.result)"></span>
<router-link :to="'jobs/'+job.name">{{job.name}}</router-link>
<router-link :to="'jobs/'+job.name+'/'+job.number">#{{job.number}}</router-link>
<small style="float:right;">{{formatDuration(job.started, job.completed)}}</small>
<div class="progress" style="margin-top: 5px;">
<div class="progress-bar" :class="{overtime:job.overtime,indeterminate:!job.etc}" :style="job.etc && {width:job.progress+'%'}"></div>
</div>
</td>
</tr>
<tr v-for="job in jobsRecent">
<td>
<span v-html="runIcon(job.result)"></span>
<router-link :to="'jobs/'+job.name">{{job.name}}</router-link>
<router-link :to="'jobs/'+job.name+'/'+job.number">#{{job.number}}</router-link><br>
<small>Took {{formatDuration(job.started, job.completed)}} at {{formatDate(job.started)}}</small>
</td>
</tr>
</table>
</nav>
<section style="border-left: 1px solid #d0d0d0;">
<div id="page-home-stats">
<div>
<h3>Recent regressions</h3>
<table>
<tr v-for="job in resultChanged" v-if="job.lastFailure>job.lastSuccess"><td><router-link :to="'jobs/'+job.name+'/'+job.lastFailure">{{job.name}} #{{job.lastFailure}}</router-link> since <router-link :to="'jobs/'+job.name+'/'+job.lastSuccess">#{{job.lastSuccess}}</router-link></tr>
</table>
</div>
<div>
<h3>Low pass rates</h3>
<table>
<tr v-for="job in lowPassRates"><td><router-link :to="'jobs/'+job.name">{{job.name}}</router-link></td><td>{{Math.round(job.passRate*100)}}&nbsp;%</td></tr>
</table>
</div>
<div>
<h3>Utilization</h3>
<div><canvas id="chartUtil"></canvas></div>
</div>
</div>
<div id="page-home-plots">
<div><canvas id="chartBpd"></canvas></div>
<div><canvas id="chartBpj"></canvas></div>
<div><canvas id="chartTpj"></canvas></div>
<div><canvas id="chartBuildTimeChanges"></canvas></div>
</div>
</section>
</div></template>
<template id="jobs"><div>
<nav style="display: grid; grid-auto-flow: column; justify-content: space-between; align-items: end; padding: 10px 15px;">
<div style="display:grid; grid-auto-flow: column; grid-gap: 15px; padding: 5px 0;">
<a v-show="ungrouped.length" :class="{'active':group==null}" href v-on:click.prevent="group = null">Ungrouped Jobs</a>
<a v-for="g in Object.keys(groups)" :class="{'active':g==group}" href v-on:click.prevent="group = g">{{g}}</a>
</div>
<div style="display: grid; grid-auto-flow: column; align-items: center; gap: 15px">
<router-link :to="wallboardLink()" style="display: inherit;" title="Wallboard">
<svg width="18" viewBox="0 0 13 13">
<g fill="#728494">
<rect x="0" y="2" width="6" height="4" />
<rect x="0" y="7" width="6" height="4" />
<rect x="7" y="2" width="6" height="4" />
<rect x="7" y="7" width="6" height="4" />
</g>
</svg>
</router-link>
<input class="form-control" id="jobFilter" v-model="search" placeholder="Filter...">
</div>
</nav>
<table class="striped" id="job-list">
<tr v-for="job in filteredJobs()">
<td><router-link :to="'jobs/'+job.name">{{job.name}}</router-link></td>
<td style="white-space: nowrap;"><span v-html="runIcon(job.result)"></span> <router-link :to="'jobs/'+job.name+'/'+job.number">#{{job.number}}</router-link></td>
<td>{{formatDate(job.started)}}</td>
<td>{{formatDuration(job.started,job.completed)}}</td>
</tr>
</table>
</div></template>
<template id="wallboard"><div class="wallboard">
<router-link :to="'jobs/'+job.name+'/'+job.number" tag="div" v-for="job in wallboardJobs()" :data-result="job.result">
<span style="font-size: 36px; font-weight: bold;">{{job.name}} #{{job.number}}</span><br>
<span style="font-size: 30px;">{{formatDate(job.started)}}</span><br>
<span style="font-size: 26px;">{{job.reason}}</span>
</router-link>
</div></template>
<template id="job"><div id="page-job-main">
<div style="padding: 15px;">
<h2>{{route.params.name}}</h2>
<div v-html="description"></div>
<dl>
<dt>Last Successful Run</dt>
<dd><router-link v-if="lastSuccess" :to="'jobs/'+route.params.name+'/'+lastSuccess.number">#{{lastSuccess.number}}</router-link> {{lastSuccess?' - at '+formatDate(lastSuccess.started):'never'}}</dd>
<dt>Last Failed Run</dt>
<dd><router-link v-if="lastFailed" :to="'jobs/'+route.params.name+'/'+lastFailed.number">#{{lastFailed.number}}</router-link> {{lastFailed?' - at '+formatDate(lastFailed.started):'never'}}</dd>
</dl>
</div>
<div style="display: grid; justify-content: center; padding: 15px;">
<canvas id="chartBt"></canvas>
</div>
<div style="grid-column: 1/-1">
<table class="striped">
<thead><tr>
<th><a class="sort" :class="(sort.field=='result'?sort.order:'')" v-on:click="do_sort('result')">&nbsp;</a></th>
<th>Run <a class="sort" :class="(sort.field=='number'?sort.order:'')" v-on:click="do_sort('number')">&nbsp;</a></th>
<th class="text-center">Started <a class="sort" :class="(sort.field=='started'?sort.order:'')" v-on:click="do_sort('started')">&nbsp;</a></th>
<th class="text-center">Duration <a class="sort" :class="(sort.field=='duration'?sort.order:'')" v-on:click="do_sort('duration')">&nbsp;</a></th>
<th class="text-center vp-sm-hide">Reason <a class="sort" :class="(sort.field=='reason'?sort.order:'')" v-on:click="do_sort('reason')">&nbsp;</a></th>
</tr></thead>
<tr v-for="job in jobsQueued.concat(jobsRunning).concat(jobsRecent)" track-by="$index">
<td style="width:1px"><span v-html="runIcon(job.result)"></span></td>
<td><router-link :to="'jobs/'+route.params.name+'/'+job.number">#{{job.number}}</router-link></td>
<td class="text-center"><span v-if="job.result!='queued'">{{formatDate(job.started)}}</span></td>
<td class="text-center"><span v-if="job.result!='queued'">{{formatDuration(job.started, job.completed)}}</span></td>
<td class="text-center vp-sm-hide">{{job.reason}}</td>
</tr>
</table>
<div style="float: right; margin: 15px; display: inline-grid; grid-auto-flow: column; gap: 10px; align-items: center">
<button v-on:click="page_prev" :disabled="sort.page==0">&laquo;</button>
<span>Page {{sort.page+1}} of {{pages}}</span>
<button class="btn" v-on:click="page_next" :disabled="sort.page==pages-1">&raquo;</button>
</div> </div>
</div> </div>
</div></template> </nav>
<ol class="breadcrumb">
<template id="run"><div style="display: grid; grid-template-rows: auto 1fr"> <li ng-repeat="n in bc.nodes track by $index"><a href="{{n.href}}">{{n.label}}</a></li>
<div style="padding: 15px"> <li class="active">{{bc.current}}</li>
<div style="display: grid; grid-template-columns: auto 25px auto auto 1fr 400px; gap: 5px; align-items: center"> </ol>
<h2 style="white-space: nowrap"><span v-html="runIcon(job.result)"></span> <router-link :to="'jobs/'+route.params.name">{{route.params.name}}</router-link> #{{route.params.number}}</h2> <div ng-view></div>
<span></span>
<router-link :disabled="route.params.number == 1" :to="'jobs/'+route.params.name+'/'+(route.params.number-1)" tag="button">&laquo;</router-link>
<router-link :disabled="route.params.number == latestNum" :to="'jobs/'+route.params.name+'/'+(parseInt(route.params.number)+1)" tag="button">&raquo;</router-link>
<span></span>
<div class="progress" v-show="job.result == 'running'">
<div class="progress-bar" :class="{overtime:job.overtime,indeterminate:!job.etc}" :style="job.etc && {width:job.progress+'%'}"></div>
</div>
</div>
<div id="page-run-detail">
<dl>
<dt>Reason</dt><dd>{{job.reason}}</dd>
<dt v-show="job.upstream.num > 0">Upstream</dt><dd v-show="job.upstream.num > 0"><router-link :to="'jobs/'+job.upstream.name">{{job.upstream.name}}</router-link> <router-link :to="'jobs/'+job.upstream.name+'/'+job.upstream.num">#{{job.upstream.num}}</router-link></li></dd>
<dt>Queued for</dt><dd>{{formatDuration(job.queued, job.started ? job.started : Math.floor(Date.now()/1000))}}</dd>
<dt v-show="job.started">Started</dt><dd v-show="job.started">{{formatDate(job.started)}}</dd>
<dt v-show="runComplete(job)">Completed</dt><dd v-show="job.completed">{{formatDate(job.completed)}}</dd>
<dt v-show="job.started">Duration</dt><dd v-show="job.started">{{formatDuration(job.started, job.completed)}}</dd>
</dl>
<dl v-show="job.artifacts.length">
<dt>Artifacts</dt>
<dd>
<ul style="margin-bottom: 0">
<li v-for="art in job.artifacts"><a :href="art.url" target="_self">{{art.filename}}</a> [{{ art.size | iecFileSize }}]</li>
</ul>
</dd>
</dl>
</div>
</div>
<div class="console-log">
<code></code>
<span v-show="!logComplete" v-html="runIcon('running')" style="display: block;"></span>
</div>
</div></template>
<main id="app" style="display: grid; grid-template-rows: auto 1fr auto; height: 100%;">
<nav id="nav-top" style="display: grid; grid-template-columns: auto auto 1fr auto auto; grid-gap: 15px;">
<router-link to="." style="display: grid; grid-auto-flow: column; align-items: center; margin: 5px; font-size: 20px;">
<img src="icon.png"> {{title}}
</router-link>
<div id="nav-top-links" style="display: grid; grid-auto-flow: column; justify-content: start; gap: 15px; padding: 0 15px; align-items: center; font-size: 16px;">
<router-link to="jobs">Jobs</router-link>
<router-link v-for="(crumb,i) in route.path.slice(1).split('/').slice(1,-1)" :to="route.path.split('/').slice(0,i+3).join('/')">{{crumb}}</router-link>
</div>
<div></div>
<span class="version">{{version}}</span>
<div style="display: grid; align-items: center; padding: 0 15px">
<a v-on:click="toggleNotifications(!notify)" class="nav-icon" :class="{active:notify}" v-show="supportsNotifications" :title="(notify?'Disable':'Enable')+' notifications'">
<svg width="18" viewBox="0 0 12 12">
<g stroke-width="0.5">
<path d="m 6,9 c -1,0 -1,0 -1,1 0,1 2,1 2,0 0,-1 0,-1 -1,-1 z" />
<path d="m 1,10 c 3,-3 1,-9 5,-9 4,0 2,6 5,9 1,1 -3,-1 -5,-1 -2,0 -6,2 -5,1 z" />
</g>
</svg>
</a>
</div>
</nav>
<router-view></router-view>
<div id="connecting-overlay" :class="{shown:!connected}">
<div><span v-html="runIcon('running')"></span> Connecting...</div>
</div>
</main>
</body> </body>
</html> </html>

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +0,0 @@
{
"short_name": "Laminar",
"name": "Laminar",
"description": "Lightweight Continuous Integration",
"icons": [
{
"src": "/icon.png",
"type": "image/png",
"sizes": "36x36"
},
{
"src": "/favicon-152.png",
"type": "image/png",
"sizes": "152x152"
}
],
"start_url": "/",
"background_color": "#2F3340",
"display": "standalone",
"scope": "/"
}

BIN
src/resources/progress.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 816 B

View File

@ -1,327 +0,0 @@
/* colour scheme */
:root {
--main-bg: #fff;
--main-fg: #333;
--nav-bg: #2F3340;
--nav-bg-darker: #292b33;
--nav-fg: #d0d0d0;
--nav-fg-light: #fafafa;
--icon-enabled: #d8cb83;
--success: #74af77;
--failure: #883d3d;
--running: #4786ab;
--warning: #de9a34;
--link-fg: #2f4579;
--alt-row-bg: #fafafa;
--border-grey: #d0d0d0;
}
/* basic resets */
html { box-sizing: border-box; }
*, *:before, *:after { box-sizing: inherit; }
body, h1, h2, h3, h4, h5, h6, p, ol, ul {
margin: 0;
padding: 0;
font-weight: normal;
}
ol, ul { list-style: none; }
body, html { height: 100%; }
body {
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
color: var(--main-fg);
}
/* main header bar */
#nav-top { background-color: var(--nav-bg); }
#nav-top-links { background-color: var(--nav-bg-darker); }
#nav-top a { color: var(--nav-fg); }
#nav-top a:hover { color: white; text-decoration: none; }
.version {
align-self: center;
font-size: x-small;
color: rgba(255,255,255,0.3);
}
/* navbar svg icons (enable notifications) */
.nav-icon { display: inherit; }
.nav-icon svg { fill: var(--nav-fg); stroke: #000; }
.nav-icon:hover { cursor: pointer; }
.nav-icon:hover svg { fill: var(--nav-fg-light); }
.nav-icon.active svg { fill: var(--icon-enabled); }
/* anchors */
a { color: var(--link-fg); text-decoration: none; }
a:visited { color: var(--link-fg); }
a:active { color: var(--link-fg); }
a:hover { text-decoration: underline; }
/* charts */
canvas {
width: 100% !important;
max-width: 800px;
height: auto !important;
}
#popup-connecting {
position: fixed;
background: white;
border: 1px solid #ddd;
bottom: 10px;
right: 10px;
padding: 20px;
}
/* status icons */
.status {
display: inline-block;
width: 1em;
vertical-align: middle;
margin-top: -2px; /* pixel-pushing */
}
svg.success path { fill: var(--success); }
svg.failed path { fill: var(--failure); }
svg.running circle { stroke: var(--running); }
svg.queued circle { fill: var(--nav-fg); }
svg.queued path { stroke: white; }
/* sort indicators */
a.sort {
position: relative;
margin-left: 7px;
}
a.sort:before, a.sort:after {
border: 4px solid transparent;
content: "";
position: absolute;
display: block;
height: 0;
width: 0;
right: 0;
top: 50%;
}
a.sort:before {
border-bottom-color: var(--border-grey);
margin-top: -9px;
}
a.sort:after {
border-top-color: var(--border-grey);
margin-top: 1px;
}
a.sort.dsc:after { border-top-color: var(--main-fg); }
a.sort.asc:before { border-bottom-color: var(--main-fg); }
a.sort:hover { text-decoration: none; cursor:pointer; }
a.sort:not(.asc):hover:before { border-bottom-color: var(--main-fg); }
a.sort:not(.dsc):hover:after { border-top-color: var(--main-fg); }
/* job group tabs */
a.active { color: var(--main-fg); }
a.active:hover { text-decoration: none; }
/* run console ansi colors (based on base16-default-dark and base16-bright) */
:root {
--ansi-black: #181818;
--ansi-red: #ab4642;
--ansi-green: #a1b56c;
--ansi-yellow: #f7ca88;
--ansi-blue: #7cafc2;
--ansi-magenta: #ba8baf;
--ansi-cyan: #86c1b9;
--ansi-white: #d8d8d8;
--ansi-brightblack: #000000;
--ansi-brightred: #fb0120;
--ansi-brightgreen: #a1c659;
--ansi-brightyellow: #fda331;
--ansi-brightblue: #6fb3d2;
--ansi-brightmagenta: #d381c3;
--ansi-brightcyan: #76c7b7;
--ansi-brightwhite: #e0e0e0;
}
.ansi-black-fg { color: var(--ansi-black); } .ansi-black-bg { background-color: var(--ansi-black); }
.ansi-red-fg { color: var(--ansi-red); } .ansi-red-bg { background-color: var(--ansi-red); }
.ansi-green-fg { color: var(--ansi-green); } .ansi-green-bg { background-color: var(--ansi-green); }
.ansi-yellow-fg { color: var(--ansi-yellow); } .ansi-yellow-bg { background-color: var(--ansi-yellow); }
.ansi-blue-fg { color: var(--ansi-blue); } .ansi-blue-bg { background-color: var(--ansi-blue); }
.ansi-magenta-fg { color: var(--ansi-magenta); } .ansi-magenta-bg { background-color: var(--ansi-magenta); }
.ansi-cyan-fg { color: var(--ansi-cyan); } .ansi-cyan-bg { background-color: var(--ansi-cyan); }
.ansi-white-fg { color: var(--ansi-white); } .ansi-white-bg { background-color: var(--ansi-white); }
.ansi-bright-black-fg { color: var(--ansi-brightblack); } .ansi-bright-black-bg { background-color: var(--ansi-brightblack); }
.ansi-bright-red-fg { color: var(--ansi-brightred); } .ansi-bright-red-bg { background-color: var(--ansi-brightred); }
.ansi-bright-green-fg { color: var(--ansi-brightgreen); } .ansi-bright-green-bg { background-color: var(--ansi-brightgreen); }
.ansi-bright-yellow-fg { color: var(--ansi-brightyellow); } .ansi-bright-yellow-bg { background-color: var(--ansi-brightyellow); }
.ansi-bright-blue-fg { color: var(--ansi-brightblue); } .ansi-bright-blue-bg { background-color: var(--ansi-brightblue); }
.ansi-bright-magenta-fg { color: var(--ansi-brightmagenta); } .ansi-bright-magenta-bg { background-color: var(--ansi-brightmagenta); }
.ansi-bright-cyan-fg { color: var(--ansi-brightcyan); } .ansi-bright-cyan-bg { background-color: var(--ansi-brightcyan); }
.ansi-bright-white-fg { color: var(--ansi-brightwhite); } .ansi-bright-white-bg { background-color: var(--ansi-brightwhite); }
/* run console */
.console-log { padding: 15px; background-color: var(--ansi-black); }
.console-log code { white-space: pre-wrap; color: var(--ansi-white); }
.console-log a { color: var(--ansi-brightwhite); }
/* text input (job filtering) */
input { padding: 5px 8px; }
/* description list (run detail) */
dl { display: grid; grid-template-columns: auto 1fr; }
dt { text-align: right; font-weight: bold; min-width: 85px; }
dt,dd { line-height: 2; }
/* tables */
table { border-spacing: 0; width: 100%; }
th { text-align: left; border-bottom: 1px solid var(--border-grey); }
td, th { padding: 8px; }
table.striped td { border-top: 1px solid var(--border-grey); }
table.striped tr:nth-child(even) { background-color: var(--alt-row-bg); }
td:first-child, th:first-child { padding-left: 15px; }
td:last-child, th:last-child { padding-right: 15px; }
/* next/prev navigation buttons */
button {
border: 1px solid var(--border-grey);
background-color: var(--alt-row-bg);
padding: 6px;
min-width: 29px;
}
button[disabled] { cursor: not-allowed; color: var(--border-grey); }
button:not([disabled]) { cursor: pointer; color: var(--main-fg); }
/* progress bar */
.progress {
width: 100%;
height: 8px;
border: 1px solid;
border-radius: 4px;
overflow: hidden;
border-color: var(--border-grey);
background-color: var(--alt-row-bg);
}
.progress-bar {
height: 100%;
background-color: var(--running);
background-image: linear-gradient(45deg, transparent 35%, rgba(255,255,255,0.18) 35% 65%, transparent 65%);
background-size: 1rem;
transition: width .6s linear;
}
.progress-bar.overtime { background-color: var(--warning); }
.progress-bar.indeterminate {
animation: animate-stripes 1s linear infinite;
}
@keyframes animate-stripes {
from { background-position: 1rem 0; } to { background-position: 0 0; }
}
/* wallboard */
.wallboard {
display: flex;
flex-wrap: wrap-reverse;
flex-direction: row-reverse;
gap: 20px;
padding: 20px;
position: fixed;
height: 100%;
width: 100%;
overflow: auto;
background-color: #000
}
.wallboard > div {
padding: 30px;
flex-grow: 1;
background-color: var(--failure);
color: var(--nav-fg-light);
}
.wallboard > div:hover {
cursor: pointer;
}
.wallboard > div[data-result="running"] {
animation: wallboard-bg-fade 2s ease infinite;
}
@keyframes wallboard-bg-fade {
from { background-color: #4786ab; }
50% { background-color: #446597; }
to { background-color: #4786ab; }
}
.wallboard > div[data-result="success"] {
background-color: var(--success);
color: var(--main-fg);
}
/* connecting overlay */
#connecting-overlay {
position: fixed;
top: 0; right: 0; bottom: 0; left: 0;
display: grid;
align-content: end; justify-content: end;
color: var(--nav-fg-light);
font-size: 18px;
padding: 30px;
visibility: hidden;
background-color: rgba(0,0,0,0.75);
opacity: 0;
transition: opacity 0.5s ease, visibility 0s 0.5s;
}
#connecting-overlay.shown {
visibility: visible;
opacity: 1;
transition: opacity 0.5s ease 2s;
}
#connecting-overlay > div { opacity: 1; }
/* responsive layout */
#page-home-main {
display: grid;
grid-template-columns: auto 1fr;
}
@media (max-width: 865px) {
#page-home-main {
grid-template-columns: 1fr;
}
.vp-sm-hide { display: none; }
}
#page-home-stats {
display: grid;
grid-template-columns: 1fr 1fr 1fr;
padding: 15px;
gap: 15px;
max-width: 1600px;
margin: auto;
text-align: center;
}
@media (max-width: 650px) {
#page-home-stats {
grid-template-columns: 1fr;
}
}
#page-home-plots {
display: grid;
grid-template-columns: 1fr 1fr;
padding: 5px;
gap: 5px;
max-width: 1600px;
margin: auto;
}
@media (max-width: 1095px) {
#page-home-plots {
grid-template-columns: 1fr;
}
}
#page-job-main {
display: grid;
grid-template: auto 1fr / minmax(550px, 1fr) 1fr;
}
@media (max-width: 965px) {
#page-job-main {
grid-template: auto auto 1fr / 1fr;
}
}
#page-run-detail {
display: grid;
grid-template-columns: minmax(400px, auto) 1fr;
gap: 5px;
}
@media (max-width: 780px) {
#page-run-detail {
grid-template-columns: 1fr;
}
}

View File

@ -0,0 +1,21 @@
<div class="container-fluid">
<div class="row">
<div class="col-xs-12">
<div class="pull-right">
<input class="form-control" id="jobFilter" ng-model="search.name" placeholder="Filter...">
</div>
<ul class="nav nav-tabs">
<li ng-class="{active:activeTag(null)}"><a href ng-click="currentTag = null">All Jobs</a></li>
<li ng-repeat="tag in tags" ng-class="{active:activeTag(tag)}"><a href ng-click="$parent.currentTag = tag">{{tag}}</a></li>
</ul>
<style>table#joblist tr:first-child td { border-top: 0; }</style>
<table class="table table-striped" id="joblist">
<tr class="animate-repeat" ng-repeat="job in jobs | filter:bytag | filter:search">
<td><a href="jobs/{{job.name}}">{{job.name}}</a></td>
<td class="text-center"><span ng-bind-html="runIcon(job.result)"></span> <a href="jobs/{{job.name}}/{{job.number}}">#{{job.number}}</a></td>
<td class="text-center">{{formatDate(job.started)}}</td>
</tr>
</table>
</div>
</div>
</div>

View File

@ -0,0 +1,58 @@
<div class="container-fluid">
<div class="row">
<div class="col-sm-5 col-md-4 col-lg-3 dash">
<table class="table table-bordered">
<tr class="animate-repeat" ng-repeat="job in jobsQueued track by $index">
<td><a href="jobs/{{job.name}}">{{job.name}}</a> <i>queued</i></td>
</tr>
<tr class="animate-repeat" ng-repeat="job in jobsRunning track by $index">
<td><img class="spin small" src="/progress.png"> <a href="jobs/{{job.name}}">{{job.name}}</a> <a href="jobs/{{job.name}}/{{job.number}}">#{{job.number}}</a> <div class="progress">
<div class="progress-bar progress-bar-{{job.overtime?'warning':'info'}} progress-bar-striped {{job.etc?'':'active'}}" style="width:{{!job.etc?'100':job.progress}}%"></div>
</div>
</td>
</tr>
<tr class="animate-repeat" ng-repeat="job in jobsRecent track by $index">
<td><span ng-bind-html="runIcon(job.result)"></span> <a href="jobs/{{job.name}}">{{job.name}}</a> <a href="jobs/{{job.name}}/{{job.number}}">#{{job.number}}</a><br><small>Took {{job.duration}}s at {{formatDate(job.started)}}</small></td>
</tr>
</table>
</div>
<div class="col-sm-7 col-md-8 col-lg-9">
<div class="row">
<div class="col-md-6">
<div class="panel panel-default">
<div class="panel-heading">Total builds per day this week</div>
<div class="panel-body">
<canvas id="chartBpd"></canvas>
</div>
</div>
</div>
<div class="col-md-6">
<div class="panel panel-default">
<div class="panel-heading">Builds per job in the last 24 hours</div>
<div class="panel-body" id="chartStatus">
<canvas id="chartBpj"></canvas>
</div>
</div>
</div>
<div class="col-md-6">
<div class="panel panel-default">
<div class="panel-heading">Average build time per job this week</div>
<div class="panel-body">
<canvas id="chartTpj"></canvas>
</div>
</div>
</div>
<div class="col-md-6">
<div class="panel panel-default">
<div class="panel-heading">Current executor utilization</div>
<div class="panel-body">
<canvas id="chartUtil"></canvas>
</div>
</div>
</div>
</div>
</div>
</div>
</div>

View File

@ -0,0 +1,51 @@
<div class="container-fluid">
<div class="row">
<div class="col-sm-5 col-md-6 col-lg-7">
<h3>{{name}}</h3>
<dl class="dl-horizontal">
<dt>Last Successful Run</dt><dd>
<a ng-show="lastSuccess" href="jobs/{{name}}/{{lastSuccess.number}}">#{{lastSuccess.number}}</a>
{{lastSuccess?" - at "+formatDate(lastSuccess.started):"never"}}</dd>
<dt>Last Failed Run</dt><dd>
<a ng-show="lastFailed" href="jobs/{{name}}/{{lastFailed.number}}">#{{lastFailed.number}}</a>
{{lastFailed?" - at "+formatDate(lastFailed.started):"never"}}</dd>
</dl>
</div>
<div class="col-sm-7 col-md-6 col-lg-5">
<div class="panel panel-default">
<div class="panel-heading">Build time</div>
<div class="panel-body">
<canvas id="chartBt"></canvas>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-xs-12">
<table class="table table-striped"><thead>
<tr><th>Run</th><th class="text-center">Started</th><th class="text-center">Duration</th><th class="text-center hidden-xs">Reason</th></tr></thead>
<tr ng-show="nQueued">
<td colspan="4"><i>{{nQueued}} run(s) queued</i></td>
</tr>
<tr class="animate-repeat" ng-repeat="job in jobsRunning track by $index">
<td><img class="spin small" src="/progress.png"> <a href="jobs/{{name}}/{{job.number}}">#{{job.number}}</a></td>
<td class="text-center">{{formatDate(job.started)}}</td>
<td class="text-center">--</td>
<td class="text-center hidden-xs">{{job.reason}}</td>
</tr>
<tr class="animate-repeat" ng-repeat="job in jobsRecent track by $index">
<td><span ng-bind-html="runIcon(job.result)"></span> <a href="jobs/{{name}}/{{job.number}}">#{{job.number}}</a></td>
<td class="text-center">{{formatDate(job.started)}}</td>
<td class="text-center">{{job.duration + " seconds"}}</td>
<td class="text-center hidden-xs">{{job.reason}}</td>
</tr>
</table>
</div>
</div>
</div>

View File

@ -0,0 +1,43 @@
<div class="container-fluid">
<div class="row">
<div class="col-sm-5 col-md-6 col-lg-7">
<h3 style="float:left"><img class="spin" src="/progress.png" ng-hide="job.result"><span ng-bind-html="runIcon(job.result)"></span> {{name}} #{{num}}</h3>
<nav class="pull-left">
<ul class="pagination" style="margin:15px 20px">
<li><a href="jobs/{{name}}/{{num-1}}">&laquo;</a></li>
<li ng-show="latestNum > num"><a ng-href="jobs/{{name}}/{{num+1}}">&raquo;</a></li>
</ul>
</nav>
<div style="clear:both;"></div>
<dl class="dl-horizontal">
<dt>Reason</dt><dd>{{job.reason}}</dd>
<dt>Queued for</dt><dd>{{job.queued}}s</dd>
<dt>Started</dt><dd>{{formatDate(job.started)}}</dd>
<dt ng-show="job.result">Completed</dt><dd ng-show="job.result">{{formatDate(job.completed)}}</dd>
<dt ng-show="job.result">Duration</dt><dd ng-show="job.result">{{job.duration}}s</dd>
</dl>
</div>
<div class="col-sm-7 col-md-6 col-lg-5">
<div class="progress" ng-hide="job.result">
<div class="progress-bar progress-bar-{{job.overtime?'warning':'info'}} progress-bar-striped {{job.etc?'':'active'}}" style="width:{{!job.etc?'100':job.progress}}%;"></div>
</div>
<div class="panel panel-default" ng-show="job.artifacts.length">
<div class="panel-heading">Artifacts</div>
<div class="panel-body">
<ul class="list-unstyled" style="margin-bottom: 0">
<li ng-repeat="art in job.artifacts">
<a href="{{art.url}}" target="_self">{{art.filename}}</a>
</li>
</ul>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-xs-12">
<button type="button" class="btn btn-default btn-xs pull-right" ng-class="{active:autoscroll}" ng-click="autoscroll = !autoscroll" style="margin-top:10px">Autoscroll</button>
<h4>Console output</h4>
<pre ng-bind-html="log"></pre>
</div>
</div>
</div>

View File

@ -1,182 +0,0 @@
///
/// Copyright 2015-2022 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include "rpc.h"
#include "laminar.capnp.h"
#include "laminar.h"
#include "log.h"
namespace {
// Translate an internal RunState into the result enum exposed to RPC
// clients. Only terminal states map directly; anything else (queued,
// running, unknown) is reported as UNKNOWN.
LaminarCi::JobResult fromRunState(RunState state) {
    if(state == RunState::SUCCESS)
        return LaminarCi::JobResult::SUCCESS;
    if(state == RunState::FAILED)
        return LaminarCi::JobResult::FAILED;
    if(state == RunState::ABORTED)
        return LaminarCi::JobResult::ABORTED;
    return LaminarCi::JobResult::UNKNOWN;
}
}
// This is the implementation of the Laminar Cap'n Proto RPC interface.
// As such, it implements the pure virtual interface generated from
// laminar.capnp with calls to the primary Laminar class
class RpcImpl : public LaminarCi::Server {
public:
    // Keeps a reference to the Laminar instance; every RPC method below
    // delegates to it. The reference must outlive this server object.
    RpcImpl(Laminar& l) :
        LaminarCi::Server(),
        laminar(l)
    {
    }

    virtual ~RpcImpl() {
    }

    // Queue a job, without waiting for it to start. Responds with
    // SUCCESS and the assigned build number, or FAILED if queueJob
    // returned null (e.g. unknown job).
    kj::Promise<void> queue(QueueContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC queue", jobName);
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params(context.getParams().getParams()), context.getParams().getFrontOfQueue());
        if(Run* r = run.get()) {
            context.getResults().setResult(LaminarCi::MethodResult::SUCCESS);
            context.getResults().setBuildNum(r->build);
        } else {
            context.getResults().setResult(LaminarCi::MethodResult::FAILED);
        }
        return kj::READY_NOW;
    }

    // Start a job, without waiting for it to finish. The response is
    // deferred until the run actually begins executing.
    kj::Promise<void> start(StartContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC start", jobName);
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params(context.getParams().getParams()), context.getParams().getFrontOfQueue());
        if(Run* r = run.get()) {
            // NOTE(review): the continuation captures the raw pointer r;
            // presumably the queued Run is kept alive by Laminar until it
            // starts — confirm against Laminar::queueJob's ownership.
            return r->whenStarted().then([context,r]() mutable {
                context.getResults().setResult(LaminarCi::MethodResult::SUCCESS);
                context.getResults().setBuildNum(r->build);
            });
        } else {
            context.getResults().setResult(LaminarCi::MethodResult::FAILED);
            return kj::READY_NOW;
        }
    }

    // Start a job and wait for the result. The terminal RunState is
    // mapped to the RPC JobResult enum via fromRunState.
    kj::Promise<void> run(RunContext context) override {
        std::string jobName = context.getParams().getJobName();
        LLOG(INFO, "RPC run", jobName);
        std::shared_ptr<Run> run = laminar.queueJob(jobName, params(context.getParams().getParams()), context.getParams().getFrontOfQueue());
        if(run) {
            // Capturing the shared_ptr keeps the Run alive until completion.
            return run->whenFinished().then([context,run](RunState state) mutable {
                context.getResults().setResult(fromRunState(state));
                context.getResults().setBuildNum(run->build);
            });
        } else {
            context.getResults().setResult(LaminarCi::JobResult::UNKNOWN);
            return kj::READY_NOW;
        }
    }

    // List jobs in queue
    kj::Promise<void> listQueued(ListQueuedContext context) override {
        const std::list<std::shared_ptr<Run>>& queue = laminar.listQueuedJobs();
        auto res = context.getResults().initResult(queue.size());
        int i = 0;
        for(auto it : queue) {
            res[i].setJob(it->name);
            res[i].setBuildNum(it->build);
            i++;
        }
        return kj::READY_NOW;
    }

    // List running jobs
    kj::Promise<void> listRunning(ListRunningContext context) override {
        const RunSet& active = laminar.listRunningJobs();
        auto res = context.getResults().initResult(active.size());
        int i = 0;
        for(auto it : active) {
            res[i].setJob(it->name);
            res[i].setBuildNum(it->build);
            i++;
        }
        return kj::READY_NOW;
    }

    // List known jobs
    kj::Promise<void> listKnown(ListKnownContext context) override {
        std::list<std::string> known = laminar.listKnownJobs();
        auto res = context.getResults().initResult(known.size());
        int i = 0;
        for(auto it : known) {
            res.set(i++, it);
        }
        return kj::READY_NOW;
    }

    // Abort a specific run, identified by job name and build number.
    kj::Promise<void> abort(AbortContext context) override {
        std::string jobName = context.getParams().getRun().getJob();
        uint buildNum = context.getParams().getRun().getBuildNum();
        LLOG(INFO, "RPC abort", jobName, buildNum);
        LaminarCi::MethodResult result = laminar.abort(jobName, buildNum)
                ? LaminarCi::MethodResult::SUCCESS
                : LaminarCi::MethodResult::FAILED;
        context.getResults().setResult(result);
        return kj::READY_NOW;
    }

private:
    // Helper to convert an RPC parameter list to a hash map
    ParamMap params(const capnp::List<LaminarCi::JobParam>::Reader& paramReader) {
        ParamMap res;
        for(auto p : paramReader) {
            res[p.getName().cStr()] = p.getValue().cStr();
        }
        return res;
    }

    Laminar& laminar;
    // Removed unused member `runWaiters`: it was declared but never
    // referenced anywhere in this class (completion is handled via the
    // kj promise chains above).
};
// Construct the RPC server: allocate a single RpcImpl bootstrap
// capability, shared by every connection accepted via Rpc::accept().
Rpc::Rpc(Laminar& li) :
rpcInterface(kj::heap<RpcImpl>(li))
{}
// Context for an RPC connection. Bundles the socket stream, the
// two-party vat network built on it, and the RPC system serving the
// bootstrap capability, so all three share one lifetime.
struct RpcConnection {
    // Member-init order matters: `network` dereferences this->stream
    // (after the move into the member), and `rpcSystem` uses `network`.
    RpcConnection(kj::Own<kj::AsyncIoStream>&& stream,
                  capnp::Capability::Client bootstrap,
                  capnp::ReaderOptions readerOpts) :
        stream(kj::mv(stream)),
        network(*this->stream, capnp::rpc::twoparty::Side::SERVER, readerOpts),
        rpcSystem(capnp::makeRpcServer(network, bootstrap))
    {
    }
    kj::Own<kj::AsyncIoStream> stream;       // owned socket stream
    capnp::TwoPartyVatNetwork network;       // server side of the two-party vat
    capnp::RpcSystem<capnp::rpc::twoparty::VatId> rpcSystem;  // serves `bootstrap`
};
// Serve one accepted client socket. A heap-allocated RpcConnection is
// attach()ed to the onDisconnect() promise, so the connection context
// stays alive exactly until the peer disconnects, then is destroyed.
kj::Promise<void> Rpc::accept(kj::Own<kj::AsyncIoStream>&& connection) {
auto server = kj::heap<RpcConnection>(kj::mv(connection), rpcInterface, capnp::ReaderOptions());
return server->network.onDisconnect().attach(kj::mv(server));
}

View File

@ -1,36 +0,0 @@
///
/// Copyright 2019-2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_RPC_H_
#define LAMINAR_RPC_H_
#include <capnp/ez-rpc.h>
#include <capnp/rpc-twoparty.h>
#include <capnp/rpc.capnp.h>
class Laminar;

// Serves the Cap'n Proto RPC interface defined in laminar.capnp,
// delegating all operations to the given Laminar instance.
class Rpc {
public:
Rpc(Laminar&li);
// Handle one accepted client connection; the returned promise
// resolves when the peer disconnects.
kj::Promise<void> accept(kj::Own<kj::AsyncIoStream>&& connection);
// Bootstrap capability handed to each connecting client.
capnp::Capability::Client rpcInterface;
};
#endif //LAMINAR_RPC_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2020 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -17,194 +17,115 @@
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#include "run.h" #include "run.h"
#include "context.h" #include "node.h"
#include "conf.h" #include "conf.h"
#include "log.h"
#include <sys/wait.h>
#include <iostream> #include <iostream>
#include <kj/debug.h>
#include <unistd.h> #include <unistd.h>
#include <signal.h>
#if defined(__FreeBSD__) #include <boost/filesystem.hpp>
#include <sys/sysctl.h> namespace fs = boost::filesystem;
#include <sys/limits.h>
#endif
// short syntax helper for kj::Path
template<typename T>
inline kj::Path operator/(const kj::Path& p, const T& ext) {
return p.append(ext);
}
std::string to_string(const RunState& rs) { std::string to_string(const RunState& rs) {
switch(rs) { switch(rs) {
case RunState::QUEUED: return "queued"; case RunState::PENDING: return "pending";
case RunState::RUNNING: return "running";
case RunState::ABORTED: return "aborted"; case RunState::ABORTED: return "aborted";
case RunState::FAILED: return "failed"; case RunState::FAILED: return "failed";
case RunState::SUCCESS: return "success"; case RunState::SUCCESS: return "success";
case RunState::UNKNOWN:
default: default:
return "unknown"; return "unknown";
} }
} }
Run::Run(std::string name, uint num, ParamMap pm, kj::Path&& rootPath) : Run::Run() {
result(RunState::SUCCESS), result = RunState::SUCCESS;
name(name), lastResult = RunState::UNKNOWN;
build(num),
params(kj::mv(pm)),
queuedAt(time(nullptr)),
rootPath(kj::mv(rootPath)),
started(kj::newPromiseAndFulfiller<void>()),
startedFork(started.promise.fork()),
finished(kj::newPromiseAndFulfiller<RunState>()),
finishedFork(finished.promise.fork())
{
for(auto it = params.begin(); it != params.end();) {
if(it->first[0] == '=') {
if(it->first == "=parentJob") {
parentName = it->second;
} else if(it->first == "=parentBuild") {
parentBuild = atoi(it->second.c_str());
} else if(it->first == "=reason") {
reasonMsg = it->second;
} else {
LLOG(ERROR, "Unknown internal job parameter", it->first);
}
it = params.erase(it);
} else
++it;
}
} }
Run::~Run() { Run::~Run() {
LLOG(INFO, "Run destroyed"); KJ_DBG("Run destroyed");
}
static void setEnvFromFile(const kj::Path& rootPath, kj::Path file) {
StringMap vars = parseConfFile((rootPath/file).toString(true).cStr());
for(auto& it : vars) {
setenv(it.first.c_str(), it.second.c_str(), true);
}
}
kj::Promise<RunState> Run::start(RunState lastResult, std::shared_ptr<Context> ctx, const kj::Directory &fsHome, std::function<kj::Promise<int>(kj::Maybe<pid_t>&)> getPromise)
{
kj::Path cfgDir{"cfg"};
// add job timeout if specified
if(fsHome.exists(cfgDir/"jobs"/(name+".conf"))) {
timeout = parseConfFile((rootPath/cfgDir/"jobs"/(name+".conf")).toString(true).cStr()).get<int>("TIMEOUT", 0);
}
int plog[2];
LSYSCALL(pipe(plog));
// Fork a process leader to run all the steps of the job. This gives us a nice
// process tree output (job name and number as the process name) and helps
// contain any wayward descendent processes.
pid_t leader;
LSYSCALL(leader = fork());
if(leader == 0) {
// All output from this process will be captured in the plog pipe
close(plog[0]);
dup2(plog[1], STDOUT_FILENO);
dup2(plog[1], STDERR_FILENO);
close(plog[1]);
// All initial/fixed env vars can be set here. Dynamic ones, including
// "RESULT" and any set by `laminarc set` have to be handled in the subprocess.
// add environment files
if(fsHome.exists(cfgDir/"env"))
setEnvFromFile(rootPath, cfgDir/"env");
if(fsHome.exists(cfgDir/"contexts"/(ctx->name+".env")))
setEnvFromFile(rootPath, cfgDir/"contexts"/(ctx->name+".env"));
if(fsHome.exists(cfgDir/"jobs"/(name+".env")))
setEnvFromFile(rootPath, cfgDir/"jobs"/(name+".env"));
// parameterized vars
for(auto& pair : params) {
setenv(pair.first.c_str(), pair.second.c_str(), false);
}
std::string PATH = (rootPath/"cfg"/"scripts").toString(true).cStr();
if(const char* p = getenv("PATH")) {
PATH.append(":");
PATH.append(p);
}
std::string runNumStr = std::to_string(build);
setenv("PATH", PATH.c_str(), true);
setenv("RUN", runNumStr.c_str(), true);
setenv("JOB", name.c_str(), true);
setenv("CONTEXT", ctx->name.c_str(), true);
setenv("LAST_RESULT", to_string(lastResult).c_str(), true);
setenv("WORKSPACE", (rootPath/"run"/name/"workspace").toString(true).cStr(), true);
setenv("ARCHIVE", (rootPath/"archive"/name/runNumStr).toString(true).cStr(), true);
// RESULT set in leader process
// leader process assumes $LAMINAR_HOME as CWD
LSYSCALL(chdir(rootPath.toString(true).cStr()));
setenv("PWD", rootPath.toString(true).cStr(), 1);
// We could just fork/wait over all the steps here directly, but then we
// can't set a nice name for the process tree. There is pthread_setname_np,
// but it's limited to 16 characters, which most of the time probably isn't
// enough. Instead, we'll just exec ourselves and handle that in laminard's
// main() by calling leader_main()
char* procName;
if(asprintf(&procName, "{laminar} %s:%d", name.data(), build) > 0)
#if defined(__FreeBSD__)
{
int sysctl_rq[] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
size_t self_exe_len = PATH_MAX;
char self_exe[PATH_MAX];
if (sysctl(sysctl_rq, 4, self_exe, &self_exe_len, NULL, 0))
_exit(EXIT_FAILURE);
execl(self_exe, procName, NULL); // does not return
}
#else
execl("/proc/self/exe", procName, NULL); // does not return
#endif
_exit(EXIT_FAILURE);
}
// All good, we've "started"
startedAt = time(nullptr);
context = ctx;
output_fd = plog[0];
close(plog[1]);
pid = leader;
// notifies the rpc client if the start command was used
started.fulfiller->fulfill();
return getPromise(pid).then([this](int status){
// The leader process passes a RunState through the return value.
// Check it didn't die abnormally, then cast to get it back.
result = WIFEXITED(status) ? RunState(WEXITSTATUS(status)) : RunState::ABORTED;
finished.fulfiller->fulfill(RunState(result));
return result;
});
} }
std::string Run::reason() const { std::string Run::reason() const {
if(!parentName.empty()) {
return std::string("Triggered by upstream ") + parentName + " #" + std::to_string(parentBuild);
}
return reasonMsg; return reasonMsg;
} }
bool Run::abort() { bool Run::step() {
// if the Maybe is empty, wait() was already called on this process if(!currentScript.empty() && procStatus != 0)
KJ_IF_MAYBE(p, pid) { result = RunState::FAILED;
kill(-*p, SIGTERM);
if(scripts.size()) {
currentScript = scripts.front();
scripts.pop();
int pfd[2];
pipe(pfd);
pid_t pid = fork();
if(pid == 0) {
close(pfd[0]);
dup2(pfd[1], 1);
dup2(pfd[1], 2);
close(pfd[1]);
std::string buildNum = std::to_string(build);
std::string PATH = (fs::path(laminarHome)/"cfg"/"scripts").string() + ":";
if(const char* p = getenv("PATH")) {
PATH.append(p);
}
chdir(wd.c_str());
for(std::string file : env) {
StringMap vars = parseConfFile(file.c_str());
for(auto& it : vars) {
setenv(it.first.c_str(), it.second.c_str(), true);
}
}
setenv("PATH", PATH.c_str(), true);
setenv("lBuildNum", buildNum.c_str(), true);
setenv("lJobName", name.c_str(), true);
if(!node->name.empty())
setenv("lNode", node->name.c_str(), true);
setenv("lResult", to_string(result).c_str(), true);
setenv("lLastResult", to_string(lastResult).c_str(), true);
setenv("lWorkspace", (fs::path(laminarHome)/"run"/name/"workspace").string().c_str(), true);
setenv("lArchive", (fs::path(laminarHome)/"archive"/name/buildNum.c_str()).string().c_str(), true);
for(auto& pair : params) {
setenv(pair.first.c_str(), pair.second.c_str(), false);
}
printf("[laminar] Executing %s\n", currentScript.c_str());
execl(currentScript.c_str(), currentScript.c_str(), NULL);
KJ_LOG(FATAL, "execl returned", strerror(errno));
_exit(1);
}
KJ_LOG(INFO, "Forked", currentScript, pid);
close(pfd[1]);
fd = pfd[0];
this->pid = pid;
return false;
} else {
return true; return true;
} }
return false; }
void Run::addScript(std::string script) {
scripts.push(script);
}
void Run::addEnv(std::string path) {
env.push_back(path);
}
void Run::reaped(int status) {
procStatus = status;
}
void Run::complete() {
notifyCompletion(this);
} }

100
src/run.h
View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2018 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,8 +16,8 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_RUN_H_ #ifndef _LAMINAR_RUN_H_
#define LAMINAR_RUN_H_ #define _LAMINAR_RUN_H_
#include <string> #include <string>
#include <queue> #include <queue>
@ -25,17 +25,10 @@
#include <functional> #include <functional>
#include <ostream> #include <ostream>
#include <unordered_map> #include <unordered_map>
#include <memory>
#include <kj/async.h>
#include <kj/filesystem.h>
// Definition needed for musl
typedef unsigned int uint;
enum class RunState { enum class RunState {
UNKNOWN, UNKNOWN,
QUEUED, PENDING,
RUNNING,
ABORTED, ABORTED,
FAILED, FAILED,
SUCCESS SUCCESS
@ -43,66 +36,63 @@ enum class RunState {
std::string to_string(const RunState& rs); std::string to_string(const RunState& rs);
class Context; class Node;
typedef std::unordered_map<std::string, std::string> ParamMap; // Represents an execution of a job. Not much more than POD
// Represents an execution of a job.
class Run { class Run {
public: public:
Run(std::string name, uint num, ParamMap params, kj::Path&& rootPath); Run();
~Run(); ~Run();
// copying this class would be asking for trouble... // copying this class would be asking for trouble...
Run(const Run&) = delete; Run(const Run&) = delete;
Run& operator=(const Run&) = delete; Run& operator=(const Run&) = delete;
kj::Promise<RunState> start(RunState lastResult, std::shared_ptr<Context> ctx, const kj::Directory &fsHome, std::function<kj::Promise<int>(kj::Maybe<pid_t>&)> getPromise); // executes the next script (if any), returning true if there is nothing
// more to be done - in this case the caller should call complete()
bool step();
// aborts this run // call this when all scripts are done to get the notifyCompletion callback
bool abort(); void complete();
// adds a script to the queue of scripts to be executed by this run
void addScript(std::string script);
// adds an environment file that will be sourced before this run
void addEnv(std::string path);
// called when a process owned by this run has been reaped. The status
// may be used to set the run's job status
void reaped(int status);
std::string reason() const; std::string reason() const;
kj::Promise<void> whenStarted() { return startedFork.addBranch(); } std::function<void(const Run*)> notifyCompletion;
kj::Promise<RunState> whenFinished() { return finishedFork.addBranch(); } Node* node;
std::shared_ptr<Context> context;
RunState result; RunState result;
RunState lastResult;
std::string laminarHome;
std::string name; std::string name;
std::string wd;
std::string parentName; std::string parentName;
int parentBuild = 0; int parentBuild = 0;
uint build = 0; std::string reasonMsg;
int build = 0;
std::string log; std::string log;
kj::Maybe<pid_t> pid; pid_t pid;
int output_fd; int fd;
int procStatus;
std::unordered_map<std::string, std::string> params; std::unordered_map<std::string, std::string> params;
int timeout = 0;
time_t queuedAt; time_t queuedAt;
time_t startedAt; time_t startedAt;
private: private:
// adds a script to the queue of scripts to be executed by this run std::queue<std::string> scripts;
void addScript(kj::Path scriptPath, kj::Path scriptWorkingDir, bool runOnAbort = false); std::string currentScript;
std::list<std::string> env;
// adds an environment file that will be sourced before this run
void addEnv(kj::Path path);
struct Script {
kj::Path path;
kj::Path cwd;
bool runOnAbort;
};
kj::Path rootPath;
std::string reasonMsg;
kj::PromiseFulfillerPair<void> started;
kj::ForkedPromise<void> startedFork;
kj::PromiseFulfillerPair<RunState> finished;
kj::ForkedPromise<RunState> finishedFork;
}; };
// All this below is a somewhat overengineered method of keeping track of // All this below is a somewhat overengineered method of keeping track of
// currently executing builds (Run objects). This would probably scale // currently executing builds (Run objects). This would probably scale
// very well, but it's completely gratuitous since we are not likely to // very well, but it's completely gratuitous since we are not likely to
@ -126,11 +116,13 @@ struct _run_same {
// A single Run can be fetched by... // A single Run can be fetched by...
struct _run_index : bmi::indexed_by< struct _run_index : bmi::indexed_by<
// their current running pid
bmi::hashed_unique<bmi::member<Run, pid_t, &Run::pid>>,
bmi::hashed_unique<bmi::composite_key< bmi::hashed_unique<bmi::composite_key<
std::shared_ptr<Run>, std::shared_ptr<Run>,
// a combination of their job name and build number // a combination of their job name and build number
bmi::member<Run, std::string, &Run::name>, bmi::member<Run, std::string, &Run::name>,
bmi::member<Run, uint, &Run::build> bmi::member<Run, int, &Run::build>
>>, >>,
// or a pointer to a Run object. // or a pointer to a Run object.
bmi::hashed_unique<_run_same>, bmi::hashed_unique<_run_same>,
@ -145,17 +137,7 @@ struct RunSet: public boost::multi_index_container<
std::shared_ptr<Run>, std::shared_ptr<Run>,
_run_index _run_index
> { > {
typename bmi::nth_index<RunSet, 0>::type& byNameNumber() { return get<0>(); } // TODO: getters for each index
typename bmi::nth_index<RunSet, 0>::type const& byNameNumber() const { return get<0>(); }
typename bmi::nth_index<RunSet, 1>::type& byRunPtr() { return get<1>(); }
typename bmi::nth_index<RunSet, 1>::type const& byRunPtr() const { return get<1>(); }
typename bmi::nth_index<RunSet, 2>::type& byStartedAt() { return get<2>(); }
typename bmi::nth_index<RunSet, 2>::type const& byStartedAt() const { return get<2>(); }
typename bmi::nth_index<RunSet, 3>::type& byJobName() { return get<3>(); }
typename bmi::nth_index<RunSet, 3>::type const& byJobName() const { return get<3>(); }
}; };
#endif // LAMINAR_RUN_H_ #endif // _LAMINAR_RUN_H_

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2021 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -17,161 +17,414 @@
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#include "server.h" #include "server.h"
#include "log.h" #include "interface.h"
#include "rpc.h" #include "laminar.capnp.h"
#include "http.h" #include "resources.h"
#include "laminar.h"
#include <capnp/ez-rpc.h>
#include <capnp/rpc-twoparty.h>
#include <capnp/rpc.capnp.h>
#include <kj/async-io.h> #include <kj/async-io.h>
#include <kj/async-unix.h> #include <kj/debug.h>
#include <kj/threadlocal.h> #include <kj/threadlocal.h>
#include <signal.h> #include <websocketpp/config/core.hpp>
#include <websocketpp/server.hpp>
#include <sys/eventfd.h> #include <sys/eventfd.h>
#include <sys/stat.h>
#include <sys/inotify.h>
// Size of buffer used to read from file descriptors. Should be // Configuration struct for the websocketpp template library.
// a multiple of sizeof(struct signalfd_siginfo) == 128 struct wsconfig : public websocketpp::config::core {
#define PROC_IO_BUFSIZE 4096 // static const websocketpp::log::level elog_level =
// websocketpp::log::elevel::info;
Server::Server(kj::AsyncIoContext& io) : // static const websocketpp::log::level alog_level =
ioContext(io), // websocketpp::log::alevel::access_core |
listeners(kj::heap<kj::TaskSet>(*this)), // websocketpp::log::alevel::message_payload ;
childTasks(*this)
static const websocketpp::log::level elog_level =
websocketpp::log::elevel::none;
static const websocketpp::log::level alog_level =
websocketpp::log::alevel::none;
typedef struct { LaminarClient* lc; } connection_base;
};
typedef websocketpp::server<wsconfig> websocket;
namespace {
// Used for returning run state to RPC clients
LaminarCi::JobResult fromRunState(RunState state) {
switch(state) {
case RunState::SUCCESS: return LaminarCi::JobResult::SUCCESS;
case RunState::FAILED: return LaminarCi::JobResult::FAILED;
default:
KJ_DBG("TODO log state", to_string(state));
return LaminarCi::JobResult::UNKNOWN;
}
}
}
// This is the implementation of the Laminar Cap'n Proto RPC interface.
// As such, it implements the pure virtual interface generated from
// laminar.capnp with calls to the LaminarInterface
class RpcImpl : public LaminarCi::Server {
public:
RpcImpl(LaminarInterface& l) :
LaminarCi::Server(),
laminar(l)
{
}
// Start a job, without waiting for it to finish
kj::Promise<void> trigger(TriggerContext context) override {
std::string jobName = context.getParams().getJobName();
KJ_LOG(INFO, "RPC trigger", jobName);
ParamMap params;
for(auto p : context.getParams().getParams()) {
params[p.getName().cStr()] = p.getValue().cStr();
}
LaminarCi::MethodResult result = laminar.queueJob(jobName, params)
? LaminarCi::MethodResult::SUCCESS
: LaminarCi::MethodResult::FAILED;
context.getResults().setResult(result);
return kj::READY_NOW;
}
// Start a job and wait for the result
kj::Promise<void> start(StartContext context) override {
std::string jobName = context.getParams().getJobName();
KJ_LOG(INFO, "RPC start", jobName);
ParamMap params;
for(auto p : context.getParams().getParams()) {
params[p.getName().cStr()] = p.getValue().cStr();
}
std::shared_ptr<Run> run = laminar.queueJob(jobName, params);
if(run.get()) {
return laminar.waitForRun(run.get()).then([context](RunState state) mutable {
context.getResults().setResult(fromRunState(state));
});
} else {
context.getResults().setResult(LaminarCi::JobResult::UNKNOWN);
return kj::READY_NOW;
}
}
// Wait for an already-running job to complete, returning the result
kj::Promise<void> pend(PendContext context) override {
std::string jobName = context.getParams().getJobName();
int buildNum = context.getParams().getBuildNum();
KJ_LOG(INFO, "RPC pend", jobName, buildNum);
kj::Promise<RunState> promise = laminar.waitForRun(jobName, buildNum);
return promise.then([context](RunState state) mutable {
context.getResults().setResult(fromRunState(state));
});
}
// Set a parameter on a running build
kj::Promise<void> set(SetContext context) override {
std::string jobName = context.getParams().getJobName();
int buildNum = context.getParams().getBuildNum();
KJ_LOG(INFO, "RPC set", jobName, buildNum);
LaminarCi::MethodResult result = laminar.setParam(jobName, buildNum,
context.getParams().getParam().getName(), context.getParams().getParam().getValue())
? LaminarCi::MethodResult::SUCCESS
: LaminarCi::MethodResult::FAILED;
context.getResults().setResult(result);
return kj::READY_NOW;
}
private:
LaminarInterface& laminar;
kj::LowLevelAsyncIoProvider* asyncio;
};
// This is the implementation of the HTTP/Websocket interface. It exposes
// websocket connections as LaminarClients and registers them with the
// LaminarInterface so that status messages will be delivered to the client.
// On opening a websocket connection, it delivers a status snapshot message
// (see LaminarInterface::sendStatus)
class Server::HttpImpl {
public:
HttpImpl(LaminarInterface& l) :
laminar(l)
{
// debug logging
// wss.set_access_channels(websocketpp::log::alevel::all);
// wss.set_error_channels(websocketpp::log::elevel::all);
// TODO: This could be used in the future to trigger actions on the
// server in response to a web client request. Currently not supported.
// wss.set_message_handler([](std::weak_ptr<void> s, websocket::message_ptr msg){
// msg->get_payload();
// });
// Handle plain HTTP requests by delivering the binary resource
wss.set_http_handler([this](websocketpp::connection_hdl hdl){
websocket::connection_ptr c = wss.get_con_from_hdl(hdl);
const char* start, *end;
std::string resource = c->get_resource();
if(resource.compare(0, strlen("/archive/"), "/archive/") == 0) {
std::string file(resource.substr(strlen("/archive/")));
std::string content;
if(laminar.getArtefact(file, content)) {
c->set_status(websocketpp::http::status_code::ok);
c->append_header("Content-Transfer-Encoding", "binary");
c->set_body(content);
} else {
c->set_status(websocketpp::http::status_code::not_found);
}
} else if(resources.handleRequest(resource, &start, &end)) {
c->set_status(websocketpp::http::status_code::ok);
c->append_header("Content-Encoding", "gzip");
c->append_header("Content-Transfer-Encoding", "binary");
std::string response(start,end);
c->set_body(response);
} else {
// 404
c->set_status(websocketpp::http::status_code::not_found);
}
c->lc->close(false);
});
// Handle new websocket connection. Parse the URL to determine
// the client's scope of interest, register the client for update
// messages, and call sendStatus.
wss.set_open_handler([this](websocketpp::connection_hdl hdl){
websocket::connection_ptr c = wss.get_con_from_hdl(hdl);
std::string res = c->get_resource();
if(res.substr(0, 5) == "/jobs") {
if(res.length() == 5) {
c->lc->scope.type = MonitorScope::ALL;
} else {
res = res.substr(5);
int split = res.find('/',1);
std::string job = res.substr(1,split-1);
if(!job.empty()) {
c->lc->scope.job = job;
c->lc->scope.type = MonitorScope::JOB;
}
if(split != std::string::npos) {
int split2 = res.find('/', split+1);
std::string run = res.substr(split+1, split2-split);
if(!run.empty()) {
c->lc->scope.num = atoi(run.c_str());
c->lc->scope.type = MonitorScope::RUN;
}
if(split2 != std::string::npos && res.compare(split2, 4, "/log") == 0) {
c->lc->scope.type = MonitorScope::LOG;
}
}
}
}
laminar.registerClient(c->lc);
laminar.sendStatus(c->lc);
});
wss.set_close_handler([this](websocketpp::connection_hdl hdl){
websocket::connection_ptr c = wss.get_con_from_hdl(hdl);
laminar.deregisterClient(c->lc);
c->lc->close();
});
}
// Return a new connection object linked with the context defined below.
// This is a bit untidy, it would be better to make them a single object,
// but I didn't yet figure it out
websocket::connection_ptr newConnection(LaminarClient* lc) {
websocket::connection_ptr c = wss.get_connection();
c->lc = lc;
return c;
}
private:
Resources resources;
LaminarInterface& laminar;
websocket wss;
};
// Context for an RPC connection
struct RpcConnection {
RpcConnection(kj::Own<kj::AsyncIoStream>&& stream,
capnp::Capability::Client bootstrap,
capnp::ReaderOptions readerOpts) :
stream(kj::mv(stream)),
network(*this->stream, capnp::rpc::twoparty::Side::SERVER, readerOpts),
rpcSystem(capnp::makeRpcServer(network, bootstrap))
{
}
kj::Own<kj::AsyncIoStream> stream;
capnp::TwoPartyVatNetwork network;
capnp::RpcSystem<capnp::rpc::twoparty::VatId> rpcSystem;
};
// Context for a WebsocketConnection (implements LaminarClient)
// This object is a streambuf and reimplements xsputn so that it can follow any
// write the websocketpp library makes to it with a write to the appropriate
// descriptor in the kj-async context.
struct Server::WebsocketConnection : public LaminarClient, public std::streambuf {
WebsocketConnection(kj::Own<kj::AsyncIoStream>&& stream, Server::HttpImpl& http) :
stream(kj::mv(stream)),
out(this),
cn(http.newConnection(this)),
writePaf(kj::newPromiseAndFulfiller<void>()),
closeOnComplete(false)
{
cn->register_ostream(&out);
cn->start();
}
~WebsocketConnection() noexcept(true) {
outputBuffer.clear();
writePaf.fulfiller->fulfill();
}
kj::Promise<void> pend() {
return stream->tryRead(ibuf, 1, sizeof(ibuf)).then([this](size_t sz){
cn->read_all(ibuf, sz);
if(sz == 0 || cn->get_state() == websocketpp::session::state::closed) {
cn->eof();
return kj::Promise<void>(kj::READY_NOW);
}
return pend();
});
}
kj::Promise<void> writeTask() {
return writePaf.promise.then([this]() {
std::string payload;
// clear the outputBuffer for more context, and take a chunk
// to send now
payload.swap(outputBuffer);
writePaf = kj::newPromiseAndFulfiller<void>();
if(payload.empty()) {
stream->shutdownWrite();
return kj::Promise<void>(kj::READY_NOW);
} else {
return stream->write(payload.data(), payload.size()).then([this](){
return closeOnComplete ? stream->shutdownWrite(), kj::Promise<void>(kj::READY_NOW) : writeTask();
}).attach(kj::mv(payload));
}
});
}
void sendMessage(std::string payload) override {
cn->send(payload, websocketpp::frame::opcode::text);
}
void close(bool now) override {
closeOnComplete = true;
if(now) {
outputBuffer.clear();
writePaf.fulfiller->fulfill();
}
}
std::streamsize xsputn(const char* s, std::streamsize sz) override {
outputBuffer.append(std::string(s, sz));
writePaf.fulfiller->fulfill();
return sz;
}
kj::Own<kj::AsyncIoStream> stream;
std::ostream out;
websocket::connection_ptr cn;
std::string outputBuffer;
kj::PromiseFulfillerPair<void> writePaf;
char ibuf[131072];
bool closeOnComplete;
};
Server::Server(LaminarInterface& li, kj::StringPtr rpcBindAddress,
kj::StringPtr httpBindAddress) :
rpcInterface(kj::heap<RpcImpl>(li)),
httpInterface(new HttpImpl(li)),
ioContext(kj::setupAsyncIo()),
tasks(*this)
{ {
// RPC task
tasks.add(ioContext.provider->getNetwork().parseAddress(rpcBindAddress, 0)
.then([this](kj::Own<kj::NetworkAddress>&& addr) {
acceptRpcClient(addr->listen());
}));
// HTTP task
tasks.add(ioContext.provider->getNetwork().parseAddress(httpBindAddress, 0)
.then([this](kj::Own<kj::NetworkAddress>&& addr) {
acceptHttpClient(addr->listen());
}));
} }
Server::~Server() { Server::~Server() {
// RpcImpl is deleted through Capability::Client.
// Deal with the HTTP interface the old-fashioned way
delete httpInterface;
} }
void Server::start() { void Server::start() {
// The eventfd is used to quit the server later since we need to trigger // this eventfd is just to allow us to quit the server at some point
// a reaction from the event loop // in the future by adding this event to the async loop. I couldn't see
efd_quit = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK); // a simpler way...
kj::evalLater([this](){ efd = eventfd(0,0);
kj::Promise<void> quit = kj::evalLater([this](){
static uint64_t _; static uint64_t _;
auto wakeEvent = ioContext.lowLevelProvider->wrapInputFd(efd_quit); auto wakeEvent = ioContext.lowLevelProvider->wrapInputFd(efd);
return wakeEvent->read(&_, sizeof(uint64_t)).attach(std::move(wakeEvent)); return wakeEvent->read(&_, sizeof(uint64_t)).attach(std::move(wakeEvent));
}).wait(ioContext.waitScope); });
// Execution arrives here when the eventfd is triggered (in stop()) quit.wait(ioContext.waitScope);
// Shutdown sequence:
// 1. stop accepting new connections
listeners = nullptr;
// 2. wait for all children to close
childTasks.onEmpty().wait(ioContext.waitScope);
// TODO not sure the comments below are true
// 3. run the loop once more to send any pending output to http clients
ioContext.waitScope.poll();
// 4. return: http connections will be destructed when class is deleted
} }
void Server::stop() { void Server::stop() {
// This method is expected to be called in signal context, so an eventfd eventfd_write(efd, 1);
// is used to get the main loop to react. See run()
eventfd_write(efd_quit, 1);
} }
kj::Promise<void> Server::readDescriptor(int fd, std::function<void(const char*,size_t)> cb) { void Server::addProcess(int fd, std::function<void(char*,size_t)> readCb, std::function<void()> cb) {
auto event = this->ioContext.lowLevelProvider->wrapInputFd(fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP); auto event = this->ioContext.lowLevelProvider->wrapInputFd(fd);
auto buffer = kj::heapArrayBuilder<char>(PROC_IO_BUFSIZE); tasks.add(handleProcessOutput(event,readCb).attach(std::move(event)).then(std::move(cb)));
return handleFdRead(event, buffer.asPtr().begin(), cb).attach(std::move(event)).attach(std::move(buffer));
} }
void Server::addTask(kj::Promise<void>&& task) { void Server::acceptHttpClient(kj::Own<kj::ConnectionReceiver>&& listener) {
childTasks.add(kj::mv(task)); auto ptr = listener.get();
tasks.add(ptr->accept().then(kj::mvCapture(kj::mv(listener),
[this](kj::Own<kj::ConnectionReceiver>&& listener,
kj::Own<kj::AsyncIoStream>&& connection) {
acceptHttpClient(kj::mv(listener));
auto conn = kj::heap<WebsocketConnection>(kj::mv(connection), *httpInterface);
auto promises = kj::heapArrayBuilder<kj::Promise<void>>(2);
promises.add(std::move(conn->pend()));
promises.add(std::move(conn->writeTask()));
return kj::joinPromises(promises.finish()).attach(std::move(conn));
}))
);
} }
kj::Promise<void> Server::addTimeout(int seconds, std::function<void ()> cb) { void Server::acceptRpcClient(kj::Own<kj::ConnectionReceiver>&& listener) {
return ioContext.lowLevelProvider->getTimer().afterDelay(seconds * kj::SECONDS).then([cb](){ auto ptr = listener.get();
cb(); tasks.add(ptr->accept().then(kj::mvCapture(kj::mv(listener),
}).eagerlyEvaluate(nullptr); [this](kj::Own<kj::ConnectionReceiver>&& listener,
kj::Own<kj::AsyncIoStream>&& connection) {
acceptRpcClient(kj::mv(listener));
auto server = kj::heap<RpcConnection>(kj::mv(connection), rpcInterface, capnp::ReaderOptions());
tasks.add(server->network.onDisconnect().attach(kj::mv(server)));
}))
);
} }
kj::Promise<int> Server::onChildExit(kj::Maybe<pid_t> &pid) { // handles stdout/stderr from a child process by sending it to the provided
return ioContext.unixEventPort.onChildExit(pid); // callback function
} kj::Promise<void> Server::handleProcessOutput(kj::AsyncInputStream* stream, std::function<void(char*,size_t)> readCb) {
static char* buffer = new char[131072];
Server::PathWatcher& Server::watchPaths(std::function<void()> fn) return stream->tryRead(buffer, 1, sizeof(buffer)).then([this,stream,readCb](size_t sz) {
{ readCb(buffer, sz);
struct PathWatcherImpl final : public PathWatcher {
PathWatcher& addPath(const char* path) override {
inotify_add_watch(fd, path, IN_ONLYDIR | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE);
return *this;
}
int fd;
};
auto pwi = kj::heap<PathWatcherImpl>();
PathWatcher* pw = pwi.get();
pwi->fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
listeners->add(readDescriptor(pwi->fd, [fn](const char*, size_t){
fn();
}).attach(kj::mv(pwi)));
return *pw;
}
void Server::listenRpc(Rpc &rpc, kj::StringPtr rpcBindAddress)
{
if(rpcBindAddress.startsWith("unix:"))
unlink(rpcBindAddress.slice(strlen("unix:")).cStr());
listeners->add(ioContext.provider->getNetwork().parseAddress(rpcBindAddress)
.then([this,&rpc,rpcBindAddress](kj::Own<kj::NetworkAddress>&& addr) {
kj::Own<kj::ConnectionReceiver> listener = addr->listen();
if(rpcBindAddress.startsWith("unix:"))
chmod(rpcBindAddress.slice(strlen("unix:")).cStr(), 0660);
return acceptRpcClient(rpc, kj::mv(listener));
}));
}
void Server::listenHttp(Http &http, kj::StringPtr httpBindAddress)
{
if(httpBindAddress.startsWith("unix:"))
unlink(httpBindAddress.slice(strlen("unix:")).cStr());
listeners->add(ioContext.provider->getNetwork().parseAddress(httpBindAddress)
.then([this,&http,httpBindAddress](kj::Own<kj::NetworkAddress>&& addr) {
kj::Own<kj::ConnectionReceiver> listener = addr->listen();
if(httpBindAddress.startsWith("unix:"))
chmod(httpBindAddress.slice(strlen("unix:")).cStr(), 0660);
return http.startServer(ioContext.lowLevelProvider->getTimer(), kj::mv(listener));
}).catch_([this,&http,httpBindAddress](kj::Exception&&e) mutable -> kj::Promise<void> {
if(e.getType() == kj::Exception::Type::DISCONNECTED) {
LLOG(ERROR, "HTTP disconnect, restarting server", e.getDescription());
listenHttp(http, httpBindAddress);
return kj::READY_NOW;
}
// otherwise propagate the exception
return kj::mv(e);
}));
}
kj::Promise<void> Server::acceptRpcClient(Rpc& rpc, kj::Own<kj::ConnectionReceiver>&& listener) {
kj::ConnectionReceiver& cr = *listener.get();
return cr.accept().then(kj::mvCapture(kj::mv(listener),
[this, &rpc](kj::Own<kj::ConnectionReceiver>&& listener, kj::Own<kj::AsyncIoStream>&& connection) {
addTask(rpc.accept(kj::mv(connection)));
return acceptRpcClient(rpc, kj::mv(listener));
}));
}
// returns a promise which will read a chunk of data from the file descriptor
// wrapped by stream and invoke the provided callback with the read data.
// Repeats until ::read returns <= 0
kj::Promise<void> Server::handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function<void(const char*,size_t)> cb) {
return stream->tryRead(buffer, 1, PROC_IO_BUFSIZE).then([this,stream,buffer,cb](size_t sz) {
if(sz > 0) { if(sz > 0) {
cb(buffer, sz); return handleProcessOutput(stream, readCb);
return handleFdRead(stream, kj::mv(buffer), cb);
} }
return kj::Promise<void>(kj::READY_NOW); return kj::Promise<void>(kj::READY_NOW);
}); });
} }
void Server::taskFailed(kj::Exception &&exception) {
//kj::throwFatalException(kj::mv(exception));
// prettier
fprintf(stderr, "fatal: %s\n", exception.getDescription().cStr());
exit(EXIT_FAILURE);
}

View File

@ -1,5 +1,5 @@
/// ///
/// Copyright 2015-2020 Oliver Giles /// Copyright 2015 Oliver Giles
/// ///
/// This file is part of Laminar /// This file is part of Laminar
/// ///
@ -16,60 +16,47 @@
/// You should have received a copy of the GNU General Public License /// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/> /// along with Laminar. If not, see <http://www.gnu.org/licenses/>
/// ///
#ifndef LAMINAR_SERVER_H_ #ifndef _LAMINAR_SERVER_H_
#define LAMINAR_SERVER_H_ #define _LAMINAR_SERVER_H_
#include <kj/async-io.h> #include <kj/async-io.h>
#include <kj/compat/http.h>
#include <capnp/message.h> #include <capnp/message.h>
#include <capnp/capability.h> #include <capnp/capability.h>
#include <functional> #include <functional>
#include <sys/types.h>
class Laminar; struct LaminarInterface;
class Http;
class Rpc;
// This class manages the program's asynchronous event loop // This class abstracts the HTTP/Websockets and Cap'n Proto RPC interfaces.
// It also manages the program's asynchronous event loop
class Server final : public kj::TaskSet::ErrorHandler { class Server final : public kj::TaskSet::ErrorHandler {
public: public:
Server(kj::AsyncIoContext& ioContext); // Initializes the server with a LaminarInterface to handle requests from
// HTTP/Websocket or RPC clients and bind addresses for each of those
// interfaces. See the documentation for kj::AsyncIoProvider::getNetwork
// for a description of the address format
Server(LaminarInterface& li, kj::StringPtr rpcBindAddress, kj::StringPtr httpBindAddress);
~Server(); ~Server();
void start(); void start();
void stop(); void stop();
void addProcess(int fd, std::function<void(char*,size_t)> readCb, std::function<void()> cb);
// add a file descriptor to be monitored for output. The callback will be
// invoked with the read data
kj::Promise<void> readDescriptor(int fd, std::function<void(const char*,size_t)> cb);
void addTask(kj::Promise<void> &&task);
// add a one-shot timer callback
kj::Promise<void> addTimeout(int seconds, std::function<void()> cb);
// get a promise which resolves when a child process exits
kj::Promise<int> onChildExit(kj::Maybe<pid_t>& pid);
struct PathWatcher {
virtual PathWatcher& addPath(const char* path) = 0;
};
PathWatcher& watchPaths(std::function<void()>);
void listenRpc(Rpc& rpc, kj::StringPtr rpcBindAddress);
void listenHttp(Http& http, kj::StringPtr httpBindAddress);
private: private:
kj::Promise<void> acceptRpcClient(Rpc& rpc, kj::Own<kj::ConnectionReceiver>&& listener); void acceptHttpClient(kj::Own<kj::ConnectionReceiver>&& listener);
kj::Promise<void> handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function<void(const char*,size_t)> cb); void acceptRpcClient(kj::Own<kj::ConnectionReceiver>&& listener);
kj::Promise<void> handleProcessOutput(kj::AsyncInputStream* stream, std::function<void(char*,size_t)> readCb);
void taskFailed(kj::Exception&& exception) override; void taskFailed(kj::Exception&& exception) override {
kj::throwFatalException(kj::mv(exception));
}
private: private:
int efd_quit; int efd;
kj::AsyncIoContext& ioContext; capnp::Capability::Client rpcInterface;
kj::Own<kj::TaskSet> listeners; struct WebsocketConnection;
kj::TaskSet childTasks; struct HttpImpl;
kj::Maybe<kj::Promise<void>> reapWatch; HttpImpl* httpInterface;
kj::AsyncIoContext ioContext;
kj::TaskSet tasks;
}; };
#endif // LAMINAR_SERVER_H_ #endif // _LAMINAR_SERVER_H_

View File

@ -1,25 +0,0 @@
///
/// Copyright 2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#define str(x) #x
#define xstr(x) str(x)
const char* laminar_version() {
return xstr(LAMINAR_VERSION);
}

View File

@ -1,71 +0,0 @@
///
/// Copyright 2019 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_EVENTSOURCE_H_
#define LAMINAR_EVENTSOURCE_H_
#include <kj/async-io.h>
#include <kj/compat/http.h>
#include <rapidjson/document.h>
#include <vector>
class EventSource {
public:
EventSource(kj::AsyncIoContext& ctx, const char* httpConnectAddr, const char* path) :
networkAddress(ctx.provider->getNetwork().parseAddress(httpConnectAddr).wait(ctx.waitScope)),
httpClient(kj::newHttpClient(ctx.lowLevelProvider->getTimer(), headerTable, *networkAddress)),
headerTable(),
headers(headerTable),
buffer(kj::heapArrayBuilder<char>(BUFFER_SIZE))
{
headers.add("Accept", "text/event-stream");
auto resp = httpClient->request(kj::HttpMethod::GET, path, headers).response.wait(ctx.waitScope);
promise = waitForMessages(resp.body.get(), 0).attach(kj::mv(resp));
}
const std::vector<rapidjson::Document>& messages() {
return receivedMessages;
}
private:
kj::Own<kj::NetworkAddress> networkAddress;
kj::Own<kj::HttpClient> httpClient;
kj::HttpHeaderTable headerTable;
kj::HttpHeaders headers;
kj::ArrayBuilder<char> buffer;
kj::Maybe<kj::Promise<void>> promise;
std::vector<rapidjson::Document> receivedMessages;
kj::Promise<void> waitForMessages(kj::AsyncInputStream* stream, ulong offset) {
return stream->read(buffer.asPtr().begin() + offset, 1, BUFFER_SIZE).then([=](size_t s) {
ulong end = offset + s;
buffer.asPtr().begin()[end] = '\0';
if(strcmp(&buffer.asPtr().begin()[end - 2], "\n\n") == 0) {
rapidjson::Document d;
d.Parse(buffer.begin() + strlen("data: "));
receivedMessages.emplace_back(kj::mv(d));
end = 0;
}
return waitForMessages(stream, end);
});
}
static const int BUFFER_SIZE = 1024;
};
#endif // LAMINAR_EVENTSOURCE_H_

View File

@ -1,156 +0,0 @@
///
/// Copyright 2019-2022 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_FIXTURE_H_
#define LAMINAR_FIXTURE_H_
#include "laminar.capnp.h"
#include "eventsource.h"
#include "tempdir.h"
#include "laminar.h"
#include "log.h"
#include "server.h"
#include "conf.h"
#include <capnp/rpc-twoparty.h>
#include <gtest/gtest.h>
class LaminarFixture : public ::testing::Test {
public:
LaminarFixture() {
home = tmp.path.toString(true).cStr();
bind_rpc = std::string("unix:/") + home + "/rpc.sock";
bind_http = std::string("unix:/") + home + "/http.sock";
settings.home = home.c_str();
settings.bind_rpc = bind_rpc.c_str();
settings.bind_http = bind_http.c_str();
settings.archive_url = "/test-archive/";
}
~LaminarFixture() noexcept(true) {}
void SetUp() override {
tmp.init();
server = new Server(*ioContext);
laminar = new Laminar(*server, settings);
}
void TearDown() override {
delete server;
delete laminar;
tmp.clean();
}
kj::Own<EventSource> eventSource(const char* path) {
return kj::heap<EventSource>(*ioContext, bind_http.c_str(), path);
}
void defineJob(const char* name, const char* scriptContent, const char* configContent = nullptr) {
KJ_IF_MAYBE(f, tmp.fs->tryOpenFile(kj::Path{"cfg", "jobs", std::string(name) + ".run"},
kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT | kj::WriteMode::EXECUTABLE)) {
(*f)->writeAll(std::string("#!/bin/sh\n") + scriptContent + "\n");
}
if(configContent) {
KJ_IF_MAYBE(f, tmp.fs->tryOpenFile(kj::Path{"cfg", "jobs", std::string(name) + ".conf"}, kj::WriteMode::CREATE)) {
(*f)->writeAll(configContent);
}
}
}
struct RunExec {
LaminarCi::JobResult result;
kj::String log;
};
RunExec runJob(const char* name, kj::Maybe<StringMap> params = nullptr) {
auto req = client().runRequest();
req.setJobName(name);
KJ_IF_MAYBE(p, params) {
auto params = req.initParams(p->size());
int i = 0;
for(auto kv : *p) {
params[i].setName(kv.first);
params[i].setValue(kv.second);
i++;
}
}
auto res = req.send().wait(ioContext->waitScope);
std::string path = std::string{"/log/"} + name + "/" + std::to_string(res.getBuildNum());
kj::HttpHeaderTable headerTable;
kj::String log = kj::newHttpClient(ioContext->lowLevelProvider->getTimer(), headerTable,
*ioContext->provider->getNetwork().parseAddress(bind_http.c_str()).wait(ioContext->waitScope))
->request(kj::HttpMethod::GET, path, kj::HttpHeaders(headerTable)).response.wait(ioContext->waitScope).body
->readAllText().wait(ioContext->waitScope);
return { res.getResult(), kj::mv(log) };
}
void setNumExecutors(int nexec) {
KJ_IF_MAYBE(f, tmp.fs->tryOpenFile(kj::Path{"cfg", "contexts", "default.conf"},
kj::WriteMode::CREATE | kj::WriteMode::MODIFY | kj::WriteMode::CREATE_PARENT)) {
std::string content = "EXECUTORS=" + std::to_string(nexec);
(*f)->writeAll(content);
}
}
// Return a copy of str with every line beginning "[laminar]" removed.
// The result keeps the allocated length of the input; the filtered content is
// NUL-terminated in place, so callers comparing via cStr() see only it.
kj::String stripLaminarLogLines(const kj::String& str) {
    auto out = kj::heapString(str.size());
    char *o = out.begin();
    for(const char *p = str.cStr(), *e = p + str.size(); p < e;) {
        const char *nl = strchrnul(p, '\n');
        // include the newline when present; a final unterminated line is
        // copied without the NUL that strchrnul stopped at. (The previous
        // code copied nl - p + 1 bytes unconditionally, which for input
        // lacking a trailing '\n' copied the terminator and then wrote the
        // final '\0' one byte past the end of the buffer.)
        size_t len = size_t(nl - p) + (*nl == '\n' ? 1 : 0);
        if(!kj::StringPtr{p}.startsWith("[laminar]")) {
            memcpy(o, p, len);
            o += len;
        }
        p = nl + 1;
    }
    *o = '\0';
    return out;
}
// Round-trip helper: write `content` to a fresh temp file, parse it with
// parseConfFile(), delete the file and return the parsed map.
StringMap parseFromString(kj::StringPtr content) {
    // renamed from `tmp` to avoid shadowing the TempDir member
    char tmpName[16] = "/tmp/lt.XXXXXX";
    int fd = mkstemp(tmpName);
    LSYSCALL(write(fd, content.begin(), content.size()));
    close(fd);
    StringMap parsed = parseConfFile(tmpName);
    unlink(tmpName);
    return parsed;
}
// Lazily connect a Cap'n Proto two-party RPC client to the fixture's unix
// socket and return the LaminarCi bootstrap capability.
LaminarCi::Client client() {
if(!rpc) {
// first call: connect the stream and keep the RpcSystem (with its network
// and stream attached) alive for the lifetime of the fixture
auto stream = ioContext->provider->getNetwork().parseAddress(bind_rpc).wait(ioContext->waitScope)->connect().wait(ioContext->waitScope);
auto net = kj::heap<capnp::TwoPartyVatNetwork>(*stream, capnp::rpc::twoparty::Side::CLIENT);
rpc = kj::heap<capnp::RpcSystem<capnp::rpc::twoparty::VatId>>(*net, nullptr).attach(kj::mv(net), kj::mv(stream));
}
// NOTE(review): the MallocMessageBuilder is a temporary, but its first
// segment is the static `scratch` buffer, which presumably keeps hostId's
// backing memory valid after the builder is destroyed — confirm.
static capnp::word scratch[4];
memset(scratch, 0, sizeof(scratch));
auto hostId = capnp::MallocMessageBuilder(scratch).getRoot<capnp::rpc::twoparty::VatId>();
hostId.setSide(capnp::rpc::twoparty::Side::SERVER);
return rpc->bootstrap(hostId).castAs<LaminarCi>();
}
kj::Own<capnp::RpcSystem<capnp::rpc::twoparty::VatId>> rpc; // lazily created by client()
TempDir tmp; // per-test laminar home directory
std::string home, bind_rpc, bind_http; // home dir and unix socket paths derived from it
Settings settings;
Server* server;   // owned: created in SetUp(), deleted in TearDown()
Laminar* laminar; // owned: created in SetUp(), deleted in TearDown()
static kj::AsyncIoContext* ioContext; // shared event loop, assigned in the test main()
};
#endif // LAMINAR_FIXTURE_H_

View File

@ -1,188 +0,0 @@
///
/// Copyright 2019-2022 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include <kj/async-unix.h>
#include "laminar-fixture.h"
#include "conf.h"
// TODO: consider handling this differently
kj::AsyncIoContext* LaminarFixture::ioContext;
// The first SSE message on "/" must be a complete status snapshot containing
// every dashboard data field, even with no jobs defined.
TEST_F(LaminarFixture, EmptyStatusMessageStructure) {
    auto es = eventSource("/");
    ioContext->waitScope.poll();
    ASSERT_EQ(1, es->messages().size());
    auto json = es->messages().front().GetObject();
    EXPECT_STREQ("status", json["type"].GetString());
    EXPECT_STREQ("Laminar", json["title"].GetString());
    // allow up to one second of skew between message creation and this check;
    // EXPECT_LT(..., 1) was flaky whenever the wall clock ticked over a
    // second boundary between the two time reads
    EXPECT_LE(time(nullptr) - json["time"].GetInt64(), 1);
    auto data = json["data"].GetObject();
    EXPECT_TRUE(data.HasMember("recent"));
    EXPECT_TRUE(data.HasMember("running"));
    EXPECT_TRUE(data.HasMember("queued"));
    EXPECT_TRUE(data.HasMember("executorsTotal"));
    EXPECT_TRUE(data.HasMember("executorsBusy"));
    EXPECT_TRUE(data.HasMember("buildsPerDay"));
    EXPECT_TRUE(data.HasMember("buildsPerJob"));
    EXPECT_TRUE(data.HasMember("timePerJob"));
    EXPECT_TRUE(data.HasMember("resultChanged"));
    EXPECT_TRUE(data.HasMember("lowPassRates"));
    EXPECT_TRUE(data.HasMember("buildTimeChanges"));
}
// Running a job must push queued/started/completed events (after the initial
// status snapshot) to a home-page subscriber.
TEST_F(LaminarFixture, JobNotifyHomePage) {
defineJob("foo", "true");
auto es = eventSource("/");
runJob("foo");
// 1 initial status + queued + started + completed
ASSERT_EQ(4, es->messages().size());
auto job_queued = es->messages().at(1).GetObject();
EXPECT_STREQ("job_queued", job_queued["type"].GetString());
EXPECT_STREQ("foo", job_queued["data"]["name"].GetString());
auto job_started = es->messages().at(2).GetObject();
EXPECT_STREQ("job_started", job_started["type"].GetString());
EXPECT_STREQ("foo", job_started["data"]["name"].GetString());
auto job_completed = es->messages().at(3).GetObject();
EXPECT_STREQ("job_completed", job_completed["type"].GetString());
EXPECT_STREQ("foo", job_completed["data"]["name"].GetString());
}
// Event subscribers must only receive notifications relevant to their scope:
// the home page and /jobs see both jobs, a per-job or per-run page only one.
TEST_F(LaminarFixture, OnlyRelevantNotifications) {
defineJob("job1", "true");
defineJob("job2", "true");
auto esHome = eventSource("/");
auto esJobs = eventSource("/jobs");
auto es1Job = eventSource("/jobs/job1");
auto es2Job = eventSource("/jobs/job2");
auto es1Run = eventSource("/jobs/job1/1");
auto es2Run = eventSource("/jobs/job2/1");
runJob("job1");
runJob("job2");
// 1 status + 2 x (queued + started + completed)
EXPECT_EQ(7, esHome->messages().size());
EXPECT_EQ(7, esJobs->messages().size());
// 1 status + queued + started + completed for the single relevant job
EXPECT_EQ(4, es1Job->messages().size());
EXPECT_EQ(4, es2Job->messages().size());
EXPECT_EQ(4, es1Run->messages().size());
EXPECT_EQ(4, es2Run->messages().size());
}
// A job whose script exits non-zero must report FAILED.
TEST_F(LaminarFixture, FailedStatus) {
defineJob("job1", "false");
auto run = runJob("job1");
ASSERT_EQ(LaminarCi::JobResult::FAILED, run.result);
}
// A run's working directory must be <home>/run/<job>/<num>.
TEST_F(LaminarFixture, WorkingDirectory) {
defineJob("job1", "pwd");
auto run = runJob("job1");
ASSERT_EQ(LaminarCi::JobResult::SUCCESS, run.result);
// expected cwd printed by the script: <tmp>/run/job1/1
std::string cwd{tmp.path.append(kj::Path{"run","job1","1"}).toString(true).cStr()};
EXPECT_EQ(cwd + "\n", stripLaminarLogLines(run.log).cStr());
}
// The standard laminar environment variables must be present in a job's
// environment, with the expected values for a first run.
TEST_F(LaminarFixture, Environment) {
defineJob("foo", "env");
auto run = runJob("foo");
std::string ws{tmp.path.append(kj::Path{"run","foo","workspace"}).toString(true).cStr()};
std::string archive{tmp.path.append(kj::Path{"archive","foo","1"}).toString(true).cStr()};
StringMap map = parseFromString(run.log);
EXPECT_EQ("1", map["RUN"]);
EXPECT_EQ("foo", map["JOB"]);
EXPECT_EQ("success", map["RESULT"]);
// no previous run exists, so LAST_RESULT is "unknown"
EXPECT_EQ("unknown", map["LAST_RESULT"]);
EXPECT_EQ(ws, map["WORKSPACE"]);
EXPECT_EQ(archive, map["ARCHIVE"]);
}
// A parameter passed to runJob must appear verbatim in the job's environment.
TEST_F(LaminarFixture, ParamsToEnv) {
    defineJob("foo", "env");
    StringMap jobParams;
    jobParams["foo"] = "bar";
    auto run = runJob("foo", jobParams);
    StringMap env = parseFromString(run.log);
    EXPECT_EQ("bar", env["foo"]);
}
// Aborting a running job must terminate it and report ABORTED.
TEST_F(LaminarFixture, Abort) {
defineJob("job1", "sleep inf");
auto req = client().runRequest();
req.setJobName("job1");
auto res = req.send();
// There isn't a nice way of knowing when the leader process is ready to
// handle SIGTERM. Just wait until it prints something to the log
ioContext->waitScope.poll();
kj::HttpHeaderTable headerTable;
char _;
// read a single byte of the streaming log to confirm the run has started
kj::newHttpClient(ioContext->lowLevelProvider->getTimer(), headerTable,
*ioContext->provider->getNetwork().parseAddress(bind_http.c_str()).wait(ioContext->waitScope))
->request(kj::HttpMethod::GET, "/log/job1/1", kj::HttpHeaders(headerTable)).response.wait(ioContext->waitScope).body
->tryRead(&_, 1, 1).wait(ioContext->waitScope);
// now it should be ready to abort
ASSERT_TRUE(laminar->abort("job1", 1));
EXPECT_EQ(LaminarCi::JobResult::ABORTED, res.wait(ioContext->waitScope).getResult());
}
// DESCRIPTION from the job's .conf must be delivered in the job page's
// initial status message.
TEST_F(LaminarFixture, JobDescription) {
defineJob("foo", "true", "DESCRIPTION=bar");
auto es = eventSource("/jobs/foo");
ioContext->waitScope.poll();
ASSERT_EQ(1, es->messages().size());
auto json = es->messages().front().GetObject();
ASSERT_TRUE(json.HasMember("data"));
auto data = json["data"].GetObject();
ASSERT_TRUE(data.HasMember("description"));
EXPECT_STREQ("bar", data["description"].GetString());
}
// A job queued with frontOfQueue=true must start before jobs queued earlier
// once executors become available.
TEST_F(LaminarFixture, QueueFront) {
setNumExecutors(0);
defineJob("foo", "true");
defineJob("bar", "true");
auto es = eventSource("/");
// queue foo normally, then bar at the front of the queue
auto req1 = client().queueRequest();
req1.setJobName("foo");
auto res1 = req1.send();
auto req2 = client().queueRequest();
req2.setFrontOfQueue(true);
req2.setJobName("bar");
auto res2 = req2.send();
ioContext->waitScope.poll();
// release the queue by providing executors
setNumExecutors(2);
ioContext->waitScope.poll();
// expect at least: status + 2 queued + 2 started
ASSERT_GE(es->messages().size(), 5);
// bar (front of queue) must start before foo
auto started1 = es->messages().at(3).GetObject();
EXPECT_STREQ("job_started", started1["type"].GetString());
EXPECT_STREQ("bar", started1["data"]["name"].GetString());
auto started2 = es->messages().at(4).GetObject();
EXPECT_STREQ("job_started", started2["type"].GetString());
EXPECT_STREQ("foo", started2["data"]["name"].GetString());
}

View File

@ -1,40 +0,0 @@
///
/// Copyright 2019 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include <kj/async-unix.h>
#include <gtest/gtest.h>
#include <kj/debug.h>
#include "laminar-fixture.h"
#include "leader.h"
// gtest main supplied in order to call captureChildExit and handle process leader
int main(int argc, char **argv) {
// When re-executed as a run leader, argv[0] is presumably replaced with a
// JSON blob by the parent, hence the '{' sentinel — TODO confirm against
// the leader spawning code.
if(argv[0][0] == '{')
return leader_main();
// TODO: consider handling this differently
auto ioContext = kj::setupAsyncIo();
LaminarFixture::ioContext = &ioContext;
// required so the event loop can reap job child processes
kj::UnixEventPort::captureChildExit();
//kj::_::Debug::setLogLevel(kj::_::Debug::Severity::INFO);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,60 +0,0 @@
///
/// Copyright 2018-2022 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#ifndef LAMINAR_TEMPDIR_H_
#define LAMINAR_TEMPDIR_H_
#include "log.h"
#include <kj/filesystem.h>
#include <stdlib.h>
// RAII temporary directory under /tmp serving as a per-test laminar home:
// created on construction, removed again on destruction.
class TempDir {
public:
    TempDir() :
        path(mkdtemp()),
        fs(kj::newDiskFilesystem()->getRoot().openSubdir(path, kj::WriteMode::MODIFY))
    {
    }
    ~TempDir() noexcept {
        kj::newDiskFilesystem()->getRoot().remove(path);
    }
    // Lay out the bare cfg/{jobs,contexts} structure expected by laminar.
    void init() {
        fs->openSubdir(kj::Path{"cfg"}, kj::WriteMode::CREATE);
        fs->openSubdir(kj::Path{"cfg", "jobs"}, kj::WriteMode::CREATE);
        fs->openSubdir(kj::Path{"cfg", "contexts"}, kj::WriteMode::CREATE);
    }
    // Delete every entry directly under the temp dir, leaving it empty.
    void clean() {
        for(kj::StringPtr entry : fs->listNames()) {
            fs->remove(kj::Path{entry});
        }
    }
    kj::Path path;
    kj::Own<const kj::Directory> fs;
private:
    static kj::Path mkdtemp() {
        char nameTemplate[] = "/tmp/laminar-test-XXXXXX";
        LASSERT(::mkdtemp(nameTemplate) != nullptr, "mkdtemp failed");
        // skip the leading '/' so the result is a path relative to the
        // filesystem root used with getRoot() above
        return kj::Path::parse(&nameTemplate[1]);
    }
};
#endif // LAMINAR_TEMPDIR_H_

View File

@ -1,64 +0,0 @@
///
/// Copyright 2018-2020 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include "conf.h"
#include "log.h"
#include <gtest/gtest.h>
// Fixture providing a temp file plus a helper that writes arbitrary config
// text into it and parses it with parseConfFile().
class ConfTest : public ::testing::Test {
protected:
    void SetUp() override {
        fd = mkstemp(tmpFile);
    }
    void TearDown() override {
        close(fd);
        unlink(tmpFile);
    }
    // Replace the temp file's contents with `conf` and parse it into cfg.
    void parseConf(std::string conf) {
        // was lseek(fd, SEEK_SET, 0): offset and whence were transposed and
        // it only worked because SEEK_SET == 0. Also truncate first so a
        // shorter config cannot leave stale bytes from an earlier call.
        lseek(fd, 0, SEEK_SET);
        LSYSCALL(ftruncate(fd, 0));
        LSYSCALL(write(fd, conf.data(), conf.size()));
        cfg = parseConfFile(tmpFile);
    }
    StringMap cfg;                     // result of the most recent parseConf()
    int fd;                            // open handle to tmpFile
    char tmpFile[32] = "/tmp/lt.XXXXXX";
};
// An unparsed or empty config yields an empty map.
TEST_F(ConfTest, Empty) {
EXPECT_TRUE(cfg.empty());
parseConf("");
EXPECT_TRUE(cfg.empty());
}
// Comment lines are ignored, even when they look like assignments.
TEST_F(ConfTest, Comments) {
parseConf("#");
EXPECT_TRUE(cfg.empty());
parseConf("#foo=bar");
EXPECT_TRUE(cfg.empty());
}
// key=value lines parse into the map; get() converts to the fallback's type.
TEST_F(ConfTest, Parse) {
parseConf("foo=bar\nbar=3");
ASSERT_EQ(2, cfg.size());
EXPECT_EQ("bar", cfg.get("foo", std::string("fallback")));
EXPECT_EQ(3, cfg.get("bar", 0));
}
// Missing keys return the supplied fallback value.
TEST_F(ConfTest, Fallback) {
EXPECT_EQ("foo", cfg.get("test", std::string("foo")));
}

View File

@ -1,83 +0,0 @@
///
/// Copyright 2018 Oliver Giles
///
/// This file is part of Laminar
///
/// Laminar is free software: you can redistribute it and/or modify
/// it under the terms of the GNU General Public License as published by
/// the Free Software Foundation, either version 3 of the License, or
/// (at your option) any later version.
///
/// Laminar is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with Laminar. If not, see <http://www.gnu.org/licenses/>
///
#include <gtest/gtest.h>
#include "database.h"
// Fixture opening a private in-memory SQLite database for each test.
class DatabaseTest : public ::testing::Test {
protected:
DatabaseTest() :
::testing::Test(),
db(":memory:")
{}
Database db;
};
// exec() reports failure for invalid SQL and success for valid SQL.
TEST_F(DatabaseTest, Exec) {
EXPECT_FALSE(db.exec("garbage non-sql"));
EXPECT_TRUE(db.exec("create temporary table test(id int)"));
}
// fetch() maps columns to the requested C++ types, including a value
// beyond 32 bits (4294967299 = 2^32 + 3) to exercise the uint64_t path.
TEST_F(DatabaseTest, Fetch) {
int n = 0;
db.stmt("select 2, 'cat', 4294967299").fetch<int, std::string, uint64_t>([&](int i, std::string s, uint64_t ui){
n++;
EXPECT_EQ(2, i);
EXPECT_EQ("cat", s);
EXPECT_EQ(4294967299, ui);
});
EXPECT_EQ(1, n);
}
// bind() substitutes positional parameters before fetching.
TEST_F(DatabaseTest, Bind) {
int n = 0;
db.stmt("select ? * 2").bind(2).fetch<int>([&](int i){
n++;
EXPECT_EQ(4, i);
});
EXPECT_EQ(1, n);
}
// String parameters bind and concatenate correctly; the row callback runs once.
TEST_F(DatabaseTest, Strings) {
std::string res;
db.stmt("select ? || ?").bind("a", "b").fetch<std::string>([&res](std::string s){
EXPECT_TRUE(res.empty());
res = s;
});
EXPECT_EQ("ab", res);
}
// Repeated inserts and a multi-row select round-trip all values in order.
TEST_F(DatabaseTest, MultiRow) {
ASSERT_TRUE(db.exec("create table test(id int)"));
int i = 0;
while(i < 10)
EXPECT_TRUE(db.stmt("insert into test values(?)").bind(i++).exec());
i = 0;
db.stmt("select * from test").fetch<int>([&](int r){
EXPECT_EQ(i++, r);
});
EXPECT_EQ(10, i);
}
// The custom stdev() SQL function computes the sample standard deviation.
// NOTE(review): EXPECT_FLOAT_EQ (not DOUBLE_EQ) is presumably intentional —
// the expected literal is truncated beyond float precision; confirm.
TEST_F(DatabaseTest, StdevFunc) {
double res = 0;
db.stmt("with a (x) as (values (7),(3),(45),(23)) select stdev(x) from a").fetch<double>([&](double r){
res = r;
});
EXPECT_FLOAT_EQ(19.0700463205171, res);
}