aboutsummaryrefslogtreecommitdiff
path: root/manifests
diff options
context:
space:
mode:
Diffstat (limited to 'manifests')
-rw-r--r--manifests/auth/ldap.pp49
-rw-r--r--manifests/auth/local.pp72
-rw-r--r--manifests/database.pp13
-rw-r--r--manifests/fly.pp12
-rw-r--r--manifests/init.pp11
-rw-r--r--manifests/keys.pp8
-rw-r--r--manifests/proxy/nginx.pp34
-rw-r--r--manifests/web.pp165
-rw-r--r--manifests/worker.pp140
-rw-r--r--manifests/worker_key.pp10
10 files changed, 514 insertions, 0 deletions
diff --git a/manifests/auth/ldap.pp b/manifests/auth/ldap.pp
new file mode 100644
index 0000000..7e4472b
--- /dev/null
+++ b/manifests/auth/ldap.pp
@@ -0,0 +1,49 @@
+# @summary Concourse local authentication
+#
+# NOTE(review): this class lives in manifests/auth/ldap.pp while the LDAP
+# class lives in local.pp — the two file contents appear swapped; confirm.
+#
+# @param users
+#   List of local users (name plus password, which may be Sensitive).
+# @param main_team_user
+#   List of users which should be added to the "main" team.
+# @param main_team_group
+#   Ignored, but here to keep the same "API" with the other auth modules.
+# @param ensure
+#   Whether this auth configuration should be present or absent.
+class concourse::auth::local (
+  Array[Struct[{
+    'name'     => String,
+    'password' => Variant[String, Sensitive[String]],
+  }]] $users,
+  Optional[Array[String]] $main_team_user = undef,
+  Optional[Array[String]] $main_team_group = undef, # ignored
+  Enum['absent', 'present'] $ensure = 'present',
+) {
+  $env_file = "${concourse::web::conf_dir}/auth-local"
+
+  $environment = {
+    'CONCOURSE_ADD_LOCAL_USER' => $users.map |$user| {
+      $name = $user['name']
+      # Unwrap Sensitive passwords; the rendered file is mode 0600 and
+      # excluded from diffs, so the plain value never leaks into reports.
+      $pass = $user['password'] ? {
+        String  => $user['password'],
+        default => $user['password'].unwrap,
+      }
+      "${name}:${pass}"
+    }.join(','),
+    # BUG FIX: was built from $main_team_group (documented as ignored);
+    # the main-team member list must come from $main_team_user.
+    'CONCOURSE_MAIN_TEAM_LOCAL_USER' => $main_team_user ? {
+      Array   => $main_team_user.join(','),
+      default => undef,
+    },
+  }
+
+  file { $env_file:
+    ensure    => $ensure,
+    content   => epp("${module_name}/env.epp", $environment),
+    # To not show new password
+    show_diff => false,
+    mode      => '0600',
+  }
+
+  systemd::manage_dropin { 'concourse-local-auth':
+    ensure        => $ensure,
+    unit          => $concourse::web::service,
+    service_entry => {
+      'EnvironmentFile' => $env_file,
+    },
+  }
+}
diff --git a/manifests/auth/local.pp b/manifests/auth/local.pp
new file mode 100644
index 0000000..289ce15
--- /dev/null
+++ b/manifests/auth/local.pp
@@ -0,0 +1,72 @@
+# @summary Concourse LDAP authentication
+#
+# Most attributes map directly to concourse's options, but with
+# `CONCOURSE_LDAP_` prefixed.
+#
+# @param main_team_user
+#   List of LDAP users added to the "main" team.
+# @param main_team_group
+#   List of LDAP groups added to the "main" team.
+# @param ensure
+#   Whether this auth configuration should be present or absent.
+class concourse::auth::ldap (
+  String $host,
+  String $bind_dn,
+  Variant[String, Sensitive[String]] $bind_pw,
+  String $user_search_base_dn,
+  String $user_search_username = 'uid',
+  Optional[String] $display_name = undef,
+  Optional[String] $user_search_filter = undef,
+  # BUG FIX: was the non-existent type 'Optioal[String]'.
+  Optional[String] $user_search_id_attr = undef,
+  Optional[String] $user_search_email_attr = undef,
+  Optional[String] $user_search_name_attr = undef,
+  Optional[Stdlib::Absolutepath] $ca_cert = undef,
+  Boolean $insecure_no_ssl = false,
+  Optional[String] $group_search_base_dn = undef,
+  String $group_search_name_attr = 'ou',
+  String $group_search_user_attr = 'uid',
+  String $group_search_group_attr = 'members',
+  Optional[String] $group_search_filter = undef,
+  Optional[Array[String]] $main_team_user = undef,
+  Optional[Array[String]] $main_team_group = undef,
+
+  Enum['absent', 'present'] $ensure = 'present',
+) {
+  $env_file = "${concourse::web::conf_dir}/auth-ldap"
+
+  $environment = {
+    'CONCOURSE_LDAP_HOST' => $host,
+    'CONCOURSE_LDAP_BIND_DN' => $bind_dn,
+    # Unwrap Sensitive, matching concourse::auth::local; the env file is
+    # mode 0600 with show_diff disabled.
+    'CONCOURSE_LDAP_BIND_PW' => $bind_pw ? {
+      String  => $bind_pw,
+      default => $bind_pw.unwrap,
+    },
+    'CONCOURSE_LDAP_USER_SEARCH_BASE_DN' => $user_search_base_dn,
+    'CONCOURSE_LDAP_USER_SEARCH_USERNAME' => $user_search_username,
+    'CONCOURSE_LDAP_DISPLAY_NAME' => $display_name,
+    'CONCOURSE_LDAP_USER_SEARCH_FILTER' => $user_search_filter,
+    'CONCOURSE_LDAP_USER_SEARCH_ID_ATTR' => $user_search_id_attr,
+    'CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR' => $user_search_email_attr,
+    'CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR' => $user_search_name_attr,
+    'CONCOURSE_LDAP_CA_CERT' => $ca_cert,
+    'CONCOURSE_LDAP_INSECURE_NO_SSL' => $insecure_no_ssl,
+    'CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN' => $group_search_base_dn,
+    'CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR' => $group_search_name_attr,
+    'CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR' => $group_search_user_attr,
+    'CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR' => $group_search_group_attr,
+    'CONCOURSE_LDAP_GROUP_SEARCH_FILTER' => $group_search_filter,
+    'CONCOURSE_LDAP_MAIN_TEAM_LDAP_USER' => $main_team_user ? {
+      Array => $main_team_user.join(','),
+      default => undef,
+    },
+    # BUG FIX: was joining $main_team_user here (copy/paste error).
+    'CONCOURSE_LDAP_MAIN_TEAM_LDAP_GROUP' => $main_team_group ? {
+      Array => $main_team_group.join(','),
+      default => undef,
+    },
+  }
+
+  file { $env_file:
+    ensure => $ensure,
+    content => epp("${module_name}/env.epp", $environment),
+    # To not show new password
+    show_diff => false,
+    mode => '0600',
+  }
+
+  systemd::manage_dropin { 'concourse-ldap-auth':
+    ensure => $ensure,
+    unit => $concourse::web::service,
+    service_entry => {
+      'EnvironmentFile' => $env_file,
+    },
+  }
+}
diff --git a/manifests/database.pp b/manifests/database.pp
new file mode 100644
index 0000000..d921cc9
--- /dev/null
+++ b/manifests/database.pp
@@ -0,0 +1,13 @@
+# @summary Manage the PostgreSQL database for a Concourse cluster.
+# @param cluster
+#   Concourse cluster this database belongs to; declared first because
+#   the lookups below interpolate it into their lookup keys.
+# @param username
+#   Database user name.
+# @param password
+#   Password for the database user (may be Sensitive).
+# @param db_name
+#   Name of the database to create.
+class concourse::database (
+  String $cluster = $concourse::default_cluster,
+  # BUG FIX: trailing double comma removed.
+  String $username = lookup("concourse::${cluster}::postgres_user"),
+  # BUG FIX: looked up '::postgres_user' for the password as well.
+  Variant[String, Sensitive[String]] $password = lookup("concourse::${cluster}::postgres_password"),
+  String $db_name = "atc-${cluster}",
+) {
+  postgresql::server::db { $db_name:
+    user     => $username,
+    password => $password,
+    grant    => 'ALL',
+    comment  => 'Concourse CI',
+  }
+}
diff --git a/manifests/fly.pp b/manifests/fly.pp
new file mode 100644
index 0000000..b9e1e71
--- /dev/null
+++ b/manifests/fly.pp
@@ -0,0 +1,12 @@
+# @summary Manage a concourse client node
+#
+# Fly is the name of the concourse client command line.
+#
+# @param ensure
+#   Whether the fly CLI package should be installed or removed.
+class concourse::fly (
+  Enum['absent', 'present'] $ensure = 'present',
+) {
+  # Idempotently declare the package so other classes may also do so.
+  $cli_packages = ['concourse-fly-cli']
+  ensure_packages($cli_packages, { 'ensure' => $ensure })
+}
diff --git a/manifests/init.pp b/manifests/init.pp
new file mode 100644
index 0000000..8b70bd6
--- /dev/null
+++ b/manifests/init.pp
@@ -0,0 +1,11 @@
+# Global defaults for defined resource types.
+# @param worker_work_dir
+#   Default work dir for each worker
+# @param default_cluster
+#   Cluster used by all resources if no specific cluster is specified.
+# @param worker_service
+#   Default name of the worker system service (used by concourse::worker).
+class concourse (
+  String $worker_work_dir = '/opt/concourse/worker',
+  String $default_cluster = 'default',
+  String $worker_service = 'concourse-worker',
+) {
+}
diff --git a/manifests/keys.pp b/manifests/keys.pp
new file mode 100644
index 0000000..dc5fe19
--- /dev/null
+++ b/manifests/keys.pp
@@ -0,0 +1,8 @@
+# @api private
+# @summary Placeholder for exporting Concourse key resources.
+#
+# NOTE(review): the original body exported a bare `@@tsa_host_key` with
+# no title and a stray `@@kj` token — neither parses. The intent (an
+# exported TSA host key for collection elsewhere) is kept below as
+# comments until the real resource types are decided.
+define concourse::keys (
+) {
+  # TODO: export the TSA host key for collection by web/worker nodes, e.g.:
+  #   @@concourse::worker_key { "${title} tsa host key": ... }
+}
+
diff --git a/manifests/proxy/nginx.pp b/manifests/proxy/nginx.pp
new file mode 100644
index 0000000..7e4b9a2
--- /dev/null
+++ b/manifests/proxy/nginx.pp
@@ -0,0 +1,34 @@
+# @summary Reverse proxy (nginx) in front of a Concourse web cluster.
+# @param server_name
+#   Virtual host name served by nginx.
+# @param cluster
+#   Concourse cluster whose upstream members should be proxied to.
+# @param ensure
+#   Whether the proxy configuration should be present or absent.
+define concourse::proxy::nginx (
+  String $server_name,
+  String $cluster,
+  Enum['absent', 'present'] $ensure = 'present',
+) {
+  include concourse
+
+  # NOTE(review): this title becomes the nginx upstream name and is also
+  # referenced by the exported members in concourse::web — but a name
+  # containing " - " is not a valid nginx upstream identifier, and
+  # proxy_pass below targets "http://${cluster}". Confirm the intended
+  # upstream naming before relying on this.
+  nginx::resource::upstream { "concourse - ${cluster}":
+    ensure => $ensure,
+  }
+
+  nginx::resource::server { $server_name:
+    ensure => $ensure,
+  }
+
+  nginx::resource::location { "${server_name} - /":
+    ensure     => $ensure,
+    # BUG FIX: nginx::resource::location requires the owning server.
+    server     => $server_name,
+    location   => '/',
+    proxy_pass => "http://${cluster}",
+  }
+
+  # Separate location for hijack (fly intercept) requests, which need
+  # websocket-style Upgrade/Connection headers.
+  nginx::resource::location { "${server_name} - ~ /hijack$":
+    ensure           => $ensure,
+    server           => $server_name,
+    location         => '~ /hijack$',
+    proxy_pass       => "http://${cluster}",
+    proxy_set_header => [
+      'Host $host',
+      'X-Real-IP $remote_addr',
+      'X-Forwarded-For $proxy_add_x_forwarded_for',
+      'X-Forwarded-Host $host',
+      'X-Forwarded-Proto $scheme',
+      'Proxy ""',
+      'Upgrade $http_upgrade',
+      'Connection "upgrade"',
+    ],
+  }
+}
diff --git a/manifests/web.pp b/manifests/web.pp
new file mode 100644
index 0000000..f89ac4e
--- /dev/null
+++ b/manifests/web.pp
@@ -0,0 +1,165 @@
+# @summary A concourse web node.
+# @param service
+#   The name of the system service.
+#   This service WILL be managed by us.
+# @param service_unit
+#   Exact unit name (in terms of systemd) of the service.
+# @param conf_file
+#   Where configuration environment variables will be stored.
+#   Currently hard-coded in the service file.
+# @param conf_dir
+#   Where additional environment files will be stored. Used (at
+#   least) by each auth resource.
+# @param purge_conf_dir
+#   Should the directory mentioned in `conf_dir` be purged. If this
+#   is true then decommissioning sub-configurations is done by simply
+#   removing that resource.
+# @param ensure
+# @param cluster
+#   If this web node is part of a cluster of web nodes, name that
+#   cluster. Together with `peer_address` this will create an
+#   `nginx::resource::upstream::member` resource for this node, which
+#   should be realized by `concourse::proxy::nginx`.
+#
+#   Also requires `peer_address` to be set
+#
+# @param peer_address
+#   Peer address used when used in a cluster
+#
+#   Also requires `cluster` to be set.
+#
+# @param worker_public_keys
+#   Statically configured worker public keys, appended to the TSA
+#   authorized keys file in addition to exported `concourse::worker_key`
+#   resources for this cluster.
+#
+# Remaining keys map directly to concourse configurations.
+class concourse::web (
+  # BUG FIX: $cluster is declared before the lookups that interpolate it;
+  # the original referenced it before declaration.
+  String $cluster = 'default',
+
+  String $postgres_user = lookup("concourse::${cluster}::postgres_user"),
+  Variant[String, Sensitive[String]] $postgres_password = lookup("concourse::${cluster}::postgres_password"),
+
+  Variant[String, Sensitive[String]] $session_signing_key = lookup("concourse::${cluster}::session_signing_key"),
+  Variant[String, Sensitive[String]] $tsa_private_key = lookup("concourse::${cluster}::tsa_private_key"),
+  Variant[String, Sensitive[String]] $tsa_public_key = lookup("concourse::${cluster}::tsa_public_key"),
+  Array[String] $worker_public_keys = [],
+
+  String $key_dir = '/usr/lib/concourse',
+  String $session_signing_key_file = "${key_dir}/session_signing_key",
+  String $tsa_host_key_file = "${key_dir}/tsa_host_key",
+  String $tsa_authorized_keys_file = "${key_dir}/authorized_worker_keys",
+
+  Optional[String] $peer_address = undef,
+
+  Optional[String] $postgres_host = undef,
+  Optional[String] $postgres_port = undef,
+  Optional[Stdlib::Unixpath] $postgres_socket = undef,
+
+  Optional[String] $postgres_database = undef,
+
+  Optional[String] $external_url = undef,
+
+  Optional[Integer] $api_max_conns = undef,
+  Optional[Integer] $backend_max_conns = undef,
+
+  String $service = 'concourse',
+  String $service_unit = "${service}.service",
+  # BUG FIX: 'Std::AbsolutePath' does not exist; stdlib's type is
+  # Stdlib::Absolutepath.
+  Stdlib::Absolutepath $conf_file = '/etc/conf.d/concourse',
+  Stdlib::Absolutepath $conf_dir = '/etc/conf.d/concourse.d',
+  Boolean $purge_conf_dir = true,
+  Enum['absent', 'present'] $ensure = 'present',
+
+  Array[String] $packages = [
+    'concourse',
+    # BUG FIX: was misspelled 'councourse-resource-types'.
+    'concourse-resource-types',
+  ],
+) {
+  include concourse
+
+  ensure_packages($packages, {
+    ensure => $ensure,
+  })
+
+  $env = {
+    'CONCOURSE_SESSION_SIGNING_KEY' => $session_signing_key_file,
+    'CONCOURSE_TSA_HOST_KEY'        => $tsa_host_key_file,
+    'CONCOURSE_TSA_AUTHORIZED_KEYS' => $tsa_authorized_keys_file,
+    'CONCOURSE_POSTGRES_USER'       => $postgres_user,
+    # Unwrap Sensitive; the rendered file is 0600 with show_diff off.
+    'CONCOURSE_POSTGRES_PASSWORD'   => $postgres_password ? {
+      String  => $postgres_password,
+      default => $postgres_password.unwrap,
+    },
+    'CONCOURSE_CLUSTER'             => $cluster,
+    'CONCOURSE_PEER_ADDRESS'        => $peer_address,
+    'CONCOURSE_POSTGRES_HOST'       => $postgres_host,
+    'CONCOURSE_POSTGRES_PORT'       => $postgres_port,
+    'CONCOURSE_POSTGRES_SOCKET'     => $postgres_socket,
+    'CONCOURSE_POSTGRES_DATABASE'   => $postgres_database,
+    'CONCOURSE_EXTERNAL_URL'        => $external_url,
+    'CONCOURSE_API_MAX_CONNS'       => $api_max_conns,
+    'CONCOURSE_BACKEND_MAX_CONNS'   => $backend_max_conns,
+  }
+
+  # Shared ensure value for the managed directories.
+  $dir_ensure = $ensure ? {
+    'present' => 'directory',
+    default   => 'absent',
+  }
+
+  file { $conf_file:
+    ensure    => $ensure,
+    mode      => '0600',
+    show_diff => false,
+    content   => epp("${module_name}/env.epp", $env),
+    notify    => Service[$service],
+  }
+
+  file { $conf_dir:
+    ensure  => $dir_ensure,
+    purge   => $purge_conf_dir,
+    recurse => true,
+    notify  => Service[$service],
+  }
+
+  file { $key_dir:
+    ensure  => $dir_ensure,
+    mode    => '0700',
+    recurse => true,
+    # BUG FIX: was 'forge => true', which is not a file attribute.
+    purge   => true,
+  }
+
+  file {
+    default:
+      ensure => $ensure,
+      mode   => '0600',
+      ;
+    $session_signing_key_file:
+      content => $session_signing_key,
+      ;
+    $tsa_host_key_file:
+      # BUG FIX: attribute was misspelled 'conent'.
+      content => $tsa_private_key,
+      ;
+    "${tsa_host_key_file}.pub":
+      content => $tsa_public_key,
+      ;
+  }
+
+  # BUG FIX: the concat title now matches the target used by the
+  # fragments below and by concourse::worker_key (was
+  # 'authorized_workers_key - ...'), 'target' became 'path' and
+  # 'warning' became 'warn' (the actual concat attribute names).
+  concat { "authorized_worker_keys - ${cluster}":
+    ensure         => $ensure,
+    path           => $tsa_authorized_keys_file,
+    warn           => '# File managed by puppet, local changes WILL be overwritten',
+    ensure_newline => true,
+  }
+
+  $worker_public_keys.each |$key| {
+    concat::fragment { sha1($key):
+      content => $key,
+      target  => "authorized_worker_keys - ${cluster}",
+    }
+  }
+
+  # BUG FIX: collectors for defined types must use the fully-qualified
+  # capitalized type name, not the bare last segment.
+  Concourse::Worker_key <<| cluster == $cluster |>>
+
+  systemd::unit_file { $service_unit:
+    ensure => $ensure,
+    source => "puppet:///modules/${module_name}/concourse-web.service",
+  } ~> service { $service:
+    ensure => $ensure ? {
+      'present' => 'running',
+      default   => 'stopped',
+    },
+    enable => true,
+  }
+
+  if $peer_address {
+    @@nginx::resource::upstream::member { $facts['trusted']['certname']:
+      ensure   => $ensure,
+      upstream => "concourse - ${cluster}",
+      server   => $peer_address,
+    }
+  }
+}
diff --git a/manifests/worker.pp b/manifests/worker.pp
new file mode 100644
index 0000000..18703f2
--- /dev/null
+++ b/manifests/worker.pp
@@ -0,0 +1,140 @@
+# @summary A Concourse worker
+#
+# Declared as a class, since the upstream documentation explicitly states
+# that multiple workers on a single node is nonsensical. This may however
+# change in future versions of this module, since the option to limit
+# a worker to a specific team or tag exists, and linux can limit the amount
+# of resources given to a given process (this gets even easier through systemd,
+# which the module currently uses extensively).
+
+# @param key_dir
+#   Directory in which keys should be stored.
+# @param worker_key_file
+#   File in which the worker's public key should be stored.
+# @param worker_private_key_file
+#   File in which the worker's private key should be stored.
+# @param cluster
+#   Which concourse cluster this worker should be part of.
+# @param service
+#   Name of the worker service
+# @param service_unit
+#   Name of the (systemd) service unit for the worker.
+# @param ensure
+# @param work_dir
+#   Working directory for the worker.
+# @param tsa_host
+#   Network address to the master (web) node that this worker should connect to.
+# @param tsa_public_key
+#   Public key of this workers master.
+# @param worker_public_key
+#   Public key of this worker. Only used if `$manage_private_key` is
+#   false, otherwise a key will be automatically generated and the
+#   public key exported as a fact.
+# @param worker_private_key
+#   Private key of this worker. Like `worker_public_key`, will only
+#   be used if `$manage_private_key` is false. This value will however
+#   *not* be exported.
+# @param manage_private_key
+#   Should this node manage and generate its own key pair. If true
+#   (the default) then a key will automatically be generated, and the
+#   public portion exported as a fact.
+# @param export_public_key
+#   Should an exported resource with this nodes public key be created.
+#   This reads the fact from `concourse_worker_key` and creates an exported
+#   resource of type `concourse::worker_key`, which will allow the master
+#   to realize it.
+# @param tag
+#   List of arbitrary tags to connect to this worker. Can be used by
+#   pipelines which requires specific environments.
+# @param team
+#   Limit this worker to a specific team.
+# @param healthcheck_bind_ip
+#   Address to bind the healthcheck endpoint to.
+# @param healthcheck_bind_port
+#   Port to bind the health endpoint to.
+# @param healthcheck_timeout
+#   Timeout for health check.
+# @param extra_env
+#   A hash of extra environment variables which will be passed directly
+#   to the worker process.
+class concourse::worker (
+  # BUG FIX: 'Std::AbsolutePath' does not exist (Stdlib::Absolutepath),
+  # and the private/public key paths were swapped — the private key now
+  # lives at the bare path and the public key gets the '.pub' suffix,
+  # matching what `concourse generate-key -f FILE` produces.
+  Stdlib::Absolutepath $key_dir = '/usr/lib/concourse',
+  Stdlib::Absolutepath $worker_private_key_file = "${key_dir}/worker_key",
+  Stdlib::Absolutepath $worker_key_file = "${worker_private_key_file}.pub",
+  String $cluster = $concourse::default_cluster,
+  String $service = $concourse::worker_service,
+  String $service_unit = "${service}.service",
+  Enum['absent', 'present'] $ensure = 'present',
+
+  String $work_dir = $concourse::worker_work_dir,
+  String $tsa_host = lookup("concourse::${cluster}::tsa_host"),
+  String $tsa_public_key = lookup("concourse::${cluster}::tsa_public_key"),
+  # BUG FIX: 'Optinal' typos corrected to Optional.
+  Optional[String] $worker_public_key = undef,
+  Optional[String] $worker_private_key = undef,
+  Boolean $manage_private_key = $worker_private_key == undef,
+  Boolean $export_public_key = true,
+  Optional[Array[String]] $tag = undef,
+  Optional[String] $team = undef,
+
+  String $healthcheck_bind_ip = '0.0.0.0',
+  Stdlib::Port $healthcheck_bind_port = 8888,
+  String $healthcheck_timeout = '5s',
+
+  Hash[String, Any] $extra_env = {},
+) {
+  ensure_packages([
+    'concourse',
+  ])
+
+  if $manage_private_key {
+    # 'concourse generate-key -f FILE' writes FILE (private) and FILE.pub.
+    exec { 'Concourse generate worker key':
+      command => ['concourse', 'generate-key', '-t', 'ssh', '-f', $worker_private_key_file],
+      creates => $worker_private_key_file, # and worker_key_file (.pub)
+      path    => ['/sbin', '/usr/sbin', '/bin', '/usr/bin'],
+      before  => Service[$service],
+    }
+  } else {
+    file { $worker_key_file:
+      ensure  => $ensure,
+      content => $worker_public_key,
+    }
+
+    file { $worker_private_key_file:
+      ensure  => $ensure,
+      mode    => '0600',
+      content => $worker_private_key,
+    }
+  }
+
+  # The public key is read back through a custom fact, so it is only
+  # available on the run *after* the key was generated; skip the export
+  # until then instead of failing the String type check with undef.
+  if $export_public_key and $facts['concourse_worker_key'] {
+    @@concourse::worker_key { "${facts['trusted']['certname']} worker key":
+      content => $facts['concourse_worker_key'],
+      cluster => $cluster,
+    }
+  }
+
+  systemd::unit_file { $service_unit:
+    ensure => $ensure,
+    # BUG FIX: attribute was misspelled 'soruce'.
+    source => "puppet:///modules/${module_name}/concourse-worker.service",
+  } ~> service { $service:
+    ensure => $ensure ? {
+      'present' => 'running',
+      default   => 'stopped',
+    },
+    enable => true,
+  }
+
+  $env = {
+    'CONCOURSE_WORK_DIR'               => $work_dir,
+    'CONCOURSE_TSA_HOST'               => $tsa_host,
+    'CONCOURSE_TSA_PUBLIC_KEY'         => $tsa_public_key,
+    'CONCOURSE_TSA_WORKER_PRIVATE_KEY' => $worker_private_key_file,
+    # BUG FIX: join tags into a comma-separated list instead of passing
+    # a stringified Puppet array, matching the auth classes.
+    'CONCOURSE_TAG'                    => $tag ? {
+      Array   => $tag.join(','),
+      default => undef,
+    },
+    'CONCOURSE_TEAM'                   => $team,
+    'HEALTHCHECK_BIND_IP'              => $healthcheck_bind_ip,
+    'HEALTHCHECK_BIND_PORT'            => $healthcheck_bind_port,
+    'HEALTHCHECK_TIMEOUT'              => $healthcheck_timeout,
+  } + $extra_env
+
+  file { '/etc/conf.d/concourse-worker':
+    ensure    => $ensure,
+    mode      => '0600',
+    show_diff => false,
+    content   => epp("${module_name}/env.epp", $env),
+    notify    => Service[$service],
+  }
+}
diff --git a/manifests/worker_key.pp b/manifests/worker_key.pp
new file mode 100644
index 0000000..320bba1
--- /dev/null
+++ b/manifests/worker_key.pp
@@ -0,0 +1,10 @@
+# @api private
+#
+# Registers one worker's public key as a fragment of the authorized
+# worker keys concat file for a cluster.
+#
+# @param content
+#   The worker's public key material.
+# @param cluster
+#   Cluster whose authorized_worker_keys file receives the key.
+define concourse::worker_key (
+  String $content,
+  String $cluster,
+) {
+  # Target name must match the concat resource declared for this
+  # cluster's authorized worker keys file.
+  $target_name = "authorized_worker_keys - ${cluster}"
+
+  concat::fragment { $name:
+    target  => $target_name,
+    content => $content,
+  }
+}