aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHugo Hörnquist <hugo@lysator.liu.se>2023-06-18 20:35:48 +0200
committerHugo Hörnquist <hugo@lysator.liu.se>2023-06-20 00:26:09 +0200
commit5e1032519189f3b6fa793cec81833a781a91d8f2 (patch)
tree51a5ba59974e61f7a56128afcb324d49c9f8b7c8
parentInitial add. (diff)
downloadconcourse-5e1032519189f3b6fa793cec81833a781a91d8f2.tar.gz
concourse-5e1032519189f3b6fa793cec81833a781a91d8f2.tar.xz
Rewrote almost everything.
-rw-r--r--README.md169
-rw-r--r--lib/facter/concourse_worker_key.rb8
-rw-r--r--manifests/auth/ldap.pp131
-rw-r--r--manifests/auth/local.pp80
-rw-r--r--manifests/database.pp25
-rw-r--r--manifests/fly.pp2
-rw-r--r--manifests/init.pp48
-rw-r--r--manifests/keys.pp8
-rw-r--r--manifests/proxy/nginx.pp22
-rw-r--r--manifests/web.pp100
-rw-r--r--manifests/worker.pp87
-rw-r--r--manifests/worker_key.pp13
-rw-r--r--metadata.json2
-rw-r--r--templates/env.epp4
14 files changed, 476 insertions, 223 deletions
diff --git a/README.md b/README.md
index 6f51548..a97f7ec 100644
--- a/README.md
+++ b/README.md
@@ -7,71 +7,160 @@ nodes, and databases.
Usage
-----
+### Overview
+Concourse is configured as a set of clusters. Each cluster consists of
+
+- 1 database (a database within PostgreSQL)
+- 1 or more web nodes
+- 1 load balancing nginx
+  (this is needed even for a single node, due to how this module is written).
+- 1 or more worker nodes
+
+### Keys
+
+There are also a number of [different keys](https://concourse-ci.org/concourse-generate-key.html)
+needed for concourse to operate correctly.
+
+These are
+
+- The session signing key, used by the web node for signing user session tokens.
+- the TSA host key, used by worker nodes to verify their connection to the web node
+- The worker keys, simple ssh keys used by the nodes when connecting.
+
+The session signing key, and the TSA host key are **NOT** managed by this
+module. This since they need to be the same for all nodes in a cluster (and
+there isn't a good way to mark a single node as the "master" without extra
+work, which might as well be used for manually generating the keys).
+
+The worker keys *are* however managed by this module. Each worker
+generates its own key, and then creates an exported resource which
+each web node realizes (this is scoped to within the cluster).
+
+### Example Configuration
+
+A complete concourse configuration might look like this.
+
+Note that the `session_signing_key`, `tsa_private_key`, and `tsa_public_key` are
+found through Hiera in this example, as explained under [Keys](#Keys).
+
+```puppet
+$cluster = 'default'
+$external_domain = 'concourse.example.com'
+
+# Cluster configuration should be set on the main resource. All other resources
+# references this hash, referenced by the cluster parameter.
+class { 'concourse':
+ default_cluster => $cluster,
+ clusters => {
+ $cluster => {
+ 'postgres_user' => 'concourse',
+ 'postgres_password' => 'CHANGEME',
+ 'external_url' => "https://${external_domain}",
+
+ # Keys are gotten through Hiera here.
+ 'session_signing_key' => lookup('session_signing_key'),
+ 'tsa_private_key' => lookup('tsa_private_key'),
+ 'tsa_public_key' => lookup('tsa_public_key'),
+ }
+ }
+}
+
+# Creates the database and user.
+# Omit this if managing the database elsewhere
+concourse::database {
+ cluster => $cluster,
+}
+
+# Configures the load balancer.
+# Should only be done once for the cluster
+# (unless you load balance your load balancers...)
+#
+# ensure that `nginx::stream` is set to true.
+concourse::proxy::nginx { $external_domain:
+ cluster => $cluster,
+}
+
+# Configures a web node, and attach it to the cluster.
+# Note that multiple web nodes in the same cluster should have identical
+# configurations (except for their peer_address).
+# Note that concourse currently always binds to port 8080.
+class { 'concourse::web':
+ cluster => $cluster,
+}
+
+# Some authentication method needs to be configured. The authentication happens
+# in the web nodes (although an extra layer could be added through nginx).
+# Check the `concourse::auth::` module for available methods.
+#
+# The simplest is `concourse::auth::local`:
+class { 'concourse::auth::local':
+ users => [
+ {
+ 'name' => 'hugo',
+ 'password' => 'This password is stored in cleartext',
+ }
+ ]
+}
+
+# Configure a worker node, and also attach that to the cluster.
+class { 'concourse::worker':
+ cluster => $cluster,
+}
+
+# Finally, this installs the fly cli.
+include concourse::fly
+```
+
+Note that only some keys are managed through the
+`concourse::configured_clusters`, and Hiera is *strongly* recommended for
+more advanced setups with multi-node clusters.
+
### Nodes
+As mentioned above, a concourse cluster contains a number of different roles
+(here called nodes). A short summary of each node.
#### Web node
Web nodes acts as the front-end, and dispatcher.
Each web node is stateless, and manages its state through a shared
-database. If multiple nodes are used, then a
-[web node cluster](#web node cluster)
+database. If multiple nodes are used, then a
+[web node cluster](#web node cluster)
should be used.
(technically clusters are always used, and default to the cluster "default").
-```puppet
-class { 'concourse::web':
- postgres_user => '',
- postgres_password => '',
-}
-```
-
##### Authentication
-#### Worker Node
-
-#### Database
+TODO
-#### Fly Client
+#### Worker Node
-#### Web node cluster
+TODO
+#### Database
-### Special Hiera Keys
-- `concourse::${cluster}::postgres_user`
-- `concourse::${cluster}::postgres_password`
-- `concourse::${cluster}::session_signing_key`
-- `concourse::${cluster}::tsa_private_key`
-- `concourse::${cluster}::tsa_public_key`
+TODO
-Keys
-----
-### Session signing key
-Used by the web node for signing and verifying user session tokens.
+#### Fly Client
-### TSA host key
-Used by the web node for the SSH worker registration gateway server ("TSA").
+TODO
-The public key is given to each worker node to verify the remote host wthen
-connecting via SSH.
+#### Web node cluster
-### Worker key
+TODO
-Each worker node verifies its registration with the web node via a SSH key.
-The public key must be listed in the web node's *authorized worker keys* file
-in order for the worker to register.
+### Special Hiera Keys
-Hiera Examples
---------------
+TODO
```yaml
-concourse::cluster::tsa_host: concourse.example.com
-concourse::cluster::postgres_user: concourse
-concourse::cluster::postgres_password: MySuperSecretPassword
-concourse::cluster::session_signing_key: 'A valid key'
-concourse::cluster::tsa_private_key: 'A valid key'
-concourse::cluster::tsa_private_key: 'A valid key'
+concourse::${cluster}:
+ postgres_user: pg_username
+ postgres_password: pg_password
+ session_signing_key: 'A valid key'
+ tsa_private_key: 'A valid key'
+ tsa_public_key: 'A public key matching the private key'
```
[CONCOURSE]: https://concourse-ci.org/
diff --git a/lib/facter/concourse_worker_key.rb b/lib/facter/concourse_worker_key.rb
deleted file mode 100644
index f0d9398..0000000
--- a/lib/facter/concourse_worker_key.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-Facter.add(:concourse_worker_key) do
- confine do
- File.exists? '/usr/lib/concourse/worker_key.pub'
- end
- setcode do
- Facter::Core::Execution.execute('cat /usr/lib/concourse/worker_key.pub')
- end
-end
diff --git a/manifests/auth/ldap.pp b/manifests/auth/ldap.pp
index 7e4472b..70df4a8 100644
--- a/manifests/auth/ldap.pp
+++ b/manifests/auth/ldap.pp
@@ -1,47 +1,122 @@
-# @summary Concourse local authentication
-# @param users
-# List of local users.
-# @param main_team_users
-# List of users which should be added to the "main" team.
+# @summary Concourse LDAP authentication
+# Most attributes maps directly to concourse's options, but with
+# `CONCOURSE_LDAP_` prefixed.
+#
+# @param host
+# LDAP host to bind to, e.x. ldap.example.com
+# @param bind_dn
+# Distinguished name used when binding to the ldap server.
+# e.x. `cn=read-only-admin,dc=example,dc=com`
+# @param bind_pw
+# Password used when binding to the ldap server.
+# @param user_search_base_dn
+# Base distinguished name when searching for user, together with
+# `user_search_username` creates the query:
+# `${user_search_username}=%,${user_search_base_dn}`.
+#
+# Should be something along the lines of `cn=users,dc=example,dc=com`.
+# @param user_search_username
+# See `user_search_base_dn`.
+#
+# Should probably be `uid` or `cn`.
+# @param display_name
+# Fancy name to display for this authentication method.
+# @param user_search_filter
+# LDAP filter to limit which users are queried
+# @param user_search_id_attr
+# LDAP attribute used to specify the users id
+# @param user_search_email_attr
+# LDAP attribute used to specify the users email address
+# @param user_search_name_attr
+# LDAP attribute used to specify the users name.
+# @param ca_cert
+# Path to a CA CERT used when connecting to the LDAP server.
+# Probably mutually exclusive with `insecure_no_ssl`.
+# @param insecure_no_ssl
+# Allow unencrypted connections to the ldap server.
+# @param group_search_base_dn
+# Base for LDAP search for groups. If this is set then LDAP groups
+# are mapped to teams in Concourse.
+#
+# e.x. `cn=group,dc=example,dc=com`
+# @param group_search_name_attr
+# LDAP attribute to use as key when searching for groups under
+# `group_search_base_dn`.
+# @param group_search_user_attr
+# LDAP attribute used to get the "name" of a given user.
+# Should match with what is used in `group_search_group_attr`.
+# @param group_search_group_attr
+# LDAP attribute used to determine which users are part of which group.
+# Should match with what is used in `group_search_user_attr`
+# @param group_search_filter
+# LDAP filter to limit which users are returned when searching
+# for who is part of which group
+# @param main_team_user
# @param main_team_group
-# Ignored, but here to keep the same "API" with the other auth modules.
-class concourse::auth::local (
- Array[Struct[{
- 'name' => String,
- 'password' => Variant[String, Sensitive[String]],
- }]] $users,
- Optional[Array[String]] $main_team_user,
- Optional[Array[String]] $main_team_group, # ignored
- Enum['absent', 'present'] $ensure = 'present',
+# @param ensure
+class concourse::auth::ldap (
+ String $host,
+ String $bind_dn,
+ Variant[String, Sensitive[String]] $bind_pw,
+ String $user_search_base_dn,
+ String $user_search_username = 'uid',
+ Optional[String] $display_name = undef,
+ Optional[String] $user_search_filter = undef,
+ Optional[String] $user_search_id_attr = undef,
+ Optional[String] $user_search_email_attr = undef,
+ Optional[String] $user_search_name_attr = undef,
+ Optional[Stdlib::Absolutepath] $ca_cert = undef,
+ Boolean $insecure_no_ssl = false,
+ Optional[String] $group_search_base_dn = undef,
+ String $group_search_name_attr = 'ou',
+ String $group_search_user_attr = 'uid',
+ String $group_search_group_attr = 'members',
+ Optional[String] $group_search_filter = undef,
+ Optional[Array[String]] $main_team_user = undef,
+ Optional[Array[String]] $main_team_group = undef,
+ Enum['absent', 'present'] $ensure = 'present',
) {
- $env_file = "${concourse::web::conf_dir}/auth-local"
+ $env_file = "${concourse::web::conf_dir}/auth-ldap"
$environment = {
- 'CONCOURSE_ADD_LOCAL_USER' => $users.map |$user| {
- $name = $user['name']
- $pass = $user['password'] ? {
- String => $user['password'],
- default => $user['password'].unwrap,
- }
- "${name}:${pass}"
- }.join(','),
- 'CONCOURSE_MAIN_TEAM_LOCAL_USER' => $main_team_group ? {
- Array => $main_team_group.join(','),
+ 'CONCOURSE_LDAP_HOST' => $host,
+ 'CONCOURSE_LDAP_BIND_DN' => $bind_dn,
+ 'CONCOURSE_LDAP_BIND_PW' => $bind_pw,
+ 'CONCOURSE_LDAP_USER_SEARCH_BASE_DN' => $user_search_base_dn,
+ 'CONCOURSE_LDAP_USER_SEARCH_USERNAME' => $user_search_username,
+ 'CONCOURSE_LDAP_DISPLAY_NAME' => $display_name,
+ 'CONCOURSE_LDAP_USER_SEARCH_FILTER' => $user_search_filter,
+ 'CONCOURSE_LDAP_USER_SEARCH_ID_ATTR' => $user_search_id_attr,
+ 'CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR' => $user_search_email_attr,
+ 'CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR' => $user_search_name_attr,
+ 'CONCOURSE_LDAP_CA_CERT' => $ca_cert,
+ 'CONCOURSE_LDAP_INSECURE_NO_SSL' => $insecure_no_ssl,
+ 'CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN' => $group_search_base_dn,
+ 'CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR' => $group_search_name_attr,
+ 'CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR' => $group_search_user_attr,
+ 'CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR' => $group_search_group_attr,
+ 'CONCOURSE_LDAP_GROUP_SEARCH_FILTER' => $group_search_filter,
+ 'CONCOURSE_LDAP_MAIN_TEAM_LDAP_USER' => $main_team_user ? {
+ Array => $main_team_user.join(','),
+ default => undef,
+ },
+ 'CONCOURSE_LDAP_MAIN_TEAM_LDAP_GROUP' => $main_team_group ? {
+      Array => $main_team_group.join(','),
default => undef,
},
}
file { $env_file:
ensure => $ensure,
- content => epp("${module_name}/env.epp", $environment),
+ content => epp("${module_name}/env.epp", { 'entries' => $environment }),
# To not show new password
show_diff => false,
mode => '0600',
}
- systemd::manage_dropin { 'concourse-local-auth':
+ systemd::manage_dropin { 'concourse-ldap-auth':
ensure => $ensure,
- unit => $concourse::web::service,
+ unit => $concourse::web::service_unit,
service_entry => {
'EnvironmentFile' => $env_file,
},
diff --git a/manifests/auth/local.pp b/manifests/auth/local.pp
index 289ce15..bc15dad 100644
--- a/manifests/auth/local.pp
+++ b/manifests/auth/local.pp
@@ -1,70 +1,48 @@
-# @summary Concourse LDAP authentication
-# Most attributes maps directly to concourse's options, but with
-# `CONCOURSE_LDAP_` prefixed.
-class concourse::auth::ldap (
- String $host,
- String $bind_dn,
- Variant[String, Sensitive[String]] $bind_pw,
- String $user_search_base_dn,
- String $user_search_username = 'uid',
- Optional[String] $display_name = undef,
- Optional[String] $user_search_filter = undef,
- Optioal[String] $user_search_id_attr = undef,
- Optional[String] $user_search_email_attr = undef,
- Optional[String] $user_search_name_attr = undef,
- Optional[Stdlib::Absolutepath] $ca_cert = undef,
- Boolean $insecure_no_ssl = false,
- Optional[String] $group_search_base_dn = undef,
- String $group_search_name_attr = 'ou',
- String $group_search_user_attr = 'uid',
- String $group_search_group_attr = 'members',
- Optional[String] $group_search_filter = undef,
- Optional[Array[String]] $main_team_user,
- Optional[Array[String]] $main_team_group,
-
+# @summary Concourse local authentication
+# @param users
+# List of local users.
+# @param main_team_user
+# List of users which should be added to the "main" team.
+# @param main_team_group
+# Ignored, but here to keep the same "API" with the other auth modules.
+# @param ensure
+class concourse::auth::local (
+ Array[Struct[{
+ 'name' => String,
+ 'password' => Variant[String, Sensitive[String]],
+ }]] $users,
+ Optional[Array[String]] $main_team_user = undef,
+ Optional[Array[String]] $main_team_group = undef, # ignored
Enum['absent', 'present'] $ensure = 'present',
) {
- $env_file = "${concourse::web::conf_dir}/auth-ldap"
+ $env_file = "${concourse::web::conf_dir}/auth-local"
$environment = {
- 'CONCOURSE_LDAP_HOST' => $host,
- 'CONCOURSE_LDAP_BIND_DN' => $bind_dn,
- 'CONCOURSE_LDAP_BIND_PW' => $bind_pw,
- 'CONCOURSE_LDAP_USER_SEARCH_BASE_DN' => $user_search_base_dn,
- 'CONCOURSE_LDAP_USER_SEARCH_USERNAME' => $user_search_username,
- 'CONCOURSE_LDAP_DISPLAY_NAME' => $display_name,
- 'CONCOURSE_LDAP_USER_SEARCH_FILTER' => $user_search_filter,
- 'CONCOURSE_LDAP_USER_SEARCH_ID_ATTR' => $user_search_id_attr,
- 'CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR' => $user_search_email_attr,
- 'CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR' => $user_search_name_attr,
- 'CONCOURSE_LDAP_CA_CERT' => $ca_cert,
- 'CONCOURSE_LDAP_INSECURE_NO_SSL' => $insecure_no_ssl,
- 'CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN' => $group_search_base_dn,
- 'CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR' => $group_search_name_attr,
- 'CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR' => $group_search_user_attr,
- 'CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR' => $group_search_group_attr,
- 'CONCOURSE_LDAP_GROUP_SEARCH_FILTER' => $group_search_filter,
- 'CONCOURSE_LDAP_MAIN_TEAM_LDAP_USER' => $main_team_user ? {
- Array => $main_team_user.join(','),
- default => undef,
- },
- 'CONCOURSE_LDAP_MAIN_TEAM_LDAP_GROUP' => $main_team_group ? {
- Array => $main_team_user.join(','),
+ 'CONCOURSE_ADD_LOCAL_USER' => $users.map |$user| {
+ $name = $user['name']
+ $pass = $user['password'] ? {
+ String => $user['password'],
+ default => $user['password'].unwrap,
+ }
+ "${name}:${pass}"
+ }.join(','),
+ 'CONCOURSE_MAIN_TEAM_LOCAL_USER' => $main_team_group ? {
+ Array => $main_team_group.join(','),
default => undef,
},
}
file { $env_file:
ensure => $ensure,
- content => epp("${module_name}/env.epp", $environment),
+ content => epp("${module_name}/env.epp", { 'entries' => $environment }),
# To not show new password
show_diff => false,
mode => '0600',
}
- systemd::manage_dropin { 'concourse-ldap-auth':
+ systemd::manage_dropin { 'concourse-local-auth':
ensure => $ensure,
- unit => $concourse::web::service,
+ unit => $concourse::web::service_unit,
service_entry => {
'EnvironmentFile' => $env_file,
},
diff --git a/manifests/database.pp b/manifests/database.pp
index d921cc9..bc5046a 100644
--- a/manifests/database.pp
+++ b/manifests/database.pp
@@ -1,8 +1,23 @@
-class concourse::database (
- String $username = lookup("concourse::${cluster}::postgres_user"),,
- Variant[String, Sensitive[String]] $password = lookup("concourse::${cluster}::postgres_user"),
- String $db_name = "atc-${cluster}",
- String $cluster = $concourse::default_cluster,
+# @summary Manage the database posts for concourse.
+#
+# A single database is needed for each cluster.
+# This resource sets up a postgres database for a given cluster.
+#
+# @param username
+# Username used to connect to the postgres instance
+# Cluster specific.
+# @param password
+# Password used to connect to the postgres instance.
+# Cluster specific.
+# @param db_name
+# Name of the database to use for this cluster.
+# @param cluster
+#   Name of the cluster in question. Changes the defaults of all other parameters.
+define concourse::database (
+ String $cluster = $concourse::default_cluster,
+ String $username = $concourse::configured_clusters[$cluster]['postgres_user'],
+ Variant[String, Sensitive[String]] $password = $concourse::configured_clusters[$cluster]['postgres_password'],
+ String $db_name = $concourse::configured_clusters[$cluster]['db_name'],
) {
postgresql::server::db { $db_name:
user => $username,
diff --git a/manifests/fly.pp b/manifests/fly.pp
index b9e1e71..f80f8a6 100644
--- a/manifests/fly.pp
+++ b/manifests/fly.pp
@@ -7,6 +7,6 @@ class concourse::fly (
Enum['absent', 'present'] $ensure = 'present',
) {
ensure_packages(['concourse-fly-cli'], {
- 'ensure' => $ensure,
+ 'ensure' => $ensure,
})
}
diff --git a/manifests/init.pp b/manifests/init.pp
index 8b70bd6..368b558 100644
--- a/manifests/init.pp
+++ b/manifests/init.pp
@@ -1,11 +1,55 @@
-# Global defaults for defined resource types.
+# @summary Global defaults for defined resource types.
+#
# @param worker_work_dir
# Default work dir for each worker
# @param default_cluster
# Cluster used by all resources if no specific cluster is specified.
+# @param worker_service
+# Name of the the system service for workers.
+# @param clusters
+# Hash from cluster name to default values for each cluster.
+# Each key should be the name of a cluster, and the options are as follows:
+# @option clusters :external_domain
+# @option clusters :postgres_user
+# @option clusters :postgres_password
+# @option clusters :session_signing_key
+# @option clusters :tsa_private_key
+# @option clusters :tsa_public_key
+# @option clusters :db_name
class concourse (
+ String $default_cluster,
String $worker_work_dir = '/opt/concourse/worker',
- String $default_cluster = 'default',
String $worker_service = 'concourse-worker',
+ Hash[String, Hash[String, Any]] $clusters = {},
) {
+ # Merge all configured clusters we find in hiera, and append those
+ # explicitly added to the class.
+ $configured_clusters_ = lookup('concourse::clusters', {
+ merge => 'hash',
+ default_value => {},
+ }) + $clusters
+
+ # Populate each configured cluster with some default values.
+ $populated_clusters_ = $configured_clusters_.map |$cluster_name, $opts| {
+ # Defaults need to be declared *inside* the loop, since they may
+ # depend on other values in the configuration.
+ $cluster_defaults = {
+ 'db_name' => "atc-${cluster_name}",
+ 'postgres_user' => 'concourse',
+ }
+
+ $finalized_opts = $cluster_defaults.keys().reduce($opts) |$opts, $key| {
+ if $key in $opts {
+ $opts
+ } else {
+ $opts + { $key => $cluster_defaults[$key] }
+ }
+ }
+
+ [$cluster_name, $finalized_opts]
+ }
+
+ # This variable is the "exported" item other resources should look at to get
+ # cluster default configuration.
+ $configured_clusters = Hash($populated_clusters_)
}
diff --git a/manifests/keys.pp b/manifests/keys.pp
deleted file mode 100644
index dc5fe19..0000000
--- a/manifests/keys.pp
+++ /dev/null
@@ -1,8 +0,0 @@
-define concourse::keys (
-) {
- @@tsa_host_key {
- }
-
- @@kj
-}
-
diff --git a/manifests/proxy/nginx.pp b/manifests/proxy/nginx.pp
index 7e4b9a2..3e2d180 100644
--- a/manifests/proxy/nginx.pp
+++ b/manifests/proxy/nginx.pp
@@ -1,6 +1,16 @@
+# @summary Revproxy for concourse
+#
+# Rev-proxy, which also gathers all web nodes in a cluster into a
+# single web endpoint
+#
+# @param server_name
+# Name of the nginx server, will also be used for rev-proxy routing.
+# @param cluster
+# Name of the concourse cluster.
+# @param ensure
define concourse::proxy::nginx (
- String $server_name,
- String $cluster,
+ String $server_name = $name,
+ String $cluster = $concourse::default_cluster,
Enum['absent', 'present'] $ensure = 'present',
) {
include concourse
@@ -10,6 +20,14 @@ define concourse::proxy::nginx (
}
nginx::resource::server { $server_name:
+ # TODO everything here
+ }
+
+ nginx::resource::streamhost { "${server_name}-stream":
+ listen_port => 2222,
+ ipv6_enable => true,
+ ipv6_listen_options => '',
+ proxy => $server_name,
}
nginx::resource::location { "${server_name} - /":
diff --git a/manifests/web.pp b/manifests/web.pp
index f89ac4e..db28e07 100644
--- a/manifests/web.pp
+++ b/manifests/web.pp
@@ -1,4 +1,5 @@
# @summary A concourse web node.
+#
# @param service
# The name of the system service.
# This service WILL be managed by us.
@@ -16,26 +17,62 @@
# removing that resource.
# @param ensure
# @param cluster
-# If this web node is part of a cluster of web nodes, name that
-# cluster. This will create an `nginx::resoruce::upstream::member`
-# resource for this node, which should be realized by
-# `concourse::proxy::nginx`
-#
-# Also requires `peer_address` to be set
+# Which concourse this web node is part of. An
+# `nginx::resource::upstream::member` resource will be exported,
+# which can be realized by the `concourse::proxy::nginx` resource.
#
# @param peer_address
-# Peer address used when used in a cluster
+# Address to use when connecting this node to the cluster.
+# Should preferably be a private address, since the cluster should
+# only be exposed publicly through the load balancer.
+#
+# Despite that, defaults to `$facts['ipaddress']`, since that
+# forces it to work.
+#
+# Note that concourse always binds on port 8080, this is currently
+# not configurable.
+#
+# @param postgres_user
+# @param postgres_password
+# @param session_signing_key
+#
+# Maps to `CONCOURSE_SESSION_SIGNING_KEY`, and is the private key generated by
+# concourse generate-key -t rsa -f ./session_signing_key
+# That command will also emit a public key, but that key should be discarded since it's unused.
+#
+# This key is used for signing and verifying user session tokens.
+#
+# @param tsa_private_key
+# Private key used to validate SSH connections from workers.
#
-# Also requires `cluster` to be set.
+# Generated by
+# concourse generate-key -t ssh -f ./tsa_host_key
#
-# Remaining keys maps directly to concourse configurations.
+# Maps to `CONCOURSE_TSA_HOST_KEY`, and the public part should be passed to each worker.
+#
+# @param worker_public_keys
+# @param key_dir
+# @param session_signing_key_file
+# @param tsa_host_key_file
+# @param tsa_authorized_keys_file
+# @param postgres_host
+# @param postgres_port
+# @param postgres_socket
+# @param postgres_database
+# @param external_url
+# Publicly facing url of this cluster. Mainly used by the web server to generate proper links.
+#
+# For example, 'https://concourse.example.com'
+# @param api_max_conns
+# @param backend_max_conns
+# @param packages
class concourse::web (
- String $postgres_user = lookup("concourse::${cluster}::postgres_user"),
- Variant[String, Sensitive[String]] $postgres_password = lookup("concourse::${cluster}::postgres_password"),
+ String $cluster = $concourse::default_cluster,
+ String $postgres_user = $concourse::configured_clusters[$cluster]['postgres_user'],
+ Variant[String, Sensitive[String]] $postgres_password = $concourse::configured_clusters[$cluster]['postgres_password'],
- Variant[String, Sensitive[String]] $session_signing_key = lookup("concourse::${cluster}::session_signing_key"),
- Variant[String, Sensitive[String]] $tsa_private_key = lookup("concourse::${cluster}::tsa_private_key"),
- Variant[String, Sensitive[String]] $tsa_public_key = lookup("concourse::${cluster}::tsa_public_key"),
+ Variant[String, Sensitive[String]] $session_signing_key = $concourse::configured_clusters[$cluster]['session_signing_key'],
+ Variant[String, Sensitive[String]] $tsa_private_key = $concourse::configured_clusters[$cluster]['tsa_private_key'],
Array[String] $worker_public_keys = [],
String $key_dir = '/usr/lib/concourse',
@@ -43,7 +80,6 @@ class concourse::web (
String $tsa_host_key_file = "${key_dir}/tsa_host_key",
String $tsa_authorized_keys_file = "${key_dir}/authorized_worker_keys",
- String $cluster = 'default',
Optional[String] $peer_address = undef,
Optional[String] $postgres_host = undef,
@@ -52,27 +88,27 @@ class concourse::web (
Optional[String] $postgres_database = undef,
- Optional[String] $external_url = undef,
+ String $external_url = "https://${concourse::configured_clusters[$cluster]['external_domain']}",
Optional[Integer] $api_max_conns = undef,
Optional[Integer] $backend_max_conns = undef,
String $service = 'concourse',
String $service_unit = "${service}.service",
- Std::AbsolutePath $conf_file = '/etc/conf.d/concourse',
- Std::AbsolutePath $conf_dir = '/etc/conf.d/concourse.d',
+ Stdlib::Absolutepath $conf_file = '/etc/conf.d/concourse',
+ Stdlib::Absolutepath $conf_dir = '/etc/conf.d/concourse.d',
Boolean $purge_conf_dir = true,
Enum['absent', 'present'] $ensure = 'present',
Array[String] $packages = [
'concourse',
- 'councourse-resource-types',
+ 'concourse-resource-types',
],
) {
include concourse
ensure_packages($packages, {
- ensure => $ensure,
+ ensure => $ensure,
})
$env = {
@@ -99,7 +135,7 @@ class concourse::web (
ensure => $ensure,
mode => '0600',
show_diff => false,
- content => epp("${module_name}/env.epp", $env),
+ content => epp("${module_name}/env.epp", { 'entries' => $env }),
}
file { $conf_dir:
@@ -125,16 +161,13 @@ class concourse::web (
content => $session_signing_key,
;
$tsa_host_key_file:
- conent => $tsa_private_key,
- ;
- "${tsa_host_key_file}.pub":
- content => $tsa_public_key,
+ content => $tsa_private_key,
;
}
concat { "authorized_workers_key - ${cluster}":
- target => $tsa_authorized_keys_file,
- warning => '# File managed by puppet, local changes WILL be overwritten',
+ path => $tsa_authorized_keys_file,
+ warn => '# File managed by puppet, local changes WILL be overwritten',
ensure_newline => true,
}
@@ -145,7 +178,7 @@ class concourse::web (
}
}
- Worker_key <<| cluster == $cluster |>>
+ Concourse::Worker_key <<| cluster == $cluster |>>
systemd::unit_file { $service_unit:
ensure => $ensure,
@@ -155,11 +188,10 @@ class concourse::web (
enable => true,
}
- if $peer_address {
- @@nginx::resource::upstream::member { $facts['trusted']['certname']:
- ensure => $ensure,
- upstream => "concourse - ${cluster}",
- server => $peer_address,
- }
+ # Exported resource
+ @@nginx::resource::upstream::member { $trusted['certname']:
+ ensure => $ensure,
+ upstream => "concourse - ${cluster}",
+ server => "${peer_address}:8080",
}
}
diff --git a/manifests/worker.pp b/manifests/worker.pp
index 18703f2..2d25395 100644
--- a/manifests/worker.pp
+++ b/manifests/worker.pp
@@ -1,4 +1,4 @@
-# @summary A Concourse workre
+# @summary A Concourse worker
#
# Declared as a class, since the upstream documentation explicitly states
# that multiple workers on a single node is nonsensical. This may however
@@ -6,10 +6,10 @@
# a worker to a specific team or tag exists, and linux can limit the amount
# of resources given to a given process (this gets even easier through systemd,
# which the module currently uses extensively).
-
+#
# @param key_dir
# Directory in which keys should be stored.
-# @param worker_key_file
+# @param worker_public_key_file
# File in which the worker's public key should be stored
# @param worker_private_key_file
# File in which the worker ns private key should be stored.
@@ -26,6 +26,8 @@
# Network address to the master (web) node that this worker should connect to.
# @param tsa_public_key
# Public key of this workers master.
+#
+# MUST match the private key given to the corresponding web node as `tsa_private_key`.
# @param worker_public_key
# Public key of this worker. Only used if `$manage_private_key` is
# false, otherwise a key will be automatically generated.
@@ -40,11 +42,14 @@
# public portion exported as a fact.
# @param export_public_key
# Should an exported resource with this nodes public key be created.
-# This reads the fact from `$worker_public_key` and creates an exported
-# resource of type `concourse::worker_key`, which will allow the master
-# to realize it.
-# @param tag
-# List of arbitrary tags to connnect to this worker. Can be used by
+# The key is read from the local file configured in `$worker_public_key_file`.
+#
+# Each web node in the same cluster will collect these keys.
+#
+# This requires a Puppet database to be configured
+# (not in this module, just generally).
+# @param concourse_tag
+# List of arbitrary tags to connect to this worker. Can be used by
# pipelines which requires specific environments.
# @param team
# Limit this worker to a specific team.
@@ -55,45 +60,45 @@
# @param healthcheck_timeout
# Timeout for health check.
# @param extra_env
-# A hash of extra environment variables which will be passed directly
+# A hash of extra environment variables which will be passed directly
# to the worker process.
class concourse::worker (
- Std::AbsolutePath $key_dir = '/usr/lib/concourse',
- Std::AbsolutePath $worker_key_file = "${key_dir}/worker_key",
- Std::AbsolutePath $worker_private_key_file = "${worker_key_file}.pub",
- String $cluster = $concourse::default_cluster,
- String $service = $concourse::worker_service,
- String $service_unit = "${service}.service",
- Enum['absent', 'present'] $ensure = 'present',
+ String $cluster = $concourse::default_cluster,
+ Stdlib::Absolutepath $key_dir = '/usr/lib/concourse',
+ Stdlib::Absolutepath $worker_private_key_file = "${key_dir}/worker_key",
+ Stdlib::Absolutepath $worker_public_key_file = "${worker_private_key_file}.pub",
+ String $service = $concourse::worker_service,
+ String $service_unit = "${service}.service",
+ Enum['absent', 'present'] $ensure = 'present',
- String $work_dir = $concourse::worker_work_dir,
- String $tsa_host = lookup("concourse::${cluster}::tsa_host"),
- String $tsa_public_key = lookup("concourse::${cluster}::tsa_public_key"),
- Optinal[String] $worker_public_key = undef,
- Optinal[String] $worker_private_key = undef,
- Boolean $manage_private_key = $worker_private_key == undef,
- Boolean $export_public_key = true,
- Optional[Array[String]] $tag = undef,
- Optinal[String] $team = undef,
+ String $work_dir = $concourse::worker_work_dir,
+ String $tsa_host = $concourse::configured_clusters[$cluster]['external_domain'],
+ String $tsa_public_key = $concourse::configured_clusters[$cluster]['tsa_public_key'],
+ Optional[String] $worker_public_key = undef,
+ Optional[String] $worker_private_key = undef,
+ Boolean $manage_private_key = $worker_private_key == undef,
+ Boolean $export_public_key = true,
+ Optional[Array[String]] $concourse_tag = undef,
+ Optional[String] $team = undef,
- String $healthcheck_bind_ip = '0.0.0.0',
- Stdlib::Port $healthcheck_bind_port = 8888,
- String $healthcheck_timeout = '5s',
+ String $healthcheck_bind_ip = '0.0.0.0',
+ Stdlib::Port $healthcheck_bind_port = 8888,
+ String $healthcheck_timeout = '5s',
- Hash[String, Any] $extra_env = {},
+ Hash[String, Any] $extra_env = {},
) {
ensure_packages([
- 'concourse',
+ 'concourse',
])
if $manage_private_key {
exec { 'Concourse generate worker key':
- command => ['concourse', 'generate-key', '-t', 'ssh', '-f', $worker_key_file],
- creates => $worker_private_key_file, # and worker_key_file
- path => ['/sbin', '/usr/sbin', '/bin', '/usr/bin',]
+ command => ['concourse', 'generate-key', '-t', 'ssh', '-f', $worker_private_key_file],
+ creates => $worker_private_key_file, # and worker_public_key_file
+ path => ['/sbin', '/usr/sbin', '/bin', '/usr/bin'],
}
} else {
- file { $worker_key_file:
+ file { $worker_public_key_file:
content => $worker_public_key,
}
@@ -104,15 +109,18 @@ class concourse::worker (
}
if $export_public_key {
- @@concourse::worker_key { "${facts['trusted']['certname']} worker key":
- content => $facts['concourse_worker_key'],
+ @@concourse::worker_key { "${trusted['certname']} worker key":
+ content => $worker_public_key_file,
cluster => $cluster,
+      # Requiring File[$worker_public_key_file] would be semantically better,
+ # but it appears like Exec resources don't autorequire their "created" file.
+ require => Exec['Concourse generate worker key'],
}
}
systemd::unit_file { $service_unit:
ensure => $ensure,
- soruce => "puppet:///modules/${module_name}/concourse-worker.service",
+ source => "puppet:///modules/${module_name}/concourse-worker.service",
} ~> service { $service:
ensure => if $ensure == 'present' { 'running' } else { 'stopped' },
enable => true,
@@ -123,18 +131,17 @@ class concourse::worker (
'CONCOURSE_TSA_HOST' => $tsa_host,
'CONCOURSE_TSA_PUBLIC_KEY' => $tsa_public_key,
'CONCOURSE_TSA_WORKER_PRIVATE_KEY' => $worker_private_key_file,
- 'CONCOURSE_TAG' => $tag,
+ 'CONCOURSE_TAG' => if $concourse_tag == undef { undef } else { $concourse_tag.join(',') },
'CONCOURSE_TEAM' => $team,
'HEALTHCHECK_BIND_IP' => $healthcheck_bind_ip,
'HEALTHCHECK_BIND_PORT' => $healthcheck_bind_port,
'HEALTHCHECK_TIMEOUT' => $healthcheck_timeout,
} + $extra_env
-
file { '/etc/conf.d/concourse-worker':
ensure => $ensure,
mode => '0600',
show_diff => false,
- content => epp("${module_name}/env.epp", $env),
+ content => epp("${module_name}/env.epp", { 'entries' => $env }),
}
}
diff --git a/manifests/worker_key.pp b/manifests/worker_key.pp
index 320bba1..be32a01 100644
--- a/manifests/worker_key.pp
+++ b/manifests/worker_key.pp
@@ -1,7 +1,18 @@
+# @summary A worker (public) key for a given cluster.
+#
+# This resource is supposed to be created and exported by the worker
+# resource, and then collected by the web resource.
+#
+# It should however be fine to create manual instances of this resource.
+#
+# @param content
+# Complete content of the key, as it appears on disk.
+# @param cluster
+# Which cluster this key is part of.
# @api private
define concourse::worker_key (
- String $content,
String $cluster,
+ String $content,
) {
concat::fragment { $name:
content => $content,
diff --git a/metadata.json b/metadata.json
index 7e74703..bd45e42 100644
--- a/metadata.json
+++ b/metadata.json
@@ -16,7 +16,7 @@
},
{
"name": "puppet/nginx",
- "version_requirement": >= 3.3.0 < 4.0.0"
+ "version_requirement": ">= 3.3.0 < 4.0.0"
},
{
"name": "puppetlabs/postgresql",
diff --git a/templates/env.epp b/templates/env.epp
index 8648088..acd8a3d 100644
--- a/templates/env.epp
+++ b/templates/env.epp
@@ -1,9 +1,9 @@
-<%- | Hash[String, Any] $entries
+<%- | Hash[String, Any] $entries,
| -%>
# Environment file for concourse.service
# File managed by Puppet. Local changes WILL be overwritten.
<%- $entries.each |$key, $value| { -%>
<%- unless $value == undef { -%>
-<%= $key %>=<%= $value =%>
+<%= $key %>=<%= $value %>
<%- } -%>
<%- } -%>