# @summary A Concourse worker
#
# Declared as a class, since the upstream documentation explicitly states
# that running multiple workers on a single node is nonsensical. This may
# however change in future versions of this module, since the option to limit
# a worker to a specific team or tag exists, and Linux can limit the resources
# given to a process (this gets even easier through systemd, which the module
# currently uses extensively).
#
# @param key_dir
#   Directory in which keys should be stored.
# @param worker_public_key_file
#   File in which the worker's public key should be stored.
# @param worker_private_key_file
#   File in which the worker's private key should be stored.
# @param cluster
#   Which Concourse cluster this worker should be part of.
# @param service
#   Name of the worker service.
# @param service_unit
#   Name of the (systemd) service unit for the worker.
# @param ensure
#   Whether the worker configuration and service should be present or absent.
# @param work_dir
#   Working directory for the worker.
# @param tsa_host
#   Network address of the master (web) node that this worker should connect to.
# @param tsa_public_key_file
#   File in which the public key of this worker's master (web) node should be
#   stored.
# @param tsa_public_key
#   Public key of this worker's master.
#
#   MUST match the private key given to the corresponding web node as
#   `tsa_private_key`.
# @param worker_public_key
#   Public key of this worker. Only used if `$manage_private_key` is
#   false, otherwise a key will be automatically generated and the
#   public key exported as a fact.
# @param worker_private_key
#   Private key of this worker. Like `worker_public_key`, will only
#   be used if `$manage_private_key` is false. This value will however
#   *not* be exported.
# @param manage_private_key
#   Should this node manage and generate its own worker key. If true
#   (the default) then a key will automatically be generated, and the
#   public portion exported as a fact.
# @param export_public_key
#   Should an exported resource with this node's public key be created.
#   The key is read from the local file configured in `$worker_public_key_file`.
#
#   Each web node in the same cluster will collect these keys.
#
#   This requires PuppetDB to be configured
#   (not by this module, just in general).
# @param concourse_tag
#   List of arbitrary tags to attach to this worker. Can be used by
#   pipelines which require specific environments.
# @param team
#   Limit this worker to a specific team.
# @param healthcheck_bind_ip
#   Address to bind the healthcheck endpoint to.
# @param healthcheck_bind_port
#   Port to bind the healthcheck endpoint to.
# @param healthcheck_timeout
#   Timeout for the health check.
# @param conf_name
#   Local name of the main configuration file.
# @param extra_env
#   A hash of extra environment variables which will be passed directly
#   to the worker process.
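#
# @example Declaring a worker for a configured cluster
#   # A minimal sketch; the cluster name, team, tag and extra environment
#   # variable shown here are placeholders, not values shipped by this module.
#   # It assumes the concourse base class has already configured a cluster
#   # named 'main'.
#   class { 'concourse::worker':
#     cluster       => 'main',
#     team          => 'ops',
#     concourse_tag => ['linux'],
#     extra_env     => {
#       # Entries here are passed verbatim to the worker process.
#       'CONCOURSE_LOG_LEVEL' => 'debug',
#     },
#   }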
class concourse::worker (
  String $cluster = $concourse::default_cluster,
  Stdlib::Absolutepath $key_dir = $concourse::key_dir,
  Stdlib::Absolutepath $worker_private_key_file = "${key_dir}/worker_key",
  Stdlib::Absolutepath $worker_public_key_file = "${worker_private_key_file}.pub",
  Stdlib::Absolutepath $tsa_public_key_file = "${key_dir}/tsa_public_key",
  String $service = $concourse::worker_service,
  String $service_unit = "${service}.service",
  Enum['absent', 'present'] $ensure = 'present',
  String $work_dir = $concourse::worker_work_dir,
  String $tsa_host = $concourse::configured_clusters[$cluster]['external_domain'],
  String $tsa_public_key = $concourse::configured_clusters[$cluster]['tsa_public_key'],
  Optional[String] $worker_public_key = undef,
  Optional[String] $worker_private_key = undef,
  Boolean $manage_private_key = $worker_private_key == undef,
  Boolean $export_public_key = true,
  Optional[Array[String]] $concourse_tag = undef,
  Optional[String] $team = undef,
  String $healthcheck_bind_ip = '0.0.0.0',
  Stdlib::Port $healthcheck_bind_port = 8888,
  String $healthcheck_timeout = '5s',
  String $conf_name = 'worker-base',
  Hash[String, Any] $extra_env = {},
) {
  ensure_packages([
    'concourse',
  ])

  if $manage_private_key {
    exec { 'Concourse generate worker key':
      command => ['concourse', 'generate-key', '-t', 'ssh', '-f', $worker_private_key_file],
      creates => $worker_private_key_file, # and worker_public_key_file
      path    => ['/sbin', '/usr/sbin', '/bin', '/usr/bin'],
    }
  } else {
    file { $worker_public_key_file:
      content => $worker_public_key,
    }

    file { $worker_private_key_file:
      mode    => '0600',
      content => $worker_private_key,
    }
  }

  if $export_public_key {
    @@concourse::worker_key { "${trusted['certname']} worker key":
      source  => $worker_public_key_file,
      cluster => $cluster,
      # Requiring File[$worker_public_key_file] would be semantically better,
      # but it appears like Exec resources don't autorequire their "created"
      # file, so depend on the Exec when the key is generated locally.
      require => if $manage_private_key {
        Exec['Concourse generate worker key']
      } else {
        File[$worker_public_key_file]
      },
    }
  }

  systemd::unit_file { $service_unit:
    ensure => $ensure,
    source => "puppet:///modules/${module_name}/concourse-worker.service",
  }
  ~> service { $service:
    ensure => if $ensure == 'present' { 'running' } else { 'stopped' },
    enable => true,
  }

  file { $tsa_public_key_file:
    content   => $tsa_public_key,
    show_diff => false,
  }

  $env = {
    'CONCOURSE_WORK_DIR'               => $work_dir,
    'CONCOURSE_TSA_HOST'               => $tsa_host,
    'CONCOURSE_TSA_PUBLIC_KEY'         => $tsa_public_key_file,
    'CONCOURSE_TSA_WORKER_PRIVATE_KEY' => $worker_private_key_file,
    'CONCOURSE_TAG'                    => if $concourse_tag == undef { undef } else { $concourse_tag.join(',') },
    'CONCOURSE_TEAM'                   => $team,
    'HEALTHCHECK_BIND_IP'              => $healthcheck_bind_ip,
    'HEALTHCHECK_BIND_PORT'            => $healthcheck_bind_port,
    'HEALTHCHECK_TIMEOUT'              => $healthcheck_timeout,
  } + $extra_env

  concourse::conf::worker { $conf_name:
    ensure => $ensure,
    env    => $env,
  }
}