From 517703de2efda51df28f96561462f65f8df635b3 Mon Sep 17 00:00:00 2001 From: Wouter D'Haeseleer Date: Thu, 29 Jun 2017 13:14:07 +0200 Subject: [PATCH 1/3] Including other names in the peer probe fact When having multiple interfaces on a gluster cluster, it is now possible to probe each interface and facter will return the other names as well. Otherwise facter will probe on each run Fixes #124 --- lib/facter/gluster.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/facter/gluster.rb b/lib/facter/gluster.rb index e3d34e4b..a9a004c0 100644 --- a/lib/facter/gluster.rb +++ b/lib/facter/gluster.rb @@ -29,6 +29,10 @@ peer_count = Regexp.last_match[1].to_i if output =~ %r{^Number of Peers: (\d+)$} if peer_count > 0 peer_list = output.scan(%r{^Hostname: (.+)$}).flatten.join(',') + other_names = output.scan(%r{^Other names:\n((.+\n)+)}).flatten.join.scan(%r{(.+)\n?}).sort.uniq.flatten.join(',') + unless other_names.empty? + peer_list += ',' + other_names + end # note the stderr redirection here # `gluster volume list` spits to stderr :( output = Facter::Util::Resolution.exec("#{binary} volume list 2>&1") From c5ad8d18680952311be020d6ab0d59196646c7ff Mon Sep 17 00:00:00 2001 From: Wouter D'Haeseleer Date: Thu, 29 Jun 2017 13:45:05 +0200 Subject: [PATCH 2/3] The peer name is not always the same as the FQDN, so use a more clever test --- manifests/peer.pp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/manifests/peer.pp b/manifests/peer.pp index 55b85f7b..1dbd5ad2 100644 --- a/manifests/peer.pp +++ b/manifests/peer.pp @@ -42,7 +42,8 @@ # peering attempt only resolves a cosmetic issue, not a functional one. 
# define gluster::peer ( - $pool = 'default' + $pool = 'default', + $fqdn = $::fqdn, ) { # we can't do much without the Gluster binary @@ -51,7 +52,7 @@ if getvar('::gluster_binary') { # we can't join to ourselves, so it only makes sense to operate # on other gluster servers in the same pool - if $title != $::fqdn { + if $fqdn != $::fqdn { # and we don't want to attach a server that is already a member # of the current pool From 58c61fa2fdd713a13e4e902ff7ac57a0f8aadb9b Mon Sep 17 00:00:00 2001 From: Wouter D'Haeseleer Date: Thu, 29 Jun 2017 13:56:02 +0200 Subject: [PATCH 3/3] Strip the brick test, it would fail anyway if bricks are not in the probe list --- manifests/volume.pp | 84 +++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 49 deletions(-) diff --git a/manifests/volume.pp b/manifests/volume.pp index 1f35047c..0295ae58 100644 --- a/manifests/volume.pp +++ b/manifests/volume.pp @@ -104,60 +104,46 @@ if $minimal_requirements and $already_exists == false { # this volume has not yet been created - # before we can create it, we need to ensure that all the - # servers hosting bricks are members of the storage pool + exec { "gluster create volume ${title}": + command => "${::gluster_binary} volume create ${title} ${args}", + } + + # if we have volume options, activate them now + # + # Note: $options is an array, but create_resources requires + # a hash of hashes. We do some contortions to get the + # array into the hash of hashes that looks like: + # + # option.name: + # value: value # - # first, get a list of unique server names hosting bricks - $brick_hosts = unique( regsubst( $bricks, '^([^:]+):(.+)$', '\1') ) - # now get a list of all peers, including ourself - $pool_members = concat( split( $::gluster_peer_list, ','), [ $::fqdn ] ) - # now see what the difference is - $missing_bricks = difference( $brick_hosts, $pool_members) + # Note 2: we're using the $_options variable, which contains the + # sorted list of options. 
+ if $_options { + # first we need to prefix each array element with the volume name + # so that we match the gluster::volume::option title format of + # volume:option + $vol_opts = prefix( $_options, "${title}:" ) + # now we make some YAML, and then parse that to get a Puppet hash + $yaml = join( regsubst( $vol_opts, ': ', ":\n value: ", 'G'), "\n") + $hoh = parseyaml($yaml) - if ! empty($missing_bricks) { - notice("Not creating Gluster volume ${title}: some bricks are not in the pool") - } else { - exec { "gluster create volume ${title}": - command => "${::gluster_binary} volume create ${title} ${args}", + # safety check + assert_type(Hash, $hoh) + # we need to ensure that these are applied AFTER the volume is created + # but BEFORE the volume is started + $new_volume_defaults = { + require => Exec["gluster create volume ${title}"], + before => Exec["gluster start volume ${title}"], } - # if we have volume options, activate them now - # - # Note: $options is an array, but create_resources requires - # a hash of hashes. We do some contortions to get the - # array into the hash of hashes that looks like: - # - # option.name: - # value: value - # - # Note 2: we're using the $_options variable, which contains the - # sorted list of options. 
- if $_options { - # first we need to prefix each array element with the volume name - # so that we match the gluster::volume::option title format of - # volume:option - $vol_opts = prefix( $_options, "${title}:" ) - # now we make some YAML, and then parse that to get a Puppet hash - $yaml = join( regsubst( $vol_opts, ': ', ":\n value: ", 'G'), "\n") - $hoh = parseyaml($yaml) - - # safety check - assert_type(Hash, $hoh) - # we need to ensure that these are applied AFTER the volume is created - # but BEFORE the volume is started - $new_volume_defaults = { - require => Exec["gluster create volume ${title}"], - before => Exec["gluster start volume ${title}"], - } - - create_resources(::gluster::volume::option, $hoh, $new_volume_defaults) - } + create_resources(::gluster::volume::option, $hoh, $new_volume_defaults) + } - # don't forget to start the new volume! - exec { "gluster start volume ${title}": - command => "${::gluster_binary} volume start ${title}", - require => Exec["gluster create volume ${title}"], - } + # don't forget to start the new volume! + exec { "gluster start volume ${title}": + command => "${::gluster_binary} volume start ${title}", + require => Exec["gluster create volume ${title}"], } } elsif $already_exists {