github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/acceptancetests/repository/trusty/haproxy/hooks/hooks.py

     1  #!/usr/bin/env python
     2  
     3  import base64
     4  import glob
     5  import os
     6  import re
     7  import socket
     8  import shutil
     9  import subprocess
    10  import sys
    11  import yaml
    12  import pwd
    13  
    14  from itertools import izip, tee
    15  from operator import itemgetter
    16  
    17  from charmhelpers.core.host import pwgen, lsb_release, service_restart
    18  from charmhelpers.core.hookenv import (
    19      log,
    20      config as config_get,
    21      local_unit,
    22      relation_set,
    23      relation_ids as get_relation_ids,
    24      relations_of_type,
    25      relations_for_id,
    26      relation_id,
    27      open_port,
    28      close_port,
    29      unit_get,
    30      )
    31  
    32  from charmhelpers.fetch import (
    33      apt_install,
    34      add_source,
    35      apt_update,
    36      apt_cache
    37  )
    38  
    39  from charmhelpers.contrib.charmsupport import nrpe
    40  
    41  
    42  # #############################################################################
    43  # Global variables
    44  # #############################################################################
    45  default_haproxy_config_dir = "/etc/haproxy"
    46  default_haproxy_config = "%s/haproxy.cfg" % default_haproxy_config_dir
    47  default_haproxy_service_config_dir = "/var/run/haproxy"
    48  default_haproxy_lib_dir = "/var/lib/haproxy"
    49  metrics_cronjob_path = "/etc/cron.d/haproxy_metrics"
    50  metrics_script_path = "/usr/local/bin/haproxy_to_statsd.sh"
    51  service_affecting_packages = ['haproxy']
    52  apt_backports_template = (
    53      "deb http://archive.ubuntu.com/ubuntu %(release)s-backports "
    54      "main restricted universe multiverse")
    55  haproxy_preferences_path = "/etc/apt/preferences.d/haproxy"
    56  
    57  
    58  dupe_options = [
    59      "mode tcp",
    60      "option tcplog",
    61      "mode http",
    62      "option httplog",
    63      ]
    64  
    65  frontend_only_options = [
    66      "acl",
    67      "backlog",
    68      "bind",
    69      "capture cookie",
    70      "capture request header",
    71      "capture response header",
    72      "clitimeout",
    73      "default_backend",
    74      "http-request",
    75      "maxconn",
    76      "monitor fail",
    77      "monitor-net",
    78      "monitor-uri",
    79      "option accept-invalid-http-request",
    80      "option clitcpka",
    81      "option contstats",
    82      "option dontlog-normal",
    83      "option dontlognull",
    84      "option http-use-proxy-header",
    85      "option log-separate-errors",
    86      "option logasap",
    87      "option socket-stats",
    88      "option tcp-smart-accept",
    89      "rate-limit sessions",
    90      "redirect",
    91      "tcp-request content accept",
    92      "tcp-request content reject",
    93      "tcp-request inspect-delay",
    94      "timeout client",
    95      "timeout clitimeout",
    96      "use_backend",
    97      ]
    98  
    99  
   100  class InvalidRelationDataError(Exception):
   101      """Invalid data has been provided in the relation."""
   102  
   103  
   104  # #############################################################################
   105  # Supporting functions
   106  # #############################################################################
   107  
   108  def comma_split(value):
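            """Split a comma-separated string into stripped, non-empty values."""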
   109      values = value.split(",")
   110      return filter(None, (v.strip() for v in values))
   111  
   112  
   113  def ensure_package_status(packages, status):
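            """Set the dpkg selection ('install' or 'hold') for the given packages."""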
   114      if status in ['install', 'hold']:
   115          selections = ''.join(['{} {}\n'.format(package, status)
   116                                for package in packages])
   117          dpkg = subprocess.Popen(['dpkg', '--set-selections'],
   118                                  stdin=subprocess.PIPE)
   119          dpkg.communicate(input=selections)
   120  
   121  
   122  def render_template(template_name, vars):
   123      # deferred import so install hook can install jinja2
   124      from jinja2 import Environment, FileSystemLoader
   125      templates_dir = os.path.join(os.environ['CHARM_DIR'], 'templates')
   126      template_env = Environment(loader=FileSystemLoader(templates_dir))
   127      template = template_env.get_template(template_name)
   128      return template.render(vars)
   129  
   130  
   131  # -----------------------------------------------------------------------------
   132  # enable_haproxy:  Enable haproxy at boot time
   133  # -----------------------------------------------------------------------------
   134  def enable_haproxy():
   135      default_haproxy = "/etc/default/haproxy"
   136      with open(default_haproxy) as f:
   137          enabled_haproxy = f.read().replace('ENABLED=0', 'ENABLED=1')
   138      with open(default_haproxy, 'w') as f:
   139          f.write(enabled_haproxy)
   140  
   141  
   142  # -----------------------------------------------------------------------------
   143  # create_haproxy_globals:  Creates the global section of the haproxy config
   144  # -----------------------------------------------------------------------------
   145  def create_haproxy_globals():
   146      config_data = config_get()
   147      global_log = comma_split(config_data['global_log'])
   148      haproxy_globals = []
   149      haproxy_globals.append('global')
   150      for global_log_item in global_log:
   151          haproxy_globals.append("    log %s" % global_log_item.strip())
   152      haproxy_globals.append("    maxconn %d" % config_data['global_maxconn'])
   153      haproxy_globals.append("    user %s" % config_data['global_user'])
   154      haproxy_globals.append("    group %s" % config_data['global_group'])
   155      if config_data['global_debug'] is True:
   156          haproxy_globals.append("    debug")
   157      if config_data['global_quiet'] is True:
   158          haproxy_globals.append("    quiet")
   159      haproxy_globals.append("    spread-checks %d" %
   160                             config_data['global_spread_checks'])
   161      if has_ssl_support():
   162          haproxy_globals.append("    tune.ssl.default-dh-param %d" %
   163                                 config_data['global_default_dh_param'])
   164          haproxy_globals.append("    ssl-default-bind-ciphers %s" %
   165                                 config_data['global_default_bind_ciphers'])
   166      if config_data['global_stats_socket'] is True:
   167          sock_path = "/var/run/haproxy/haproxy.sock"
   168          haproxy_globals.append("    stats socket %s mode 0600" % sock_path)
   169      return '\n'.join(haproxy_globals)
   170  
   171  
   172  # -----------------------------------------------------------------------------
   173  # create_haproxy_defaults:  Creates the defaults section of the haproxy config
   174  # -----------------------------------------------------------------------------
   175  def create_haproxy_defaults():
   176      config_data = config_get()
   177      default_options = comma_split(config_data['default_options'])
   178      default_timeouts = comma_split(config_data['default_timeouts'])
   179      haproxy_defaults = []
   180      haproxy_defaults.append("defaults")
   181      haproxy_defaults.append("    log %s" % config_data['default_log'])
   182      haproxy_defaults.append("    mode %s" % config_data['default_mode'])
   183      for option_item in default_options:
   184          haproxy_defaults.append("    option %s" % option_item.strip())
   185      haproxy_defaults.append("    retries %d" % config_data['default_retries'])
   186      for timeout_item in default_timeouts:
   187          haproxy_defaults.append("    timeout %s" % timeout_item.strip())
   188      return '\n'.join(haproxy_defaults)
   189  
   190  
   191  # -----------------------------------------------------------------------------
   192  # load_haproxy_config:  Convenience function that loads (as a string) the
   193  #                       current haproxy configuration file.
   194  #                       Returns a string containing the haproxy config or
   195  #                       None
   196  # -----------------------------------------------------------------------------
   197  def load_haproxy_config(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
   198      if os.path.isfile(haproxy_config_file):
   199          return open(haproxy_config_file).read()
   200      else:
   201          return None
   202  
   203  
   204  # -----------------------------------------------------------------------------
   205  # get_monitoring_password:  Gets the monitoring password from the
   206  #                           haproxy config.
   207  #                           This prevents the password from being constantly
   208  #                           regenerated by the system.
   209  # -----------------------------------------------------------------------------
   210  def get_monitoring_password(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
   211      haproxy_config = load_haproxy_config(haproxy_config_file)
   212      if haproxy_config is None:
   213          return None
   214      m = re.search("stats auth\s+(\w+):(\w+)", haproxy_config)
   215      if m is not None:
   216          return m.group(2)
   217      else:
   218          return None
   219  
   220  
   221  # -----------------------------------------------------------------------------
   222  # get_service_ports:  Convenience function that scans the existing haproxy
   223  #                     configuration file and returns a list of the existing
   224  #                     ports being used.  This is necessary to know which ports
   225  #                     to open and close when exposing/unexposing a service
   226  # -----------------------------------------------------------------------------
   227  def get_service_ports(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
   228      stanzas = get_listen_stanzas(haproxy_config_file=haproxy_config_file)
   229      return tuple((int(port) for service, addr, port in stanzas))
   230  
   231  
   232  # -----------------------------------------------------------------------------
   233  # get_listen_stanzas: Convenience function that scans the existing haproxy
   234  #                     configuration file and returns a list of the existing
   235  #                     listen stanzas configured.
   236  # -----------------------------------------------------------------------------
   237  def get_listen_stanzas(haproxy_config_file="/etc/haproxy/haproxy.cfg"):
   238      haproxy_config = load_haproxy_config(haproxy_config_file)
   239      if haproxy_config is None:
   240          return ()
   241      listen_stanzas = re.findall(
   242          "listen\s+([^\s]+)\s+([^:]+):(.*)",
   243          haproxy_config)
   244      # Match bind stanzas like:
   245      #
   246      # bind 1.2.3.5:234
   247      # bind 1.2.3.4:123 ssl crt /foo/bar
   248      bind_stanzas = re.findall(
   249          "\s+bind\s+([^:]+):(\d+).*\n\s+default_backend\s+([^\s]+)",
   250          haproxy_config, re.M)
   251      return (tuple(((service, addr, int(port))
   252                     for service, addr, port in listen_stanzas)) +
   253              tuple(((service, addr, int(port))
   254                     for addr, port, service in bind_stanzas)))
   255  
   256  
   257  # -----------------------------------------------------------------------------
   258  # update_service_ports:  Convenience function that evaluates the old and new
   259  #                        service ports to decide which ports need to be
   260  #                        opened and which to close
   261  # -----------------------------------------------------------------------------
   262  def update_service_ports(old_service_ports=None, new_service_ports=None):
   263      if old_service_ports is None or new_service_ports is None:
   264          return None
   265      for port in old_service_ports:
   266          if port not in new_service_ports:
   267              close_port(port)
   268      for port in new_service_ports:
   269          if port not in old_service_ports:
   270              open_port(port)
   271  
   272  
   273  # -----------------------------------------------------------------------------
   274  # update_sysctl: create a sysctl.conf file from YAML-formatted 'sysctl' config
   275  # -----------------------------------------------------------------------------
   276  def update_sysctl(config_data):
   277      sysctl_dict = yaml.load(config_data.get("sysctl", "{}"))
   278      if sysctl_dict:
   279          sysctl_file = open("/etc/sysctl.d/50-haproxy.conf", "w")
   280          for key in sysctl_dict:
   281              sysctl_file.write("{}={}\n".format(key, sysctl_dict[key]))
   282          sysctl_file.close()
   283          subprocess.call(["sysctl", "-p", "/etc/sysctl.d/50-haproxy.conf"])
   284  
   285  
   286  # -----------------------------------------------------------------------------
   287  # update_ssl_cert: write the default SSL certificate using the values from the
   288  #                 'ssl_cert'/'ssl_key' configuration keys
   289  # -----------------------------------------------------------------------------
   290  def update_ssl_cert(config_data):
   291      ssl_cert = config_data.get("ssl_cert")
   292      if not ssl_cert:
   293          return
   294      if ssl_cert == "SELFSIGNED":
   295          log("Using self-signed certificate")
   296          content = "".join(get_selfsigned_cert())
   297      else:
   298          ssl_key = config_data.get("ssl_key")
   299          if not ssl_key:
   300              log("No ssl_key provided, proceeding without default certificate")
   301              return
   302          log("Using config-provided certificate")
   303          content = base64.b64decode(ssl_cert)
   304          content += base64.b64decode(ssl_key)
   305  
   306      pem_path = os.path.join(default_haproxy_lib_dir, "default.pem")
   307      write_ssl_pem(pem_path, content)
   308  
   309  
   310  # -----------------------------------------------------------------------------
   311  # create_listen_stanza: Function to create a generic listen section in the
   312  #                       haproxy config
   313  #                       service_name:  Arbitrary service name
   314  #                       service_ip:  IP address to listen for connections
   315  #                       service_port:  Port to listen for connections
   316  #                       service_options:  Comma separated list of options
   317  #                       server_entries:  List of tuples
   318  #                                         server_name
   319  #                                         server_ip
   320  #                                         server_port
   321  #                                         server_options
   322  #                       backends:  List of dicts
   323  #                                  backend_name: backend name,
   324  #                                  servers: list of tuples as in server_entries
   325  #                       errorfiles: List of dicts
   326  #                                   http_status: status to handle
   327  #                                   content: base 64 content for HAProxy to
   328  #                                            write to socket
   329  #                       crts: List of base 64 contents for SSL certificate
   330  #                             files that will be used in the bind line.
   331  # -----------------------------------------------------------------------------
   332  def create_listen_stanza(service_name=None, service_ip=None,
   333                           service_port=None, service_options=None,
   334                           server_entries=None, service_errorfiles=None,
   335                           service_crts=None, service_backends=None):
   336      if service_name is None or service_ip is None or service_port is None:
   337          return None
   338      fe_options = []
   339      be_options = []
   340      if service_options is not None:
   341          # For options that should be duplicated in both frontend and backend,
   342          # copy them to both.
   343          for o in dupe_options:
   344              if any(map(o.strip().startswith, service_options)):
   345                  fe_options.append(o)
   346                  be_options.append(o)
   347          # Filter provided service options into frontend-only and backend-only.
   348          results = izip(
   349              (fe_options, be_options),
   350              (True, False),
   351              tee((o, any(map(o.strip().startswith,
   352                              frontend_only_options)))
   353                  for o in service_options))
   354          for out, cond, result in results:
   355              out.extend(option for option, match in result
   356                         if match is cond and option not in out)
   357      service_config = []
   358      unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
   359      service_config.append("frontend %s-%s" % (unit_name, service_port))
   360      bind_stanza = "    bind %s:%s" % (service_ip, service_port)
   361      if service_crts:
   362          # Enable SSL termination for this frontend, using the given
   363          # certificates.
   364          bind_stanza += " ssl"
   365          for i, crt in enumerate(service_crts):
   366              if crt == "DEFAULT":
   367                  path = os.path.join(default_haproxy_lib_dir, "default.pem")
   368              else:
   369                  path = os.path.join(default_haproxy_lib_dir,
   370                                      "service_%s" % service_name, "%d.pem" % i)
   371              # SSLv3 is always off, since it's vulnerable to POODLE attacks
   372              bind_stanza += " crt %s no-sslv3" % path
   373      service_config.append(bind_stanza)
   374      service_config.append("    default_backend %s" % (service_name,))
   375      service_config.extend("    %s" % service_option.strip()
   376                            for service_option in fe_options)
   377  
   378      # For now errorfiles are common to all backends; in the future we
   379      # might offer support for per-backend error files.
   380      backend_errorfiles = []  # List of (status, path) tuples
   381      if service_errorfiles is not None:
   382          for errorfile in service_errorfiles:
   383              path = os.path.join(default_haproxy_lib_dir,
   384                                  "service_%s" % service_name,
   385                                  "%s.http" % errorfile["http_status"])
   386              backend_errorfiles.append((errorfile["http_status"], path))
   387  
   388      # Default backend
   389      _append_backend(
   390          service_config, service_name, be_options, backend_errorfiles,
   391          server_entries)
   392  
   393      # Extra backends
   394      if service_backends is not None:
   395          for service_backend in service_backends:
   396              _append_backend(
   397                  service_config, service_backend["backend_name"],
   398                  be_options, backend_errorfiles, service_backend["servers"])
   399  
   400      return '\n'.join(service_config)
   401  
   402  
   403  def _append_backend(service_config, name, options, errorfiles, server_entries):
   404      """Append a new backend stanza to the given service_config.
   405  
   406      A backend stanza consists of a 'backend <name>' line followed by option
   407      lines, errorfile lines and server lines.
   408      """
   409      service_config.append("")
   410      service_config.append("backend %s" % (name,))
   411      service_config.extend("    %s" % option.strip() for option in options)
   412      for status, path in errorfiles:
   413          service_config.append("    errorfile %s %s" % (status, path))
   414      if isinstance(server_entries, (list, tuple)):
   415          for i, (server_name, server_ip, server_port,
   416                  server_options) in enumerate(server_entries):
   417              server_line = "    server %s %s:%s" % \
   418                  (server_name, server_ip, server_port)
   419              if server_options is not None:
   420                  if isinstance(server_options, basestring):
   421                      server_line += " " + server_options
   422                  else:
   423                      server_line += " " + " ".join(server_options)
   424              server_line = server_line.format(i=i)
   425              service_config.append(server_line)
   426  
   427  
   428  # -----------------------------------------------------------------------------
   429  # create_monitoring_stanza:  Function to create the haproxy monitoring section
   430  #                            service_name: Arbitrary name
   431  # -----------------------------------------------------------------------------
   432  def create_monitoring_stanza(service_name="haproxy_monitoring"):
   433      config_data = config_get()
   434      if config_data['enable_monitoring'] is False:
   435          return None
   436      monitoring_password = get_monitoring_password()
   437      if config_data['monitoring_password'] != "changeme":
   438          monitoring_password = config_data['monitoring_password']
   439      elif (monitoring_password is None and
   440            config_data['monitoring_password'] == "changeme"):
   441          monitoring_password = pwgen(length=20)
   442      monitoring_config = []
   443      monitoring_config.append("mode http")
   444      monitoring_config.append("acl allowed_cidr src %s" %
   445                               config_data['monitoring_allowed_cidr'])
   446      monitoring_config.append("http-request deny unless allowed_cidr")
   447      monitoring_config.append("stats enable")
   448      monitoring_config.append("stats uri /")
   449      monitoring_config.append("stats realm Haproxy\ Statistics")
   450      monitoring_config.append("stats auth %s:%s" %
   451                               (config_data['monitoring_username'],
   452                                monitoring_password))
   453      monitoring_config.append("stats refresh %d" %
   454                               config_data['monitoring_stats_refresh'])
   455      return create_listen_stanza(service_name,
   456                                  "0.0.0.0",
   457                                  config_data['monitoring_port'],
   458                                  monitoring_config)
   459  
   460  
   461  # -----------------------------------------------------------------------------
   462  # get_config_services:  Convenience function that returns a mapping containing
   463  #                       all of the services' configuration
   464  # -----------------------------------------------------------------------------
   465  def get_config_services():
   466      config_data = config_get()
   467      services = {}
   468      return parse_services_yaml(services, config_data['services'])
   469  
   470  
   471  def parse_services_yaml(services, yaml_data):
   472      """
   473      Parse the given YAML services data and add it into the "services" dict.
   474      Multiple "servers" entries for the same service are unioned, as these are
   475      the haproxy backends that are contacted.
   476      """
   477      yaml_services = yaml.safe_load(yaml_data)
   478      if yaml_services is None:
   479          return services
   480  
   481      for service in yaml_services:
   482          service_name = service["service_name"]
   483          if not services:
   484              # 'None' is used as a marker for the first service defined, which
   485              # is used as the default service if a proxied server doesn't
   486              # specify which service it is bound to.
   487              services[None] = {"service_name": service_name}
   488  
   489          if "service_options" in service:
   490              if isinstance(service["service_options"], basestring):
   491                  service["service_options"] = comma_split(
   492                      service["service_options"])
   493  
   494              if is_proxy(service_name) and ("option forwardfor" not in
   495                                             service["service_options"]):
   496                  service["service_options"].append("option forwardfor")
   497  
   498          if (("server_options" in service and
   499               isinstance(service["server_options"], basestring))):
   500              service["server_options"] = comma_split(service["server_options"])
   501  
   502          services[service_name] = merge_service(
   503              services.get(service_name, {}), service)
   504  
   505      return services
   506  
   507  
   508  def _add_items_if_missing(target, additions):
   509      """
   510      Append items from `additions` to `target` if they are not present already.
   511  
   512      Returns a new list.
   513      """
   514      result = target[:]
   515      for addition in additions:
   516          if addition not in result:
   517              result.append(addition)
   518      return result
   519  
   520  
   521  def merge_service(old_service, new_service):
   522      """
   523      Helper function to merge two service entries correctly.
   524      Everything will get trampled (preferring old_service), except "servers"
   525      which will be unioned across both entries, stripping strict dups.
   526      """
   527      service = new_service.copy()
   528      service.update(old_service)
   529  
   530      # Merge all 'servers' entries of the default backend.
   531      if "servers" in old_service and "servers" in new_service:
   532          service["servers"] = _add_items_if_missing(
   533              old_service["servers"], new_service["servers"])
   534  
   535      # Merge all 'backends' and their contained "servers".
   536      if "backends" in old_service and "backends" in new_service:
   537          backends_by_name = {}
   538          # Go through backends in old and new configs and add them to
   539          # backends_by_name, merging 'servers' while at it.
   540          for backend in service["backends"] + new_service["backends"]:
   541              backend_name = backend.get("backend_name")
   542              if backend_name is None:
   543                  raise InvalidRelationDataError(
   544                      "Each backend must have backend_name.")
   545              if backend_name in backends_by_name:
   546                  # Merge servers.
   547                  target_backend = backends_by_name[backend_name]
   548                  target_backend["servers"] = _add_items_if_missing(
   549                      target_backend["servers"], backend["servers"])
   550              else:
   551                  backends_by_name[backend_name] = backend
   552  
   553          service["backends"] = sorted(
   554              backends_by_name.values(), key=itemgetter('backend_name'))
   555      return service
   556  
   557  
   558  def ensure_service_host_port(services):
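            """Ensure every service defines service_host and service_port.

            Services missing either are bound to 0.0.0.0, with ports allocated
            in steps of two above the last configured port (or monitoring_port).
            """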
   559      config_data = config_get()
   560      seen = []
   561      missing = []
   562      for service, options in sorted(services.iteritems()):
   563          if "service_host" not in options:
   564              missing.append(options)
   565              continue
   566          if "service_port" not in options:
   567              missing.append(options)
   568              continue
   569          seen.append((options["service_host"], int(options["service_port"])))
   570  
   571      seen.sort()
   572      last_port = seen and seen[-1][1] or int(config_data["monitoring_port"])
   573      for options in missing:
   574          last_port += 2
   575          options["service_host"] = "0.0.0.0"
   576          options["service_port"] = last_port
   577  
   578      return services
   579  
   580  
   581  # -----------------------------------------------------------------------------
   582  # get_config_service:   Convenience function that returns a dictionary
   583  #                       of the configuration of a given service
   584  # -----------------------------------------------------------------------------
   585  def get_config_service(service_name=None):
   586      return get_config_services().get(service_name, None)
   587  
   588  
   589  def is_proxy(service_name):
   590      flag_path = os.path.join(default_haproxy_service_config_dir,
   591                               "%s.is.proxy" % service_name)
   592      return os.path.exists(flag_path)
   593  
   594  
   595  # -----------------------------------------------------------------------------
   596  # create_services:  Function that will create the services configuration
   597  #                   from the config data and/or relation information
   598  # -----------------------------------------------------------------------------
   599  def create_services():
   600      services_dict = get_config_services()
   601      config_data = config_get()
   602  
   603      # Augment services_dict with service definitions from relation data.
   604      relation_data = relations_of_type("reverseproxy")
   605  
   606      # Handle relations which specify their own services clauses
   607      for relation_info in relation_data:
   608          if "services" in relation_info:
   609              services_dict = parse_services_yaml(services_dict,
   610                                                  relation_info['services'])
   611  
   612      if len(services_dict) == 0:
   613          log("No services configured, exiting.")
   614          return
   615  
   616      for relation_info in relation_data:
   617          unit = relation_info['__unit__']
   618  
   619          # Skip entries that specify their own services clauses; these were
   620          # handled earlier.
   621          if "services" in relation_info:
   622              log("Unit '%s' overrides 'services', "
   623                  "skipping further processing." % unit)
   624              continue
   625  
   626          juju_service_name = unit.rpartition('/')[0]
   627  
   628          relation_ok = True
   629          for required in ("port", "private-address"):
   630              if required not in relation_info:
   631                  log("No %s in relation data for '%s', skipping." %
   632                      (required, unit))
   633                  relation_ok = False
   634                  break
   635  
   636          if not relation_ok:
   637              continue
   638  
   639          # Mandatory switches ( private-address, port )
   640          host = relation_info['private-address']
   641          port = relation_info['port']
   642          server_name = ("%s-%s" % (unit.replace("/", "-"), port))
   643  
   644          # Optional switches ( service_name, sitenames )
   645          service_names = set()
   646          if 'service_name' in relation_info:
   647              if relation_info['service_name'] in services_dict:
   648                  service_names.add(relation_info['service_name'])
   649              else:
   650                  log("Service '%s' does not exist." %
   651                      relation_info['service_name'])
   652                  continue
   653  
   654          if 'sitenames' in relation_info:
   655              sitenames = relation_info['sitenames'].split()
   656              for sitename in sitenames:
   657                  if sitename in services_dict:
   658                      service_names.add(sitename)
   659  
   660          if juju_service_name + "_service" in services_dict:
   661              service_names.add(juju_service_name + "_service")
   662  
   663          if juju_service_name in services_dict:
   664              service_names.add(juju_service_name)
   665  
   666          if not service_names:
   667              service_names.add(services_dict[None]["service_name"])
   668  
   669          for service_name in service_names:
   670              service = services_dict[service_name]
   671  
   672              # Add the server entries
   673              servers = service.setdefault("servers", [])
   674              servers.append((server_name, host, port,
   675                              services_dict[service_name].get(
   676                                  'server_options', [])))
   677  
   678      has_servers = False
   679      for service_name, service in services_dict.iteritems():
   680          if service.get("servers", []):
   681              has_servers = True
   682  
   683      if not has_servers:
   684          log("No backend servers, exiting.")
   685          return
   686  
   687      del services_dict[None]
   688      services_dict = ensure_service_host_port(services_dict)
   689      if config_data["peering_mode"] != "active-active":
   690          services_dict = apply_peer_config(services_dict)
   691      write_service_config(services_dict)
   692      return services_dict
   693  
   694  
   695  def apply_peer_config(services_dict):
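            """Rework the services for active-passive peering.

            Services shared with the peer units are renamed to "<name>_be" and
            moved one port up, while a TCP listen stanza under the original
            name forwards to the per-unit backends, every unit but the first
            being a backup server.
            """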
   696      peer_data = relations_of_type("peer")
   697  
   698      peer_services = {}
   699      for relation_info in peer_data:
   700          unit_name = relation_info["__unit__"]
   701          peer_services_data = relation_info.get("all_services")
   702          if peer_services_data is None:
   703              continue
   704          service_data = yaml.safe_load(peer_services_data)
   705          for service in service_data:
   706              service_name = service["service_name"]
   707              if service_name in services_dict:
   708                  peer_service = peer_services.setdefault(service_name, {})
   709                  peer_service["service_name"] = service_name
   710                  peer_service["service_host"] = service["service_host"]
   711                  peer_service["service_port"] = service["service_port"]
   712                  peer_service["service_options"] = ["balance leastconn",
   713                                                     "mode tcp",
   714                                                     "option tcplog"]
   715                  servers = peer_service.setdefault("servers", [])
   716                  servers.append((unit_name.replace("/", "-"),
   717                                  relation_info["private-address"],
   718                                  service["service_port"] + 1, ["check"]))
   719  
   720      if not peer_services:
   721          return services_dict
   722  
   723      unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
   724      private_address = unit_get("private-address")
   725      for service_name, peer_service in peer_services.iteritems():
   726          original_service = services_dict[service_name]
   727  
   728          # If the original service has timeout settings, copy them over to the
   729          # peer service.
   730          for option in original_service.get("service_options", ()):
   731              if "timeout" in option:
   732                  peer_service["service_options"].append(option)
   733  
   734          servers = peer_service["servers"]
   735          # Add ourselves to the list of servers for the peer listen stanza.
   736          servers.append((unit_name, private_address,
   737                          original_service["service_port"] + 1,
   738                          ["check"]))
   739  
   740          # Make all but the first server in the peer listen stanza a backup
   741          # server.
   742          servers.sort()
   743          for server in servers[1:]:
   744              server[3].append("backup")
   745  
   746          # Remap the original service port; the peer listen stanza will use it.
   747          original_service["service_port"] += 1
   748  
   749          # Remap the original service to a new name and stuff the peer listen
   750          # stanza into its place.
   751          be_service = service_name + "_be"
   752          original_service["service_name"] = be_service
   753          services_dict[be_service] = original_service
   754          services_dict[service_name] = peer_service
   755  
   756      return services_dict
   757  
   758  
   759  def write_service_config(services_dict):
   760      # Write the per-service configuration snippets used to build haproxy.cfg.
   761      for service_key, service_config in services_dict.items():
   762          log("Service: %s" % service_key)
   763          service_name = service_config["service_name"]
   764          server_entries = service_config.get('servers')
   765          backends = service_config.get('backends', [])
   766  
   767          errorfiles = service_config.get('errorfiles', [])
   768          for errorfile in errorfiles:
   769              path = get_service_lib_path(service_name)
   770              full_path = os.path.join(
   771                  path, "%s.http" % errorfile["http_status"])
   772              with open(full_path, 'w') as f:
   773                  f.write(base64.b64decode(errorfile["content"]))
   774  
   775          # Write to disk the content of the given SSL certificates
   776          crts = service_config.get('crts', [])
   777          for i, crt in enumerate(crts):
   778              if crt == "DEFAULT":
   779                  continue
   780              content = base64.b64decode(crt)
   781              path = get_service_lib_path(service_name)
   782              full_path = os.path.join(path, "%d.pem" % i)
   783              write_ssl_pem(full_path, content)
   784              with open(full_path, 'w') as f:
   785                  f.write(content)
   786  
   787          if not os.path.exists(default_haproxy_service_config_dir):
   788              os.mkdir(default_haproxy_service_config_dir, 0600)
   789          with open(os.path.join(default_haproxy_service_config_dir,
   790                                 "%s.service" % service_name), 'w') as config:
   791              config.write(create_listen_stanza(
   792                  service_name,
   793                  service_config['service_host'],
   794                  service_config['service_port'],
   795                  service_config['service_options'],
   796                  server_entries, errorfiles, crts, backends))
   797  
   798  
   799  def get_service_lib_path(service_name):
   800      # Get a service-specific lib path
   801      path = os.path.join(default_haproxy_lib_dir,
   802                          "service_%s" % service_name)
   803      if not os.path.exists(path):
   804          os.makedirs(path)
   805      return path
   806  
   807  
   808  # -----------------------------------------------------------------------------
   809  # load_services: Convenience function that loads the service snippet
   810  #                configuration from the filesystem.
   811  # -----------------------------------------------------------------------------
   812  def load_services(service_name=None):
   813      services = ''
   814      if service_name is not None:
   815          if os.path.exists("%s/%s.service" %
   816                            (default_haproxy_service_config_dir, service_name)):
   817              with open("%s/%s.service" % (default_haproxy_service_config_dir,
   818                                           service_name)) as f:
   819                  services = f.read()
   820          else:
   821              services = None
   822      else:
   823          for service in glob.glob("%s/*.service" %
   824                                   default_haproxy_service_config_dir):
   825              with open(service) as f:
   826                  services += f.read()
   827                  services += "\n\n"
   828      return services
   829  
   830  
   831  # -----------------------------------------------------------------------------
   832  # remove_services:  Convenience function that removes the configuration
   833  #                   snippets from the filesystem.  This is necessary
   834  #                   to ensure sync between the config/relation-data
   835  #                   and the existing haproxy services.
   836  # -----------------------------------------------------------------------------
   837  def remove_services(service_name=None):
   838      if service_name is not None:
   839          path = "%s/%s.service" % (default_haproxy_service_config_dir,
   840                                    service_name)
   841          if os.path.exists(path):
   842              try:
   843                  os.remove(path)
   844              except Exception, e:
   845                  log(str(e))
   846                  return False
   847          return True
   848      else:
   849          for service in glob.glob("%s/*.service" %
   850                                   default_haproxy_service_config_dir):
   851              try:
   852                  os.remove(service)
   853              except Exception, e:
   854                  log(str(e))
   855                  pass
   856          return True
   857  
   858  
   859  # -----------------------------------------------------------------------------
   860  # construct_haproxy_config:  Convenience function to write haproxy.cfg
   861  #                            haproxy_globals, haproxy_defaults,
   862  #                            haproxy_monitoring, haproxy_services
   863  #                            are all strings that will be written without
   864  #                            any checks.
   865  #                            haproxy_monitoring and haproxy_services are
   866  #                            optional arguments
   867  # -----------------------------------------------------------------------------
   868  def construct_haproxy_config(haproxy_globals=None,
   869                               haproxy_defaults=None,
   870                               haproxy_monitoring=None,
   871                               haproxy_services=None):
   872      if None in (haproxy_globals, haproxy_defaults):
   873          return
   874      with open(default_haproxy_config, 'w') as haproxy_config:
   875          config_string = ''
   876          for config in (haproxy_globals, haproxy_defaults, haproxy_monitoring,
   877                         haproxy_services):
   878              if config is not None:
   879                  config_string += config + '\n\n'
   880          haproxy_config.write(config_string)
   881  
   882  
   883  # -----------------------------------------------------------------------------
   884  # service_haproxy:  Convenience function to start/stop/restart/reload
   885  #                   the haproxy service
   886  # -----------------------------------------------------------------------------
   887  def service_haproxy(action=None, haproxy_config=default_haproxy_config):
   888      if None in (action, haproxy_config):
   889          return None
   890      elif action == "check":
   891          command = ['/usr/sbin/haproxy', '-f', haproxy_config, '-c']
   892      else:
   893          command = ['service', 'haproxy', action]
   894      return_value = subprocess.call(command)
   895      return return_value == 0
   896  
   897  
   898  # #############################################################################
   899  # Hook functions
   900  # #############################################################################
   901  def install_hook():
   902      # Run both during initial install and during upgrade-charm.
   903      if not os.path.exists(default_haproxy_service_config_dir):
   904          os.mkdir(default_haproxy_service_config_dir, 0600)
   905  
   906      config_data = config_get()
   907      source = config_data.get('source')
   908      if source == 'backports':
   909          release = lsb_release()['DISTRIB_CODENAME']
   910          source = apt_backports_template % {'release': release}
   911          add_backports_preferences(release)
   912      add_source(source, config_data.get('key'))
   913      apt_update(fatal=True)
   914      apt_install(['haproxy', 'python-jinja2'], fatal=True)
   915      # Install pyasn1 library and modules for inspecting SSL certificates
   916      apt_install(['python-pyasn1', 'python-pyasn1-modules'], fatal=False)
   917      ensure_package_status(service_affecting_packages,
   918                            config_data['package_status'])
   919      enable_haproxy()
   920  
   921  
   922  def config_changed():
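            """Regenerate the haproxy configuration from config and relation data.

            The new configuration is validated before haproxy is reloaded; open
            ports are updated and related units notified when stanzas change.
            """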
   923      config_data = config_get()
   924  
   925      ensure_package_status(service_affecting_packages,
   926                            config_data['package_status'])
   927  
   928      old_service_ports = get_service_ports()
   929      old_stanzas = get_listen_stanzas()
   930      haproxy_globals = create_haproxy_globals()
   931      haproxy_defaults = create_haproxy_defaults()
   932      if config_data['enable_monitoring'] is True:
   933          haproxy_monitoring = create_monitoring_stanza()
   934      else:
   935          haproxy_monitoring = None
   936      remove_services()
   937      if config_data.changed("ssl_cert"):
   938          # TODO: handle also the case where it's the public-address value
   939          # that changes (see also #1444062)
   940          _notify_reverseproxy()
   941      if not create_services():
   942          sys.exit()
   943      haproxy_services = load_services()
   944      update_sysctl(config_data)
   945      update_ssl_cert(config_data)
   946      construct_haproxy_config(haproxy_globals,
   947                               haproxy_defaults,
   948                               haproxy_monitoring,
   949                               haproxy_services)
   950  
   951      write_metrics_cronjob(metrics_script_path,
   952                            metrics_cronjob_path)
   953  
   954      if service_haproxy("check"):
   955          update_service_ports(old_service_ports, get_service_ports())
   956          service_haproxy("reload")
   957          if not (get_listen_stanzas() == old_stanzas):
   958              notify_website()
   959              notify_peer()
   960      else:
   961          # XXX Ideally the config should be restored to a working state if the
   962          # check fails, otherwise an inadvertent reload will cause the service
   963          # to be broken.
   964          log("HAProxy configuration check failed, exiting.")
   965          sys.exit(1)
   966      if config_data.changed("global_log") or config_data.changed("source"):
   967          # restart rsyslog to pickup haproxy rsyslog config
   968          # This could be removed once the following bug is fixed in the haproxy
   969          # package:
   970          #   https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=790871
   971          service_restart("rsyslog")
   972  
   973  
   974  def start_hook():
   975      if service_haproxy("status"):
   976          return service_haproxy("restart")
   977      else:
   978          return service_haproxy("start")
   979  
   980  
   981  def stop_hook():
   982      if service_haproxy("status"):
   983          return service_haproxy("stop")
   984  
   985  
   986  def reverseproxy_interface(hook_name=None):
   987      if hook_name is None:
   988          return None
   989      if hook_name == "joined":
   990          # When we join a new reverseproxy relation we communicate to the
   991          # remote unit our public IP and public SSL certificate, since
   992          # some applications might need it in order to tell third parties
   993          # how to interact with them.
   994          _notify_reverseproxy(relation_ids=(relation_id(),))
   995          return
   996      if hook_name in ("changed", "departed"):
   997          config_changed()
   998  
   999  
  1000  def _notify_reverseproxy(relation_ids=None):
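            """Publish our public address and ssl_cert on reverseproxy relations."""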
  1001      config_data = config_get()
  1002      ssl_cert = config_data.get("ssl_cert")
  1003      if ssl_cert == "SELFSIGNED":
  1004          ssl_cert = base64.b64encode(get_selfsigned_cert()[0])
  1005      relation_settings = {
  1006          "public-address": unit_get("public-address"),
  1007          "ssl_cert": ssl_cert,
  1008      }
  1009      for rid in relation_ids or get_relation_ids("reverseproxy"):
  1010          relation_set(relation_id=rid, relation_settings=relation_settings)
  1011  
  1012  
  1013  def website_interface(hook_name=None):
  1014      if hook_name is None:
  1015          return None
  1016      # Notify website relation but only for the current relation in context.
  1017      notify_website(changed=hook_name == "changed",
  1018                     relation_ids=(relation_id(),))
  1019  
  1020  
  1021  def get_hostname(host=None):
  1022      my_host = socket.gethostname()
  1023      if host is None or host == "0.0.0.0":
  1024          # If the listen ip has been set to 0.0.0.0 then pass back the hostname
  1025          return socket.getfqdn(my_host)
  1026      elif host == "localhost":
  1027          # If the fqdn lookup has returned localhost (lxc setups) then return
  1028          # hostname
  1029          return my_host
  1030      return host
  1031  
  1032  
  1033  def notify_relation(relation, changed=False, relation_ids=None):
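            """Publish our hostname, port and services YAML on the given relation.

            If the remote units requested exactly one service, that service's
            host/port is advertised; otherwise the default hostname and port 80.
            """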
  1034      default_host = get_hostname()
  1035      default_port = 80
  1036  
  1037      for rid in relation_ids or get_relation_ids(relation):
  1038          service_names = set()
  1039          if rid is None:
  1040              rid = relation_id()
  1041          for relation_data in relations_for_id(rid):
  1042              if 'service_name' in relation_data:
  1043                  service_names.add(relation_data['service_name'])
  1044  
  1045              if changed:
  1046                  if 'is-proxy' in relation_data:
  1047                      remote_service = ("%s__%d" % (relation_data['hostname'],
  1048                                                    relation_data['port']))
  1049                      open("%s/%s.is.proxy" % (
  1050                          default_haproxy_service_config_dir,
  1051                          remote_service), 'a').close()
  1052  
  1053          service_name = None
  1054          if len(service_names) == 1:
  1055              service_name = service_names.pop()
  1056          elif len(service_names) > 1:
  1057              log("Remote units requested more than a single service name. "
  1058                  "Falling back to default host/port.")
  1059  
  1060          if service_name is not None:
  1061              # If a specific service has been asked for then return the ip:port
  1062              # for that service, else pass back the default
  1063              requestedservice = get_config_service(service_name)
  1064              my_host = get_hostname(requestedservice['service_host'])
  1065              my_port = requestedservice['service_port']
  1066          else:
  1067              my_host = default_host
  1068              my_port = default_port
  1069  
  1070          all_services = ""
  1071          services_dict = create_services()
  1072          if services_dict is not None:
  1073              all_services = yaml.safe_dump(sorted(services_dict.itervalues()))
  1074  
  1075          relation_set(relation_id=rid, port=str(my_port),
  1076                       hostname=my_host,
  1077                       all_services=all_services)
  1078  
  1079  
  1080  def notify_website(changed=False, relation_ids=None):
  1081      notify_relation("website", changed=changed, relation_ids=relation_ids)
  1082  
  1083  
  1084  def notify_peer(changed=False, relation_ids=None):
  1085      notify_relation("peer", changed=changed, relation_ids=relation_ids)
  1086  
  1087  
  1088  def install_nrpe_scripts():
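            """Copy the charm's NRPE check scripts to the Nagios plugins dir."""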
  1089      scripts_src = os.path.join(os.environ["CHARM_DIR"], "files",
  1090                                 "nrpe")
  1091      scripts_dst = "/usr/lib/nagios/plugins"
  1092      if not os.path.exists(scripts_dst):
  1093          os.makedirs(scripts_dst)
  1094      for fname in glob.glob(os.path.join(scripts_src, "*.sh")):
  1095          shutil.copy2(fname,
  1096                       os.path.join(scripts_dst, os.path.basename(fname)))
  1097  
  1098  
  1099  def update_nrpe_config():
  1100      install_nrpe_scripts()
  1101      nrpe_compat = nrpe.NRPE()
  1102      nrpe_compat.add_check('haproxy', 'Check HAProxy', 'check_haproxy.sh')
  1103      nrpe_compat.add_check('haproxy_queue', 'Check HAProxy queue depth',
  1104                            'check_haproxy_queue_depth.sh')
  1105      nrpe_compat.write()
  1106  
  1107  
  1108  def delete_metrics_cronjob(cron_path):
  1109      try:
  1110          os.unlink(cron_path)
  1111      except OSError:
  1112          pass
  1113  
  1114  
  1115  def write_metrics_cronjob(script_path, cron_path):
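            """Install the haproxy-to-statsd metrics script and its cron job.

            The cron job is removed instead if monitoring is disabled or the
            metrics configuration is missing or invalid.
            """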
  1116      config_data = config_get()
  1117  
  1118      if config_data['enable_monitoring'] is False:
  1119          log("enable_monitoring must be set to true for metrics")
  1120          delete_metrics_cronjob(cron_path)
  1121          return
  1122  
  1123      # need the following two configs to be valid
  1124      metrics_target = config_data['metrics_target'].strip()
  1125      metrics_sample_interval = config_data['metrics_sample_interval']
  1126      if (not metrics_target or
  1127              ':' not in metrics_target or not
  1128              metrics_sample_interval):
  1129          log("Required config not found or invalid "
  1130              "(metrics_target, metrics_sample_interval), "
  1131              "disabling metrics")
  1132          delete_metrics_cronjob(cron_path)
  1133          return
  1134  
  1135      charm_dir = os.environ['CHARM_DIR']
  1136      statsd_host, statsd_port = metrics_target.split(':', 1)
  1137      metrics_prefix = config_data['metrics_prefix'].strip()
  1138      metrics_prefix = metrics_prefix.replace(
  1139          "$UNIT", local_unit().replace('.', '-').replace('/', '-'))
  1140      haproxy_hostport = ":".join(['localhost',
  1141                                  str(config_data['monitoring_port'])])
  1142      haproxy_httpauth = ":".join([config_data['monitoring_username'].strip(),
  1143                                  get_monitoring_password()])
  1144  
  1145      # ensure script installed
  1146      shutil.copy2('%s/files/metrics/haproxy_to_statsd.sh' % charm_dir,
  1147                   metrics_script_path)
  1148  
  1149      # write the crontab
  1150      with open(cron_path, 'w') as cronjob:
  1151          cronjob.write(render_template("metrics_cronjob.template", {
  1152              'interval': config_data['metrics_sample_interval'],
  1153              'script': script_path,
  1154              'metrics_prefix': metrics_prefix,
  1155              'metrics_sample_interval': metrics_sample_interval,
  1156              'haproxy_hostport': haproxy_hostport,
  1157              'haproxy_httpauth': haproxy_httpauth,
  1158              'statsd_host': statsd_host,
  1159              'statsd_port': statsd_port,
  1160          }))
  1161  
  1162  
  1163  def add_backports_preferences(release):
  1164      with open(haproxy_preferences_path, "w") as preferences:
  1165          preferences.write(
  1166              "Package: haproxy\n"
  1167              "Pin: release a=%(release)s-backports\n"
  1168              "Pin-Priority: 500\n" % {'release': release})
  1169  
  1170  
  1171  def has_ssl_support():
  1172      """Return True if the locally installed haproxy package supports SSL."""
  1173      cache = apt_cache()
  1174      package = cache["haproxy"]
  1175      return package.current_ver.ver_str.split(".")[0:2] >= ["1", "5"]
  1176  
  1177  
  1178  def get_selfsigned_cert():
  1179      """Return the content of the self-signed certificate.
  1180  
  1181      If no self-signed certificate is there or the existing one doesn't match
  1182      our unit data, a new one will be created.
  1183  
  1184      @return: A 2-tuple whose first item holds the content of the public
  1185          certificate and the second item the content of the private key.
  1186      """
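            # A typical (hypothetical) call site would combine both parts into
            # a single PEM for an SSL-terminating frontend, e.g.:
            #     cert_pem, key_pem = get_selfsigned_cert()
            #     write_ssl_pem("/var/lib/haproxy/default.pem",
            #                   cert_pem + key_pem)
            # ("default.pem" is an illustrative filename.)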
  1187      cert_file = os.path.join(default_haproxy_lib_dir, "selfsigned_ca.crt")
  1188      key_file = os.path.join(default_haproxy_lib_dir, "selfsigned.key")
  1189      if is_selfsigned_cert_stale(cert_file, key_file):
  1190          log("Generating self-signed certificate")
  1191          gen_selfsigned_cert(cert_file, key_file)
  1192      result = ()
  1193      for content_file in [cert_file, key_file]:
  1194          with open(content_file, "r") as fd:
  1195              result += (fd.read(),)
  1196      return result
  1197  
  1198  
  1199  # XXX taken from the apache2 charm.
  1200  def is_selfsigned_cert_stale(cert_file, key_file):
  1201      """
  1202      Do we need to generate a new self-signed cert?
  1203  
  1204      @param cert_file: destination path of generated certificate
  1205      @param key_file: destination path of generated private key
  1206      """
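            # The certificate is treated as stale when either file is missing,
            # when its CN no longer matches the unit's public address, or
            # (where pyasn1 is available) when its subjectAltName entries
            # differ from the unit's current public/private addresses.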
  1207      # Basic Existence Checks
  1208      if not os.path.exists(cert_file):
  1209          return True
  1210      if not os.path.exists(key_file):
  1211          return True
  1212  
  1213      # Common Name
  1214      from OpenSSL import crypto
  1215      with open(cert_file) as fd:
  1216          cert = crypto.load_certificate(
  1217              crypto.FILETYPE_PEM, fd.read())
  1218      cn = cert.get_subject().commonName
  1219      if unit_get('public-address') != cn:
  1220          return True
  1221  
  1222      # Subject Alternative Name -- only trusty+ supports this
  1223      try:
  1224          from pyasn1.codec.der import decoder
  1225          from pyasn1_modules import rfc2459
  1226      except ImportError:
  1227          log('Cannot check subjAltName on <= 12.04, skipping.')
  1228          return False
  1229      cert_addresses = set()
  1230      unit_addresses = set(
  1231          [unit_get('public-address'), unit_get('private-address')])
  1232      for i in range(0, cert.get_extension_count()):
  1233          extension = cert.get_extension(i)
  1234          try:
  1235              names = decoder.decode(
  1236                  extension.get_data(), asn1Spec=rfc2459.SubjectAltName())[0]
  1237              for name in names:
  1238                  cert_addresses.add(str(name.getComponent()))
  1239          except Exception:
  1240              pass  # not a subjectAltName extension (or not decodable)
  1241      if cert_addresses != unit_addresses:
  1242          log('subjAltName: Cert (%s) != Unit (%s), assuming stale' % (
  1243              cert_addresses, unit_addresses))
  1244          return True
  1245  
  1246      return False
  1247  
  1248  
  1249  # XXX taken from the apache2 charm.
  1250  def gen_selfsigned_cert(cert_file, key_file):
  1251      """
  1252      Create a self-signed certificate.
  1253  
  1254      @param cert_file: destination path of generated certificate
  1255      @param key_file: destination path of generated private key
  1256      """
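            # These environment variables are presumably interpolated by the
            # charm's data/openssl.cnf (passed via -config below) to set the
            # subject CN and the subjectAltName entries.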
  1257      os.environ['OPENSSL_CN'] = unit_get('public-address')
  1258      os.environ['OPENSSL_PUBLIC'] = unit_get("public-address")
  1259      os.environ['OPENSSL_PRIVATE'] = unit_get("private-address")
  1260      # Set the umask so the child process will inherit it and
  1261      # the generated files will be readable only by root.
  1262      old_mask = os.umask(077)
  1263      subprocess.call(
  1264          ['openssl', 'req', '-new', '-x509', '-nodes', '-config',
  1265           os.path.join(os.environ['CHARM_DIR'], 'data', 'openssl.cnf'),
  1266           '-keyout', key_file, '-out', cert_file, '-days', '3650'],)
  1267      os.umask(old_mask)
  1268      uid = pwd.getpwnam('haproxy').pw_uid
  1269      os.chown(key_file, uid, -1)
  1270      os.chown(cert_file, uid, -1)
  1271  
  1272  
  1273  def write_ssl_pem(path, content):
  1274      """Write an SSL pem file and set permissions on it."""
  1275      # Set the umask so the certificate file is created readable
  1276      # only by its owner; it is then chowned to the 'haproxy'
  1277      # user (see below).
  1278      old_mask = os.umask(077)
  1279      with open(path, 'w') as f:
  1280          f.write(content)
  1281      os.umask(old_mask)
  1282      uid = pwd.getpwnam('haproxy').pw_uid
  1283      os.chown(path, uid, -1)
  1284  
  1285  
  1286  def statistics_interface():
  1287      config = config_get()
  1288      enable_monitoring = config['enable_monitoring']
  1289      monitoring_port = config['monitoring_port']
  1290      monitoring_password = get_monitoring_password()
  1291      monitoring_username = config['monitoring_username']
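            # Publish (or withdraw) the stats endpoint details on every
            # established "statistics" relation.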
  1292      for relid in get_relation_ids('statistics'):
  1293          if not enable_monitoring:
  1294              relation_set(relation_id=relid,
  1295                           enabled=enable_monitoring)
  1296          else:
  1297              relation_set(relation_id=relid,
  1298                           enabled=enable_monitoring,
  1299                           port=monitoring_port,
  1300                           password=monitoring_password,
  1301                           user=monitoring_username)
  1302  
  1303  
  1304  # #############################################################################
  1305  # Main section
  1306  # #############################################################################
  1307  
  1308  
  1309  def main(hook_name):
  1310      if hook_name == "install":
  1311          install_hook()
  1312      elif hook_name == "upgrade-charm":
  1313          install_hook()
  1314          config_changed()
  1315          update_nrpe_config()
  1316      elif hook_name == "config-changed":
  1317          config_data = config_get()
  1318          if config_data.changed("source"):
  1319              install_hook()
  1320          config_changed()
  1321          update_nrpe_config()
  1322          statistics_interface()
  1323          if config_data.implicit_save:
  1324              config_data.save()
  1325      elif hook_name == "start":
  1326          start_hook()
  1327      elif hook_name == "stop":
  1328          stop_hook()
  1329      elif hook_name == "reverseproxy-relation-broken":
  1330          config_changed()
  1331      elif hook_name == "reverseproxy-relation-changed":
  1332          reverseproxy_interface("changed")
  1333      elif hook_name == "reverseproxy-relation-departed":
  1334          reverseproxy_interface("departed")
  1335      elif hook_name == "reverseproxy-relation-joined":
  1336          reverseproxy_interface("joined")
  1337      elif hook_name == "website-relation-joined":
  1338          website_interface("joined")
  1339      elif hook_name == "website-relation-changed":
  1340          website_interface("changed")
  1341      elif hook_name == "peer-relation-joined":
  1342          website_interface("joined")
  1343      elif hook_name == "peer-relation-changed":
  1344          reverseproxy_interface("changed")
  1345      elif hook_name in ("nrpe-external-master-relation-joined",
  1346                         "local-monitors-relation-joined"):
  1347          update_nrpe_config()
  1348      elif hook_name in ("statistics-relation-joined",
  1349                         "statistics-relation-changed"):
  1350          statistics_interface()
  1351      else:
  1352          print "Unknown hook: %s" % hook_name
  1353          sys.exit(1)
  1354  
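        # Juju runs each hook as an executable named after the hook; in this
        # charm those hook files are typically symlinks back to hooks.py, so
        # the hook name can be recovered from argv[0], e.g.:
        #     hooks/config-changed -> hooks.py
        # Invoking hooks.py directly with the hook name as an argument is
        # also supported below.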
  1355  if __name__ == "__main__":
  1356      hook_name = os.path.basename(sys.argv[0])
  1357      # Also support being invoked directly with hook as argument name.
  1358      if hook_name == "hooks.py":
  1359          if len(sys.argv) < 2:
  1360              sys.exit("Missing required hook name argument.")
  1361          hook_name = sys.argv[1]
  1362      main(hook_name)