github.com/chasestarr/deis@v1.13.5-0.20170519182049-1d9e59fbdbfc/controller/api/models.py

     1  # -*- coding: utf-8 -*-
     2  
     3  """
     4  Data models for the Deis API.
     5  """
     6  
     7  from __future__ import unicode_literals
     8  import base64
     9  from datetime import datetime
    10  import etcd
    11  import importlib
    12  import logging
    13  import re
    14  import time
    15  from threading import Thread
    16  
    17  from django.conf import settings
    18  from django.contrib.auth import get_user_model
    19  from django.core.exceptions import ValidationError, SuspiciousOperation
    20  from django.db import models
    21  from django.db.models import Count
    22  from django.db.models import Max
    23  from django.db.models.signals import post_delete, post_save
    24  from django.dispatch import receiver
    25  from django.utils.encoding import python_2_unicode_compatible
    26  from docker.utils import utils as dockerutils
    27  from json_field.fields import JSONField
    28  from OpenSSL import crypto
    29  import requests
    30  from rest_framework.authtoken.models import Token
    31  
    32  from api import fields, utils, exceptions
    33  from registry import publish_release
    34  from utils import dict_diff, fingerprint
    35  
    36  
    37  logger = logging.getLogger(__name__)
    38  
    39  
    40  def close_db_connections(func, *args, **kwargs):
    41      """
    42      Decorator to explicitly close db connections during threaded execution
    43  
    44      Note this is necessary to work around:
    45      https://code.djangoproject.com/ticket/22420
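
            The wrapped function runs as usual; afterwards every connection in
            django.db.connections is closed, so threads spawned for container
            operations do not leave stale connections behind.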
    46      """
    47      def _close_db_connections(*args, **kwargs):
    48          ret = None
    49          try:
    50              ret = func(*args, **kwargs)
    51          finally:
    52              from django.db import connections
    53              for conn in connections.all():
    54                  conn.close()
    55          return ret
    56      return _close_db_connections
    57  
    58  
    59  def log_event(app, msg, level=logging.INFO):
    60      # controller needs to know which app this log comes from
    61      logger.log(level, "{}: {}".format(app.id, msg))
    62      app.log(msg, level)
    63  
    64  
    65  def validate_base64(value):
    66      """Check that the key material (the second whitespace-separated field) is valid base64."""
    67      try:
    68          base64.b64decode(value.split()[1])
    69      except Exception as e:
    70          raise ValidationError(e)
    71  
    72  
    73  def validate_id_is_docker_compatible(value):
    74      """
    75      Check that the ID follows docker's image name constraints
    76      """
    77      match = re.match(r'^[a-z0-9-]+$', value)
    78      if not match:
    79          raise ValidationError("App IDs can only contain [a-z0-9-].")
    80  
    81  
    82  def validate_app_structure(value):
    83      """Error if the dict values aren't ints >= 0."""
    84      try:
    85          if any(int(v) < 0 for v in value.viewvalues()):
    86              raise ValueError("Must be greater than or equal to zero")
    87      except ValueError as err:
    88          raise ValidationError(err)
    89  
    90  
    91  def validate_reserved_names(value):
    92      """Error if the value is a reserved name (settings.DEIS_RESERVED_NAMES)."""
    93      if value in settings.DEIS_RESERVED_NAMES:
    94          raise ValidationError('{} is a reserved name.'.format(value))
    95  
    96  
    97  def validate_comma_separated(value):
    98      """Error if the value doesn't look like a list of hostnames or IP addresses
    99      separated by commas.
   100      """
   101      if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
   102          raise ValidationError(
   103              "{} should be a comma-separated list".format(value))
   104  
   105  
   106  def validate_domain(value):
   107      """Error if the domain contains unexpected characters."""
   108      if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
   109          raise ValidationError('"{}" contains unexpected characters'.format(value))
   110  
   111  
   112  def validate_certificate(value):
   113      try:
   114          crypto.load_certificate(crypto.FILETYPE_PEM, value)
   115      except crypto.Error as e:
   116          raise ValidationError('Could not load certificate: {}'.format(e))
   117  
   118  
   119  def validate_common_name(value):
   120      if '*' in value:
   121          raise ValidationError('Wildcard certificates are not supported')
   122  
   123  
   124  def get_etcd_client():
   125      if not hasattr(get_etcd_client, "client"):
   126          # wire up etcd publishing if we can connect
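                # the client is cached on the function object; if the probe of '/deis'
                # fails we cache None, and callers are expected to handle a None client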
   127          try:
   128              get_etcd_client.client = etcd.Client(
   129                  host=settings.ETCD_HOST,
   130                  port=int(settings.ETCD_PORT))
   131              get_etcd_client.client.get('/deis')
   132          except etcd.EtcdException:
   133              logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
   134              get_etcd_client.client = None
   135      return get_etcd_client.client
   136  
   137  
   138  class AuditedModel(models.Model):
   139      """Add created and updated fields to a model."""
   140  
   141      created = models.DateTimeField(auto_now_add=True)
   142      updated = models.DateTimeField(auto_now=True)
   143  
   144      class Meta:
   145          """Mark :class:`AuditedModel` as abstract."""
   146          abstract = True
   147  
   148  
   149  def select_app_name():
   150      """Select a unique randomly generated app name"""
   151      name = utils.generate_app_name()
   152  
   153      while App.objects.filter(id=name).exists():
   154          name = utils.generate_app_name()
   155  
   156      return name
   157  
   158  
   159  class UuidAuditedModel(AuditedModel):
   160      """Add a UUID primary key to an :class:`AuditedModel`."""
   161  
   162      uuid = fields.UuidField('UUID', primary_key=True)
   163  
   164      class Meta:
   165          """Mark :class:`UuidAuditedModel` as abstract."""
   166          abstract = True
   167  
   168  
   169  @python_2_unicode_compatible
   170  class App(UuidAuditedModel):
   171      """
   172      Application used to service requests on behalf of end-users
   173      """
   174  
   175      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   176      id = models.SlugField(max_length=64, unique=True, default=select_app_name,
   177                            validators=[validate_id_is_docker_compatible,
   178                                        validate_reserved_names])
   179      structure = JSONField(default={}, blank=True, validators=[validate_app_structure])
   180  
   181      class Meta:
   182          permissions = (('use_app', 'Can use app'),)
   183  
   184      @property
   185      def _scheduler(self):
   186          mod = importlib.import_module(settings.SCHEDULER_MODULE)
   187          return mod.SchedulerClient(settings.SCHEDULER_TARGET,
   188                                     settings.SCHEDULER_AUTH,
   189                                     settings.SCHEDULER_OPTIONS,
   190                                     settings.SSH_PRIVATE_KEY)
   191  
   192      def __str__(self):
   193          return self.id
   194  
   195      @property
   196      def url(self):
   197          return self.id + '.' + settings.DEIS_DOMAIN
   198  
   199      def _get_job_id(self, container_type):
   200          app = self.id
   201          release = self.release_set.latest()
   202          version = "v{}".format(release.version)
   203          job_id = "{app}_{version}.{container_type}".format(**locals())
   204          return job_id
   205  
   206      def _get_command(self, container_type):
   207          try:
   208              # if this is not a procfile-based app, ensure users cannot break out
   209              # and run arbitrary commands on the host
   210              # FIXME: remove slugrunner's hardcoded entrypoint
   211              release = self.release_set.latest()
   212              if release.build.dockerfile or not release.build.sha:
   213                  return "bash -c '{}'".format(release.build.procfile[container_type])
   214              else:
   215                  return 'start {}'.format(container_type)
   216          # if the key is not present or if a parent attribute is None
   217          except (KeyError, TypeError, AttributeError):
   218              # handle special case for Dockerfile deployments
   219              return '' if container_type == 'cmd' else 'start {}'.format(container_type)
   220  
   221      def log(self, message, level=logging.INFO):
   222          """Logs a message in the context of this application.
   223  
   224          This prefixes log messages with an application "tag" that the customized deis-logspout
   225          watches for. When the tag is seen, the message (usually an application event such as a
   226          release or a scale operation) is treated as belonging to the application rather than to
   227          the controller and is handled accordingly.
   228          """
   229          logger.log(level, "[{}]: {}".format(self.id, message))
   230  
   231      def create(self, *args, **kwargs):
   232          """Create a new application with an initial config and release"""
   233          config = Config.objects.create(owner=self.owner, app=self)
   234          Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)
   235  
   236      def delete(self, *args, **kwargs):
   237          """Delete this application including all containers"""
   238          try:
   239              # attempt to remove containers from the scheduler
   240              self._destroy_containers([c for c in self.container_set.exclude(type='run')])
   241          except RuntimeError:
   242              pass
   243          self._clean_app_logs()
   244          return super(App, self).delete(*args, **kwargs)
   245  
   246      def restart(self, **kwargs):
   247          to_restart = self.container_set.all()
   248          if kwargs.get('type'):
   249              to_restart = to_restart.filter(type=kwargs.get('type'))
   250          if kwargs.get('num'):
   251              to_restart = to_restart.filter(num=kwargs.get('num'))
   252          self._restart_containers(to_restart)
   253          return to_restart
   254  
   255      def _clean_app_logs(self):
   256          """Delete application logs stored by the logger component"""
   257          try:
   258              url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
   259              requests.delete(url)
   260          except Exception as e:
   261              # Ignore errors deleting application logs.  An error here should not interfere with
   262              # the overall success of deleting an application, but we should log it.
   263              err = 'Error deleting existing application logs: {}'.format(e)
   264              log_event(self, err, logging.WARNING)
   265  
   266      def scale(self, user, structure):  # noqa
   267          """Scale containers up or down to match requested structure."""
   268          if self.release_set.latest().build is None:
   269              raise EnvironmentError('No build associated with this release')
   270          requested_structure = structure.copy()
   271          release = self.release_set.latest()
   272          # test for available process types
   273          available_process_types = release.build.procfile or {}
   274          for container_type in requested_structure:
   275              if container_type == 'cmd':
   276                  continue  # allow docker cmd types in case we don't have the image source
   277              if container_type not in available_process_types:
   278                  raise EnvironmentError(
   279                      'Container type {} does not exist in application'.format(container_type))
   280          msg = '{} scaled containers '.format(user.username) + ' '.join(
   281              "{}={}".format(k, v) for k, v in requested_structure.items())
   282          log_event(self, msg)
   283          # iterate and scale by container type (web, worker, etc)
   284          changed = False
   285          to_add, to_remove = [], []
   286          scale_types = {}
   287  
   288          # iterate on a copy of the container_type keys
   289          for container_type in requested_structure.keys():
   290              containers = list(self.container_set.filter(type=container_type).order_by('created'))
   291              # increment new container nums off the most recent container
   292              results = self.container_set.filter(type=container_type).aggregate(Max('num'))
   293              container_num = (results.get('num__max') or 0) + 1
   294              requested = requested_structure.pop(container_type)
   295              diff = requested - len(containers)
   296              if diff == 0:
   297                  continue
   298              changed = True
   299              scale_types[container_type] = requested
   300              while diff < 0:
   301                  c = containers.pop()
   302                  to_remove.append(c)
   303                  diff += 1
   304              while diff > 0:
   305                  # create a database record
   306                  c = Container.objects.create(owner=self.owner,
   307                                               app=self,
   308                                               release=release,
   309                                               type=container_type,
   310                                               num=container_num)
   311                  to_add.append(c)
   312                  container_num += 1
   313                  diff -= 1
   314  
   315          if changed:
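                    # some scheduler backends implement scale() natively; otherwise fall
                    # back to starting new containers and destroying surplus ones here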
   316              if "scale" in dir(self._scheduler):
   317                  self._scale_containers(scale_types, to_remove)
   318              else:
   319                  if to_add:
   320                      self._start_containers(to_add)
   321                  if to_remove:
   322                      self._destroy_containers(to_remove)
   323          # save new structure to the database
   324          vals = self.container_set.exclude(type='run').values(
   325              'type').annotate(Count('pk')).order_by()
   326          new_structure = structure.copy()
   327          new_structure.update({v['type']: v['pk__count'] for v in vals})
   328          self.structure = new_structure
   329          self.save()
   330          return changed
   331  
   332      def _scale_containers(self, scale_types, to_remove):
   333          release = self.release_set.latest()
   334          for scale_type in scale_types:
   335              image = release.image
   336              version = "v{}".format(release.version)
   337              kwargs = {'memory': release.config.memory,
   338                        'cpu': release.config.cpu,
   339                        'tags': release.config.tags,
   340                        'version': version,
   341                        'aname': self.id,
   342                        'num': scale_types[scale_type]}
   343              job_id = self._get_job_id(scale_type)
   344              command = self._get_command(scale_type)
   345              try:
   346                  self._scheduler.scale(
   347                      name=job_id,
   348                      image=image,
   349                      command=command,
   350                      **kwargs)
   351              except Exception as e:
   352                  err = '{} (scale): {}'.format(job_id, e)
   353                  log_event(self, err, logging.ERROR)
   354                  raise
   355          [c.delete() for c in to_remove]
   356  
   357      def _start_containers(self, to_add):
   358          """Creates and starts containers via the scheduler"""
   359          if not to_add:
   360              return
   361          create_threads = [Thread(target=c.create) for c in to_add]
   362          start_threads = [Thread(target=c.start) for c in to_add]
   363          [t.start() for t in create_threads]
   364          [t.join() for t in create_threads]
   365          if any(c.state != 'created' for c in to_add):
   366              err = 'aborting, failed to create some containers'
   367              log_event(self, err, logging.ERROR)
   368              self._destroy_containers(to_add)
   369              raise RuntimeError(err)
   370          [t.start() for t in start_threads]
   371          [t.join() for t in start_threads]
   372          if set([c.state for c in to_add]) != set(['up']):
   373              err = 'warning, some containers failed to start'
   374              log_event(self, err, logging.WARNING)
   375          # if the user specified a health check, run it against the new containers
   376          try:
   377              config = self.config_set.latest()
   378              if 'HEALTHCHECK_URL' in config.values.keys():
   379                  self._healthcheck(to_add, config.values)
   380          except Config.DoesNotExist:
   381              pass
   382  
   383      def _healthcheck(self, containers, config):
   384          # wait the full INITIAL_DELAY for the first attempt; on failure retry after 10%, 50% and 100% of it
   385          intervals = [1.0, 0.1, 0.5, 1.0]
   386          # HACK (bacongobbler): we need to wait until publisher has a chance to publish each
   387          # service to etcd, which can take up to 20 seconds.
   388          time.sleep(20)
   389          for i in xrange(len(intervals)):
   390              delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
   391              try:
   392              # wait out the initial delay, scaled by the current interval
   393                  if delay > 0:
   394                      time.sleep(delay * intervals[i])
   395                  to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
   396                  self._do_healthcheck(to_healthcheck, config)
   397                  break
   398              except exceptions.HealthcheckException as e:
   399                  try:
   400                      next_delay = delay * intervals[i+1]
   401                      msg = "{}; trying again in {} seconds".format(e, next_delay)
   402                      log_event(self, msg, logging.WARNING)
   403                  except IndexError:
   404                      log_event(self, e, logging.WARNING)
   405          else:
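                    # for/else: reached only when no attempt broke out of the loop above,
                    # i.e. every health check attempt failed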
   406              self._destroy_containers(containers)
   407              msg = "aborting, app containers failed to respond to health check"
   408              log_event(self, msg, logging.ERROR)
   409              raise RuntimeError(msg)
   410  
   411      def _do_healthcheck(self, containers, config):
   412          path = config.get('HEALTHCHECK_URL', '/')
   413          timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
   414          if not _etcd_client:
   415              raise exceptions.HealthcheckException('no etcd client available')
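        # each value under /deis/services/<app>/ is expected to hold the container's
        # address, written by the publisher component (see the HACK note in _healthcheck)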
   416          for container in containers:
   417              try:
   418                  key = "/deis/services/{self}/{container.job_id}".format(**locals())
   419                  url = "http://{}{}".format(_etcd_client.get(key).value, path)
   420                  response = requests.get(url, timeout=timeout)
   421                  if response.status_code != requests.codes.OK:
   422                      raise exceptions.HealthcheckException(
   423                          "app failed health check (got '{}', expected: '200')".format(
   424                              response.status_code))
   425              except (requests.Timeout, requests.ConnectionError, KeyError) as e:
   426                  raise exceptions.HealthcheckException(
   427                      'failed to connect to container ({})'.format(e))
   428  
   429      def _restart_containers(self, to_restart):
   430          """Restarts containers via the scheduler"""
   431          if not to_restart:
   432              return
   433          stop_threads = [Thread(target=c.stop) for c in to_restart]
   434          start_threads = [Thread(target=c.start) for c in to_restart]
   435          [t.start() for t in stop_threads]
   436          [t.join() for t in stop_threads]
   437          if any(c.state != 'created' for c in to_restart):
   438              err = 'warning, some containers failed to stop'
   439              log_event(self, err, logging.WARNING)
   440          [t.start() for t in start_threads]
   441          [t.join() for t in start_threads]
   442          if any(c.state != 'up' for c in to_restart):
   443              err = 'warning, some containers failed to start'
   444              log_event(self, err, logging.WARNING)
   445  
   446      def _destroy_containers(self, to_destroy):
   447          """Destroys containers via the scheduler"""
   448          if not to_destroy:
   449              return
   450          destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
   451          [t.start() for t in destroy_threads]
   452          [t.join() for t in destroy_threads]
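                # only delete database records for containers the scheduler reports as
                # destroyed; any leftovers trigger the RuntimeError below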
   453          pks = [c.pk for c in to_destroy if c.state == 'destroyed']
   454          Container.objects.filter(pk__in=pks).delete()
   455          if any(c.state != 'destroyed' for c in to_destroy):
   456              err = 'aborting, failed to destroy some containers'
   457              log_event(self, err, logging.ERROR)
   458              raise RuntimeError(err)
   459  
   460      def _prune_containers(self):
   461          try:
   462              containers = self.container_set.exclude(type='run')
   463              # find the unique type+num values of containers
   464              vals = set((i[0], i[1]) for i in containers.values_list('type', 'num'))
   465              for typ, num in vals:
   466                  # delete all but the latest of each type+num
   467                  group = containers.filter(type=typ, num=num)
   468                  if group.count() > 1:
   469                      latest = group.latest()
   470                      group.exclude(uuid=latest.uuid).delete()
   471          except Exception as e:
   472              # just log the error, don't raise it
   473              err = '(_prune_containers): {}'.format(e)
   474              log_event(self, err, logging.ERROR)
   475  
   476      def deploy(self, user, release):
   477          """Deploy a new release to this application"""
   478          self._prune_containers()
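                # clone each existing (non-run) container onto the new release, then either
                # hand the rollout to a scheduler that supports deploy() or start the clones
                # ourselves and destroy the old containers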
   479          existing = self.container_set.exclude(type='run')
   480          new = []
   481          scale_types = set()
   482          for e in existing:
   483              n = e.clone(release)
   484              n.save()
   485              new.append(n)
   486              scale_types.add(e.type)
   487  
   488          if new and "deploy" in dir(self._scheduler):
   489              self._deploy_app(scale_types, release, existing)
   490          else:
   491              self._start_containers(new)
   492  
   493              # destroy old containers
   494              if existing:
   495                  self._destroy_containers(existing)
   496  
   497          # perform default scaling if necessary
   498          if self.structure == {} and release.build is not None:
   499              self._default_scale(user, release)
   500  
   501      def _deploy_app(self, scale_types, release, existing):
   502          for scale_type in scale_types:
   503              image = release.image
   504              version = "v{}".format(release.version)
   505              kwargs = {'memory': release.config.memory,
   506                        'cpu': release.config.cpu,
   507                        'tags': release.config.tags,
   508                        'aname': self.id,
   509                        'num': 0,
   510                        'version': version}
   511              job_id = self._get_job_id(scale_type)
   512              command = self._get_command(scale_type)
   513              try:
   514                  self._scheduler.deploy(
   515                      name=job_id,
   516                      image=image,
   517                      command=command,
   518                      **kwargs)
   519              except Exception as e:
   520                  err = '{} (deploy): {}'.format(job_id, e)
   521                  log_event(self, err, logging.ERROR)
   522                  raise
   523          [c.delete() for c in existing]
   524  
   525      def _default_scale(self, user, release):
   526          """Scale to default structure based on release type"""
   527          # if there is no SHA, assume a docker image is being promoted
   528          if not release.build.sha:
   529              structure = {'cmd': 1}
   530  
   531          # if a dockerfile exists without a procfile, assume docker workflow
   532          elif release.build.dockerfile and not release.build.procfile:
   533              structure = {'cmd': 1}
   534  
   535          # if a procfile exists without a web entry, assume docker workflow
   536          elif release.build.procfile and 'web' not in release.build.procfile:
   537              structure = {'cmd': 1}
   538  
   539          # default to heroku workflow
   540          else:
   541              structure = {'web': 1}
   542  
   543          self.scale(user, structure)
   544  
   545      def logs(self, log_lines=str(settings.LOG_LINES)):
   546          """Return aggregated log data for this application."""
   547          try:
   548              url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
   549                                                          self.id, log_lines)
   550              r = requests.get(url)
   551          # Handle HTTP request errors
   552          except requests.exceptions.RequestException as e:
   553              logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
   554              raise e
   555          # Handle logs empty or not found
   556          if r.status_code == 204 or r.status_code == 404:
   557              logger.info("GET {} returned a {} status code".format(url, r.status_code))
   558              raise EnvironmentError('Could not locate logs')
   559          # Handle unanticipated status codes
   560          if r.status_code != 200:
   561              logger.error("Error accessing deis-logger: GET {} returned a {} status code"
   562                           .format(url, r.status_code))
   563              raise EnvironmentError('Error accessing deis-logger')
   564          return r.content
   565  
   566      def run(self, user, command):
   567          """Run a one-off command in an ephemeral app container."""
   568          # FIXME: remove the need for SSH private keys by using
   569          # a scheduler that supports one-off admin tasks natively
   570          if not settings.SSH_PRIVATE_KEY:
   571              raise EnvironmentError('Support for admin commands is not configured')
   572          if self.release_set.latest().build is None:
   573              raise EnvironmentError('No build associated with this release to run this command')
   574          # TODO: add support for interactive shell
   575          msg = "{} runs '{}'".format(user.username, command)
   576          log_event(self, msg)
   577          c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
   578  
   579          # create database record for run process
   580          c = Container.objects.create(owner=self.owner,
   581                                       app=self,
   582                                       release=self.release_set.latest(),
   583                                       type='run',
   584                                       num=c_num)
   585          image = c.release.image
   586  
   587          # backwards compatibility: check whether the image already names a registry host
   588          def _has_hostname(image):
   589              repo, tag = dockerutils.parse_repository_tag(image)
   590              return '/' in repo and '.' in repo.split('/')[0]
   591  
   592          if not _has_hostname(image):
   593              image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
   594                                        settings.REGISTRY_PORT,
   595                                        image)
   596          # SECURITY: shell-escape user input
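                # ' becomes '\'' so the command stays inside the single-quoted wrapper
                # that Container.run() builds around it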
   597          escaped_command = command.replace("'", "'\\''")
   598          return c.run(escaped_command)
   599  
   600  
   601  @python_2_unicode_compatible
   602  class Container(UuidAuditedModel):
   603      """
   604      Docker container used to securely host an application process.
   605      """
   606  
   607      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   608      app = models.ForeignKey('App')
   609      release = models.ForeignKey('Release')
   610      type = models.CharField(max_length=128, blank=False)
   611      num = models.PositiveIntegerField()
   612  
   613      @property
   614      def _scheduler(self):
   615          return self.app._scheduler
   616  
   617      @property
   618      def state(self):
   619          return self._scheduler.state(self.job_id).name
   620  
   621      def short_name(self):
   622          return "{}.{}.{}".format(self.app.id, self.type, self.num)
   623      short_name.short_description = 'Name'
   624  
   625      def __str__(self):
   626          return self.short_name()
   627  
   628      class Meta:
   629          get_latest_by = 'created'
   630          ordering = ['created']
   631  
   632      @property
   633      def job_id(self):
   634          version = "v{}".format(self.release.version)
   635          return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals())
   636  
   637      def _get_command(self):
   638          try:
   639              # if this is not a procfile-based app, ensure users cannot break out
   640              # and run arbitrary commands on the host
   641              # FIXME: remove slugrunner's hardcoded entrypoint
   642              if self.release.build.dockerfile or not self.release.build.sha:
   643                  return "bash -c '{}'".format(self.release.build.procfile[self.type])
   644              else:
   645                  return 'start {}'.format(self.type)
   646          # if the key is not present or if a parent attribute is None
   647          except (KeyError, TypeError, AttributeError):
   648              # handle special case for Dockerfile deployments
   649              return '' if self.type == 'cmd' else 'start {}'.format(self.type)
   650  
   651      _command = property(_get_command)
   652  
   653      def clone(self, release):
   654          c = Container.objects.create(owner=self.owner,
   655                                       app=self.app,
   656                                       release=release,
   657                                       type=self.type,
   658                                       num=self.num)
   659          return c
   660  
   661      @close_db_connections
   662      def create(self):
   663          image = self.release.image
   664          kwargs = {'memory': self.release.config.memory,
   665                    'cpu': self.release.config.cpu,
   666                    'tags': self.release.config.tags}
   667          try:
   668              self._scheduler.create(
   669                  name=self.job_id,
   670                  image=image,
   671                  command=self._command,
   672                  **kwargs)
   673          except Exception as e:
   674              err = '{} (create): {}'.format(self.job_id, e)
   675              log_event(self.app, err, logging.ERROR)
   676              raise
   677  
   678      @close_db_connections
   679      def start(self):
   680          try:
   681              self._scheduler.start(self.job_id)
   682          except Exception as e:
   683              err = '{} (start): {}'.format(self.job_id, e)
   684              log_event(self.app, err, logging.WARNING)
   685              raise
   686  
   687      @close_db_connections
   688      def stop(self):
   689          try:
   690              self._scheduler.stop(self.job_id)
   691          except Exception as e:
   692              err = '{} (stop): {}'.format(self.job_id, e)
   693              log_event(self.app, err, logging.ERROR)
   694              raise
   695  
   696      @close_db_connections
   697      def destroy(self):
   698          try:
   699              self._scheduler.destroy(self.job_id)
   700          except Exception as e:
   701              err = '{} (destroy): {}'.format(self.job_id, e)
   702              log_event(self.app, err, logging.ERROR)
   703              raise
   704  
   705      def run(self, command):
   706          """Run a one-off command"""
   707          if self.release.build is None:
   708              raise EnvironmentError('No build associated with this release '
   709                                     'to run this command')
   710          image = self.release.image
   711          entrypoint = '/bin/bash'
   712          # if this is a procfile-based app, switch the entrypoint to slugrunner's default
   713          # FIXME: remove slugrunner's hardcoded entrypoint
   714          if self.release.build.procfile and \
   715             self.release.build.sha and not \
   716             self.release.build.dockerfile:
   717              entrypoint = '/runner/init'
   718              command = "'{}'".format(command)
   719          else:
   720              command = "-c '{}'".format(command)
   721          try:
   722              rc, output = self._scheduler.run(self.job_id, image, entrypoint, command)
   723              return rc, output
   724          except Exception as e:
   725              err = '{} (run): {}'.format(self.job_id, e)
   726              log_event(self.app, err, logging.ERROR)
   727              raise
   728  
   729  
   730  @python_2_unicode_compatible
   731  class Push(UuidAuditedModel):
   732      """
   733      Instance of a push used to trigger an application build
   734      """
   735      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   736      app = models.ForeignKey('App')
   737      sha = models.CharField(max_length=40)
   738  
   739      fingerprint = models.CharField(max_length=255)
   740      receive_user = models.CharField(max_length=255)
   741      receive_repo = models.CharField(max_length=255)
   742  
   743      ssh_connection = models.CharField(max_length=255)
   744      ssh_original_command = models.CharField(max_length=255)
   745  
   746      class Meta:
   747          get_latest_by = 'created'
   748          ordering = ['-created']
   749          unique_together = (('app', 'uuid'),)
   750  
   751      def __str__(self):
   752          return "{0}-{1}".format(self.app.id, self.sha[:7])
   753  
   754  
   755  @python_2_unicode_compatible
   756  class Build(UuidAuditedModel):
   757      """
   758      Instance of a software build used by runtime nodes
   759      """
   760  
   761      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   762      app = models.ForeignKey('App')
   763      image = models.CharField(max_length=256)
   764  
   765      # optional fields populated by builder
   766      sha = models.CharField(max_length=40, blank=True)
   767      procfile = JSONField(default={}, blank=True)
   768      dockerfile = models.TextField(blank=True)
   769  
   770      class Meta:
   771          get_latest_by = 'created'
   772          ordering = ['-created']
   773          unique_together = (('app', 'uuid'),)
   774  
   775      def create(self, user, *args, **kwargs):
   776          latest_release = self.app.release_set.latest()
   777          source_version = 'latest'
   778          if self.sha:
   779              source_version = 'git-{}'.format(self.sha)
   780          new_release = latest_release.new(user,
   781                                           build=self,
   782                                           config=latest_release.config,
   783                                           source_version=source_version)
   784          try:
   785              self.app.deploy(user, new_release)
   786              return new_release
   787          except RuntimeError:
   788              new_release.delete()
   789              raise
   790  
   791      def save(self, **kwargs):
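                # if a proctype disappeared from the procfile between builds, tear down
                # its containers before saving the new build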
   792          try:
   793              previous_build = self.app.build_set.latest()
   794              to_destroy = []
   795              for proctype in previous_build.procfile:
   796                  if proctype not in self.procfile:
   797                      for c in self.app.container_set.filter(type=proctype):
   798                          to_destroy.append(c)
   799              self.app._destroy_containers(to_destroy)
   800          except Build.DoesNotExist:
   801              pass
   802          return super(Build, self).save(**kwargs)
   803  
   804      def __str__(self):
   805          return "{0}-{1}".format(self.app.id, self.uuid[:7])
   806  
   807  
   808  @python_2_unicode_compatible
   809  class Config(UuidAuditedModel):
   810      """
   811      Set of configuration values applied as environment variables
   812      during runtime execution of the Application.
   813      """
   814  
   815      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   816      app = models.ForeignKey('App')
   817      values = JSONField(default={}, blank=True)
   818      memory = JSONField(default={}, blank=True)
   819      cpu = JSONField(default={}, blank=True)
   820      tags = JSONField(default={}, blank=True)
   821  
   822      class Meta:
   823          get_latest_by = 'created'
   824          ordering = ['-created']
   825          unique_together = (('app', 'uuid'),)
   826  
   827      def __str__(self):
   828          return "{}-{}".format(self.app.id, self.uuid[:7])
   829  
   830      def save(self, **kwargs):
   831          """merge the old config with the new"""
   832          try:
   833              previous_config = self.app.config_set.latest()
   834              for attr in ['cpu', 'memory', 'tags', 'values']:
   835                  # Guard against migrations from older apps without fixes to
   836                  # JSONField encoding.
   837                  try:
   838                      data = getattr(previous_config, attr).copy()
   839                  except AttributeError:
   840                      data = {}
   841                  try:
   842                      new_data = getattr(self, attr).copy()
   843                  except AttributeError:
   844                      new_data = {}
   845                  data.update(new_data)
   846                  # remove config keys if we provided a null value
   847                  [data.pop(k) for k, v in new_data.viewitems() if v is None]
   848                  setattr(self, attr, data)
   849          except Config.DoesNotExist:
   850              pass
   851          return super(Config, self).save(**kwargs)
   852  
   853  
   854  @python_2_unicode_compatible
   855  class Release(UuidAuditedModel):
   856      """
   857      Software release deployed by the application platform
   858  
   859      Releases contain a :class:`Build` and a :class:`Config`.
   860      """
   861  
   862      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   863      app = models.ForeignKey('App')
   864      version = models.PositiveIntegerField()
   865      summary = models.TextField(blank=True, null=True)
   866  
   867      config = models.ForeignKey('Config')
   868      build = models.ForeignKey('Build', null=True)
   869  
   870      class Meta:
   871          get_latest_by = 'created'
   872          ordering = ['-created']
   873          unique_together = (('app', 'version'),)
   874  
   875      def __str__(self):
   876          return "{0}-v{1}".format(self.app.id, self.version)
   877  
   878      @property
   879      def image(self):
   880          return '{}:v{}'.format(self.app.id, str(self.version))
   881  
   882      def new(self, user, config, build, summary=None, source_version='latest'):
   883          """
   884          Create a new application release using the provided Build and Config
   885          on behalf of a user.
   886  
   887          Releases start at v1 and auto-increment.
   888          """
   889          # auto-increment: the new release gets the next version number
   890          new_version = self.version + 1
   891          # create the new release record
   892          release = Release.objects.create(
   893              owner=user, app=self.app, config=config,
   894              build=build, version=new_version, summary=summary)
   895          try:
   896              release.publish()
   897          except EnvironmentError as e:
   898              # If we cannot publish this app, just log and carry on
   899              log_event(self.app, e)
   900              pass
   901          return release
   902  
   903      def publish(self, source_version='latest'):
   904          if self.build is None:
   905              raise EnvironmentError('No build associated with this release to publish')
   906          source_image = self.build.image
   907          if ':' not in source_image:
   908              source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
   909              source_image = "{}:{}".format(source_image, source_tag)
   910          # If the build has a SHA, assume it's from deis-builder and in the deis-registry already
   911          deis_registry = bool(self.build.sha)
   912          publish_release(source_image, self.config.values, self.image, deis_registry)
   913  
   914      def previous(self):
   915          """
   916          Return the previous Release to this one.
   917  
   918          :return: the previous :class:`Release`, or None
   919          """
   920          releases = self.app.release_set
   921          if self.pk:
   922              releases = releases.exclude(pk=self.pk)
   923          try:
   924              # Get the Release previous to this one
   925              prev_release = releases.latest()
   926          except Release.DoesNotExist:
   927              prev_release = None
   928          return prev_release
   929  
   930      def rollback(self, user, version):
   931          if version < 1:
   932              raise EnvironmentError('version cannot be below 1')
   933          summary = "{} rolled back to v{}".format(user, version)
   934          prev = self.app.release_set.get(version=version)
   935          new_release = self.new(
   936              user,
   937              build=prev.build,
   938              config=prev.config,
   939              summary=summary,
   940              source_version='v{}'.format(version))
   941          try:
   942              self.app.deploy(user, new_release)
   943              return new_release
   944          except RuntimeError:
   945              new_release.delete()
   946              raise
   947  
   948      def save(self, *args, **kwargs):  # noqa
   949          if not self.summary:
   950              self.summary = ''
   951              prev_release = self.previous()
   952              # compare this build to the previous build
   953              old_build = prev_release.build if prev_release else None
   954              old_config = prev_release.config if prev_release else None
   955              # if the build changed, log it and who pushed it
   956              if self.version == 1:
   957                  self.summary += "{} created initial release".format(self.app.owner)
   958              elif self.build != old_build:
   959                  if self.build.sha:
   960                      self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
   961                  else:
   962                      self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
   963              # if the config data changed, log the dict diff
   964              if self.config != old_config:
   965                  dict1 = self.config.values
   966                  dict2 = old_config.values if old_config else {}
   967                  diff = dict_diff(dict1, dict2)
   968                  # try to be as succinct as possible
   969                  added = ', '.join(k for k in diff.get('added', {}))
   970                  added = 'added ' + added if added else ''
   971                  changed = ', '.join(k for k in diff.get('changed', {}))
   972                  changed = 'changed ' + changed if changed else ''
   973                  deleted = ', '.join(k for k in diff.get('deleted', {}))
   974                  deleted = 'deleted ' + deleted if deleted else ''
   975                  changes = ', '.join(i for i in (added, changed, deleted) if i)
   976                  if changes:
   977                      if self.summary:
   978                          self.summary += ' and '
   979                      self.summary += "{} {}".format(self.config.owner, changes)
   980                  # if the limits changed (memory or cpu), log the dict diff
   981                  changes = []
   982                  old_mem = old_config.memory if old_config else {}
   983                  diff = dict_diff(self.config.memory, old_mem)
   984                  if diff.get('added') or diff.get('changed') or diff.get('deleted'):
   985                      changes.append('memory')
   986                  old_cpu = old_config.cpu if old_config else {}
   987                  diff = dict_diff(self.config.cpu, old_cpu)
   988                  if diff.get('added') or diff.get('changed') or diff.get('deleted'):
   989                      changes.append('cpu')
   990                  if changes:
   991                      changes = 'changed limits for ' + ', '.join(changes)
   992                      self.summary += "{} {}".format(self.config.owner, changes)
   993                  # if the tags changed, log the dict diff
   994                  changes = []
   995                  old_tags = old_config.tags if old_config else {}
   996                  diff = dict_diff(self.config.tags, old_tags)
   997                  # try to be as succinct as possible
   998                  added = ', '.join(k for k in diff.get('added', {}))
   999                  added = 'added tag ' + added if added else ''
  1000                  changed = ', '.join(k for k in diff.get('changed', {}))
  1001                  changed = 'changed tag ' + changed if changed else ''
  1002                  deleted = ', '.join(k for k in diff.get('deleted', {}))
  1003                  deleted = 'deleted tag ' + deleted if deleted else ''
  1004                  changes = ', '.join(i for i in (added, changed, deleted) if i)
  1005                  if changes:
  1006                      if self.summary:
  1007                          self.summary += ' and '
  1008                      self.summary += "{} {}".format(self.config.owner, changes)
  1009              if not self.summary:
  1010                  if self.version == 1:
  1011                      self.summary = "{} created the initial release".format(self.owner)
  1012                  else:
  1013                      self.summary = "{} changed nothing".format(self.owner)
  1014          super(Release, self).save(*args, **kwargs)
  1015  
  1016  
  1017  @python_2_unicode_compatible
  1018  class Domain(AuditedModel):
  1019      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1020      app = models.ForeignKey('App')
  1021      domain = models.TextField(blank=False, null=False, unique=True)
  1022  
  1023      def __str__(self):
  1024          return self.domain
  1025  
  1026  
  1027  @python_2_unicode_compatible
  1028  class Certificate(AuditedModel):
  1029      """
  1030      Public and private key pair used to secure application traffic at the router.
  1031      """
  1032      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1033      # there is no upper limit on the size of an X.509 certificate
  1034      certificate = models.TextField(validators=[validate_certificate])
  1035      key = models.TextField()
  1036      # X.509 certificates allow an arbitrary string as the common name.
  1037      common_name = models.TextField(unique=True, validators=[validate_common_name])
  1038      expires = models.DateTimeField()
  1039  
  1040      def __str__(self):
  1041          return self.common_name
  1042  
  1043      def _get_certificate(self):
  1044          try:
  1045              return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
  1046          except crypto.Error as e:
  1047              raise SuspiciousOperation(e)
  1048  
  1049      def save(self, *args, **kwargs):
  1050          certificate = self._get_certificate()
  1051          if not self.common_name:
  1052              self.common_name = certificate.get_subject().CN
  1053          if not self.expires:
  1054              # convert openssl's expiry date format to Django's DateTimeField format
  1055              self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
  1056          return super(Certificate, self).save(*args, **kwargs)
  1057  
  1058  
  1059  @python_2_unicode_compatible
  1060  class Key(UuidAuditedModel):
  1061      """An SSH public key."""
  1062  
  1063      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1064      id = models.CharField(max_length=128)
  1065      public = models.TextField(unique=True, validators=[validate_base64])
  1066      fingerprint = models.CharField(max_length=128)
  1067  
  1068      class Meta:
  1069          verbose_name = 'SSH Key'
  1070          unique_together = (('owner', 'fingerprint'))
  1071  
  1072      def __str__(self):
  1073          return "{}...{}".format(self.public[:18], self.public[-31:])
  1074  
  1075      def save(self, *args, **kwargs):
  1076          self.fingerprint = fingerprint(self.public)
  1077          return super(Key, self).save(*args, **kwargs)
  1078  
  1079  
  1080  # define update/delete callbacks for synchronizing
  1081  # models with the configuration management backend
  1082  
  1083  def _log_build_created(**kwargs):
  1084      if kwargs.get('created'):
  1085          build = kwargs['instance']
  1086          # log only to the controller; this event will be logged in the release summary
  1087          logger.info("{}: build {} created".format(build.app, build))
  1088  
  1089  
  1090  def _log_release_created(**kwargs):
  1091      if kwargs.get('created'):
  1092          release = kwargs['instance']
  1093          # log only to the controller; this event will be logged in the release summary
  1094          logger.info("{}: release {} created".format(release.app, release))
  1095          # append release lifecycle logs to the app
  1096          release.app.log(release.summary)
  1097  
  1098  
  1099  def _log_config_updated(**kwargs):
  1100      config = kwargs['instance']
  1101      # log only to the controller; this event will be logged in the release summary
  1102      logger.info("{}: config {} updated".format(config.app, config))
  1103  
  1104  
  1105  def _log_domain_added(**kwargs):
  1106      if kwargs.get('created'):
  1107          domain = kwargs['instance']
  1108          msg = "domain {} added".format(domain)
  1109          log_event(domain.app, msg)
  1110  
  1111  
  1112  def _log_domain_removed(**kwargs):
  1113      domain = kwargs['instance']
  1114      msg = "domain {} removed".format(domain)
  1115      log_event(domain.app, msg)
  1116  
  1117  
  1118  def _log_cert_added(**kwargs):
  1119      if kwargs.get('created'):
  1120          cert = kwargs['instance']
  1121          logger.info("cert {} added".format(cert))
  1122  
  1123  
  1124  def _log_cert_removed(**kwargs):
  1125      cert = kwargs['instance']
  1126      logger.info("cert {} removed".format(cert))
  1127  
  1128  
  1129  def _etcd_publish_key(**kwargs):
  1130      key = kwargs['instance']
  1131      _etcd_client.write('/deis/builder/users/{}/{}'.format(
  1132          key.owner.username, fingerprint(key.public)), key.public)
  1133  
  1134  
  1135  def _etcd_purge_key(**kwargs):
  1136      key = kwargs['instance']
  1137      try:
  1138          _etcd_client.delete('/deis/builder/users/{}/{}'.format(
  1139              key.owner.username, fingerprint(key.public)))
  1140      except KeyError:
  1141          pass
  1142  
  1143  
  1144  def _etcd_purge_user(**kwargs):
  1145      username = kwargs['instance'].username
  1146      try:
  1147          _etcd_client.delete(
  1148              '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
  1149      except KeyError:
  1150          # If _etcd_publish_key() wasn't called, there is no user dir to delete.
  1151          pass
  1152  
  1153  
  1154  def _etcd_publish_app(**kwargs):
  1155      appname = kwargs['instance']
  1156      try:
  1157          _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
  1158      except KeyError:
  1159          # Ignore error when the directory already exists.
  1160          pass
  1161  
  1162  
  1163  def _etcd_purge_app(**kwargs):
  1164      appname = kwargs['instance']
  1165      try:
  1166          _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
  1167      except KeyError:
  1168          pass
  1169  
  1170  
  1171  def _etcd_publish_cert(**kwargs):
  1172      cert = kwargs['instance']
  1173      _etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
  1174      _etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)
  1175  
  1176  
  1177  def _etcd_purge_cert(**kwargs):
  1178      cert = kwargs['instance']
  1179      try:
  1180          _etcd_client.delete('/deis/certs/{}'.format(cert),
  1181                              prevExist=True, dir=True, recursive=True)
  1182      except KeyError:
  1183          pass
  1184  
  1185  
  1186  def _etcd_publish_config(**kwargs):
  1187      config = kwargs['instance']
  1188      # purge all existing config before publishing the newest instance: `deis
  1189      # config:unset` removes a value from the new Config object, but the old key
  1190      # would otherwise remain in etcd
  1191      try:
  1192          _etcd_client.delete('/deis/config/{}'.format(config.app),
  1193                              prevExist=True, dir=True, recursive=True)
  1194      except KeyError:
  1195          pass
  1196      for k, v in config.values.iteritems():
  1197          _etcd_client.write(
  1198              '/deis/config/{}/{}'.format(
  1199                  config.app,
  1200                  unicode(k).encode('utf-8').lower()),
  1201              unicode(v).encode('utf-8'))
  1202  
  1203  
  1204  def _etcd_purge_config(**kwargs):
  1205      config = kwargs['instance']
  1206      try:
  1207          _etcd_client.delete('/deis/config/{}'.format(config.app),
  1208                              prevExist=True, dir=True, recursive=True)
  1209      except KeyError:
  1210          pass
  1211  
  1212  
  1213  def _etcd_publish_domains(**kwargs):
  1214      domain = kwargs['instance']
  1215      _etcd_client.write('/deis/domains/{}'.format(domain), domain.app)
  1216  
  1217  
  1218  def _etcd_purge_domains(**kwargs):
  1219      domain = kwargs['instance']
  1220      try:
  1221          _etcd_client.delete('/deis/domains/{}'.format(domain),
  1222                              prevExist=True, dir=True, recursive=True)
  1223      except KeyError:
  1224          pass
  1225  
  1226  
  1227  # Log significant app-related events
  1228  post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
  1229  post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
  1230  post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
  1231  post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
  1232  post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
  1233  post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
  1234  post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')
  1235  
  1236  
  1237  # automatically generate a new token on creation
  1238  @receiver(post_save, sender=get_user_model())
  1239  def create_auth_token(sender, instance=None, created=False, **kwargs):
  1240      if created:
  1241          Token.objects.create(user=instance)
  1242  
  1243  
  1244  _etcd_client = get_etcd_client()
  1245  
  1246  
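        # if etcd was unreachable at import time, _etcd_client is None and none of the
        # etcd synchronization handlers below are connected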
  1247  if _etcd_client:
  1248      post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
  1249      post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
  1250      post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
  1251      post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
  1252      post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
  1253      post_save.connect(_etcd_publish_app, sender=App, dispatch_uid='api.models')
  1254      post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
  1255      post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
  1256      post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
  1257      post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models')
  1258      post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')