github.com/rochacon/deis@v1.0.2-0.20150903015341-6839b592a1ff/controller/api/models.py (about)

     1  # -*- coding: utf-8 -*-
     2  
     3  """
     4  Data models for the Deis API.
     5  """
     6  
     7  from __future__ import unicode_literals
     8  import base64
     9  from datetime import datetime
    10  import etcd
    11  import importlib
    12  import logging
    13  import os
    14  import re
    15  import subprocess
    16  import time
    17  from threading import Thread
    18  
    19  from django.conf import settings
    20  from django.contrib.auth import get_user_model
    21  from django.core.exceptions import ValidationError, SuspiciousOperation
    22  from django.db import models
    23  from django.db.models import Count
    24  from django.db.models import Max
    25  from django.db.models.signals import post_delete, post_save
    26  from django.dispatch import receiver
    27  from django.utils.encoding import python_2_unicode_compatible
    28  from docker.utils import utils as dockerutils
    29  from json_field.fields import JSONField
    30  from OpenSSL import crypto
    31  import requests
    32  from rest_framework.authtoken.models import Token
    33  
    34  from api import fields, utils, exceptions
    35  from registry import publish_release
    36  from utils import dict_diff, fingerprint
    37  
    38  
    39  logger = logging.getLogger(__name__)
    40  
    41  
    42  def close_db_connections(func, *args, **kwargs):
    43      """
    44      Decorator to explicitly close db connections during threaded execution
    45  
    46      Note this is necessary to work around:
    47      https://code.djangoproject.com/ticket/22420
    48      """
    49      def _close_db_connections(*args, **kwargs):
    50          ret = None
    51          try:
    52              ret = func(*args, **kwargs)
    53          finally:
    54              from django.db import connections
    55              for conn in connections.all():
    56                  conn.close()
    57          return ret
    58      return _close_db_connections
    59  
    60  
    61  def log_event(app, msg, level=logging.INFO):
    62      # controller needs to know which app this log comes from
    63      logger.log(level, "{}: {}".format(app.id, msg))
    64      app.log(msg)
    65  
    66  
    67  def validate_base64(value):
    68      """Check that value contains only valid base64 characters."""
    69      try:
    70          base64.b64decode(value.split()[1])
    71      except Exception as e:
    72          raise ValidationError(e)
    73  
    74  
    75  def validate_id_is_docker_compatible(value):
    76      """
    77      Check that the ID follows docker's image name constraints
    78      """
    79      match = re.match(r'^[a-z0-9-]+$', value)
    80      if not match:
    81          raise ValidationError("App IDs can only contain [a-z0-9-].")
    82  
    83  
    84  def validate_app_structure(value):
    85      """Error if the dict values aren't ints >= 0."""
    86      try:
    87          if any(int(v) < 0 for v in value.viewvalues()):
    88              raise ValueError("Must be greater than or equal to zero")
    89      except ValueError, err:
    90          raise ValidationError(err)
    91  
    92  
    93  def validate_reserved_names(value):
    94      """A value cannot use some reserved names."""
    95      if value in settings.DEIS_RESERVED_NAMES:
    96          raise ValidationError('{} is a reserved name.'.format(value))
    97  
    98  
    99  def validate_comma_separated(value):
   100      """Error if the value doesn't look like a list of hostnames or IP addresses
   101      separated by commas.
   102      """
   103      if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
   104          raise ValidationError(
   105              "{} should be a comma-separated list".format(value))
   106  
   107  
   108  def validate_domain(value):
   109      """Error if the domain contains unexpected characters."""
   110      if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
   111          raise ValidationError('"{}" contains unexpected characters'.format(value))
   112  
   113  
   114  def validate_certificate(value):
   115      try:
   116          crypto.load_certificate(crypto.FILETYPE_PEM, value)
   117      except crypto.Error as e:
   118          raise ValidationError('Could not load certificate: {}'.format(e))
   119  
   120  
   121  def get_etcd_client():
   122      if not hasattr(get_etcd_client, "client"):
   123          # wire up etcd publishing if we can connect
   124          try:
   125              get_etcd_client.client = etcd.Client(
   126                  host=settings.ETCD_HOST,
   127                  port=int(settings.ETCD_PORT))
   128              get_etcd_client.client.get('/deis')
   129          except etcd.EtcdException:
   130              logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
   131              get_etcd_client.client = None
   132      return get_etcd_client.client
   133  
   134  
   135  class AuditedModel(models.Model):
   136      """Add created and updated fields to a model."""
   137  
   138      created = models.DateTimeField(auto_now_add=True)
   139      updated = models.DateTimeField(auto_now=True)
   140  
   141      class Meta:
   142          """Mark :class:`AuditedModel` as abstract."""
   143          abstract = True
   144  
   145  
   146  def select_app_name():
   147      """Select a unique randomly generated app name"""
   148      name = utils.generate_app_name()
   149  
   150      while App.objects.filter(id=name).exists():
   151          name = utils.generate_app_name()
   152  
   153      return name
   154  
   155  
   156  class UuidAuditedModel(AuditedModel):
   157      """Add a UUID primary key to an :class:`AuditedModel`."""
   158  
   159      uuid = fields.UuidField('UUID', primary_key=True)
   160  
   161      class Meta:
   162          """Mark :class:`UuidAuditedModel` as abstract."""
   163          abstract = True
   164  
   165  
   166  @python_2_unicode_compatible
   167  class App(UuidAuditedModel):
   168      """
   169      Application used to service requests on behalf of end-users
   170      """
   171  
   172      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   173      id = models.SlugField(max_length=64, unique=True, default=select_app_name,
   174                            validators=[validate_id_is_docker_compatible,
   175                                        validate_reserved_names])
   176      structure = JSONField(default={}, blank=True, validators=[validate_app_structure])
   177  
   178      class Meta:
   179          permissions = (('use_app', 'Can use app'),)
   180  
   181      @property
   182      def _scheduler(self):
   183          mod = importlib.import_module(settings.SCHEDULER_MODULE)
   184          return mod.SchedulerClient(settings.SCHEDULER_TARGET,
   185                                     settings.SCHEDULER_AUTH,
   186                                     settings.SCHEDULER_OPTIONS,
   187                                     settings.SSH_PRIVATE_KEY)
   188  
   189      def __str__(self):
   190          return self.id
   191  
   192      @property
   193      def url(self):
   194          return self.id + '.' + settings.DEIS_DOMAIN
   195  
   196      def _get_job_id(self, container_type):
   197          app = self.id
   198          release = self.release_set.latest()
   199          version = "v{}".format(release.version)
   200          job_id = "{app}_{version}.{container_type}".format(**locals())
   201          return job_id
   202  
   203      def _get_command(self, container_type):
   204          try:
   205              # if this is not procfile-based app, ensure they cannot break out
   206              # and run arbitrary commands on the host
   207              # FIXME: remove slugrunner's hardcoded entrypoint
   208              release = self.release_set.latest()
   209              if release.build.dockerfile or not release.build.sha:
   210                  return "bash -c '{}'".format(release.build.procfile[container_type])
   211              else:
   212                  return 'start {}'.format(container_type)
   213          # if the key is not present or if a parent attribute is None
   214          except (KeyError, TypeError, AttributeError):
   215              # handle special case for Dockerfile deployments
   216              return '' if container_type == 'cmd' else 'start {}'.format(container_type)
   217  
   218      def log(self, message):
   219          """Logs a message to the application's log file.
   220  
   221          This is a workaround for how Django interacts with Python's logging module. Each app
   222          needs its own FileHandler instance so it can write to its own log file. That won't work in
   223          Django's case because logging is set up before you run the server and it disables all
   224          existing logging configurations.
   225          """
   226          with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
   227              msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
   228                                                message)
   229              f.write(msg.encode('utf-8'))
   230  
   231      def create(self, *args, **kwargs):
   232          """Create a new application with an initial config and release"""
   233          config = Config.objects.create(owner=self.owner, app=self)
   234          Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)
   235  
   236      def delete(self, *args, **kwargs):
   237          """Delete this application including all containers"""
   238          try:
   239              # attempt to remove containers from the scheduler
   240              self._destroy_containers([c for c in self.container_set.exclude(type='run')])
   241          except RuntimeError:
   242              pass
   243          self._clean_app_logs()
   244          return super(App, self).delete(*args, **kwargs)
   245  
   246      def restart(self, **kwargs):
   247          to_restart = self.container_set.all()
   248          if kwargs.get('type'):
   249              to_restart = to_restart.filter(type=kwargs.get('type'))
   250          if kwargs.get('num'):
   251              to_restart = to_restart.filter(num=kwargs.get('num'))
   252          self._restart_containers(to_restart)
   253          return to_restart
   254  
   255      def _clean_app_logs(self):
   256          """Delete application logs stored by the logger component"""
   257          path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
   258          if os.path.exists(path):
   259              os.remove(path)
   260  
   261      def scale(self, user, structure):  # noqa
   262          """Scale containers up or down to match requested structure."""
   263          if self.release_set.latest().build is None:
   264              raise EnvironmentError('No build associated with this release')
   265          requested_structure = structure.copy()
   266          release = self.release_set.latest()
   267          # test for available process types
   268          available_process_types = release.build.procfile or {}
   269          for container_type in requested_structure:
   270              if container_type == 'cmd':
   271                  continue  # allow docker cmd types in case we don't have the image source
   272              if container_type not in available_process_types:
   273                  raise EnvironmentError(
   274                      'Container type {} does not exist in application'.format(container_type))
   275          msg = '{} scaled containers '.format(user.username) + ' '.join(
   276              "{}={}".format(k, v) for k, v in requested_structure.items())
   277          log_event(self, msg)
   278          # iterate and scale by container type (web, worker, etc)
   279          changed = False
   280          to_add, to_remove = [], []
   281          scale_types = {}
   282  
   283          # iterate on a copy of the container_type keys
   284          for container_type in requested_structure.keys():
   285              containers = list(self.container_set.filter(type=container_type).order_by('created'))
   286              # increment new container nums off the most recent container
   287              results = self.container_set.filter(type=container_type).aggregate(Max('num'))
   288              container_num = (results.get('num__max') or 0) + 1
   289              requested = requested_structure.pop(container_type)
   290              diff = requested - len(containers)
   291              if diff == 0:
   292                  continue
   293              changed = True
   294              scale_types[container_type] = requested
   295              while diff < 0:
   296                  c = containers.pop()
   297                  to_remove.append(c)
   298                  diff += 1
   299              while diff > 0:
   300                  # create a database record
   301                  c = Container.objects.create(owner=self.owner,
   302                                               app=self,
   303                                               release=release,
   304                                               type=container_type,
   305                                               num=container_num)
   306                  to_add.append(c)
   307                  container_num += 1
   308                  diff -= 1
   309  
   310          if changed:
   311              if "scale" in dir(self._scheduler):
   312                  self._scale_containers(scale_types, to_remove)
   313              else:
   314                  if to_add:
   315                      self._start_containers(to_add)
   316                  if to_remove:
   317                      self._destroy_containers(to_remove)
   318          # save new structure to the database
   319          vals = self.container_set.exclude(type='run').values(
   320              'type').annotate(Count('pk')).order_by()
   321          new_structure = structure.copy()
   322          new_structure.update({v['type']: v['pk__count'] for v in vals})
   323          self.structure = new_structure
   324          self.save()
   325          return changed
   326  
   327      def _scale_containers(self, scale_types, to_remove):
   328          release = self.release_set.latest()
   329          for scale_type in scale_types:
   330              image = release.image
   331              version = "v{}".format(release.version)
   332              kwargs = {'memory': release.config.memory,
   333                        'cpu': release.config.cpu,
   334                        'tags': release.config.tags,
   335                        'version': version,
   336                        'aname': self.id,
   337                        'num': scale_types[scale_type]}
   338              job_id = self._get_job_id(scale_type)
   339              command = self._get_command(scale_type)
   340              try:
   341                  self._scheduler.scale(
   342                      name=job_id,
   343                      image=image,
   344                      command=command,
   345                      **kwargs)
   346              except Exception as e:
   347                  err = '{} (scale): {}'.format(job_id, e)
   348                  log_event(self, err, logging.ERROR)
   349                  raise
   350          [c.delete() for c in to_remove]
   351  
   352      def _start_containers(self, to_add):
   353          """Creates and starts containers via the scheduler"""
   354          if not to_add:
   355              return
   356          create_threads = [Thread(target=c.create) for c in to_add]
   357          start_threads = [Thread(target=c.start) for c in to_add]
   358          [t.start() for t in create_threads]
   359          [t.join() for t in create_threads]
   360          if any(c.state != 'created' for c in to_add):
   361              err = 'aborting, failed to create some containers'
   362              log_event(self, err, logging.ERROR)
   363              self._destroy_containers(to_add)
   364              raise RuntimeError(err)
   365          [t.start() for t in start_threads]
   366          [t.join() for t in start_threads]
   367          if set([c.state for c in to_add]) != set(['up']):
   368              err = 'warning, some containers failed to start'
   369              log_event(self, err, logging.WARNING)
   370          # if the user specified a health check, try checking to see if it's running
   371          try:
   372              config = self.config_set.latest()
   373              if 'HEALTHCHECK_URL' in config.values.keys():
   374                  self._healthcheck(to_add, config.values)
   375          except Config.DoesNotExist:
   376              pass
   377  
   378      def _healthcheck(self, containers, config):
   379          # if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
   380          intervals = [1.0, 0.1, 0.5, 1.0]
   381          # HACK (bacongobbler): we need to wait until publisher has a chance to publish each
   382          # service to etcd, which can take up to 20 seconds.
   383          time.sleep(20)
   384          for i in xrange(len(intervals)):
   385              delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
   386              try:
   387                  # sleep until the initial timeout is over
   388                  if delay > 0:
   389                      time.sleep(delay * intervals[i])
   390                  self._do_healthcheck(containers, config)
   391                  break
   392              except exceptions.HealthcheckException as e:
   393                  try:
   394                      next_delay = delay * intervals[i+1]
   395                      msg = "{}; trying again in {} seconds".format(e, next_delay)
   396                      log_event(self, msg, logging.WARNING)
   397                  except IndexError:
   398                      log_event(self, e, logging.WARNING)
   399          else:
   400              self._destroy_containers(containers)
   401              msg = "aborting, app containers failed to respond to health check"
   402              log_event(self, msg, logging.ERROR)
   403              raise RuntimeError(msg)
   404  
   405      def _do_healthcheck(self, containers, config):
   406          path = config.get('HEALTHCHECK_URL', '/')
   407          timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
   408          if not _etcd_client:
   409              raise exceptions.HealthcheckException('no etcd client available')
   410          for container in containers:
   411              try:
   412                  key = "/deis/services/{self}/{container.job_id}".format(**locals())
   413                  url = "http://{}{}".format(_etcd_client.get(key).value, path)
   414                  response = requests.get(url, timeout=timeout)
   415                  if response.status_code != requests.codes.OK:
   416                      raise exceptions.HealthcheckException(
   417                          "app failed health check (got '{}', expected: '200')".format(
   418                              response.status_code))
   419              except (requests.Timeout, requests.ConnectionError, KeyError) as e:
   420                  raise exceptions.HealthcheckException(
   421                      'failed to connect to container ({})'.format(e))
   422  
   423      def _restart_containers(self, to_restart):
   424          """Restarts containers via the scheduler"""
   425          if not to_restart:
   426              return
   427          stop_threads = [Thread(target=c.stop) for c in to_restart]
   428          start_threads = [Thread(target=c.start) for c in to_restart]
   429          [t.start() for t in stop_threads]
   430          [t.join() for t in stop_threads]
   431          if any(c.state != 'created' for c in to_restart):
   432              err = 'warning, some containers failed to stop'
   433              log_event(self, err, logging.WARNING)
   434          [t.start() for t in start_threads]
   435          [t.join() for t in start_threads]
   436          if any(c.state != 'up' for c in to_restart):
   437              err = 'warning, some containers failed to start'
   438              log_event(self, err, logging.WARNING)
   439  
   440      def _destroy_containers(self, to_destroy):
   441          """Destroys containers via the scheduler"""
   442          if not to_destroy:
   443              return
   444          destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
   445          [t.start() for t in destroy_threads]
   446          [t.join() for t in destroy_threads]
   447          [c.delete() for c in to_destroy if c.state == 'destroyed']
   448          if any(c.state != 'destroyed' for c in to_destroy):
   449              err = 'aborting, failed to destroy some containers'
   450              log_event(self, err, logging.ERROR)
   451              raise RuntimeError(err)
   452  
   453      def deploy(self, user, release):
   454          """Deploy a new release to this application"""
   455          existing = self.container_set.exclude(type='run')
   456          new = []
   457          scale_types = set()
   458          for e in existing:
   459              n = e.clone(release)
   460              n.save()
   461              new.append(n)
   462              scale_types.add(e.type)
   463  
   464          if new and "deploy" in dir(self._scheduler):
   465              self._deploy_app(scale_types, release, existing)
   466          else:
   467              self._start_containers(new)
   468  
   469              # destroy old containers
   470              if existing:
   471                  self._destroy_containers(existing)
   472  
   473          # perform default scaling if necessary
   474          if self.structure == {} and release.build is not None:
   475              self._default_scale(user, release)
   476  
   477      def _deploy_app(self, scale_types, release, existing):
   478          for scale_type in scale_types:
   479              image = release.image
   480              version = "v{}".format(release.version)
   481              kwargs = {'memory': release.config.memory,
   482                        'cpu': release.config.cpu,
   483                        'tags': release.config.tags,
   484                        'aname': self.id,
   485                        'num': 0,
   486                        'version': version}
   487              job_id = self._get_job_id(scale_type)
   488              command = self._get_command(scale_type)
   489              try:
   490                  self._scheduler.deploy(
   491                      name=job_id,
   492                      image=image,
   493                      command=command,
   494                      **kwargs)
   495              except Exception as e:
   496                  err = '{} (deploy): {}'.format(job_id, e)
   497                  log_event(self, err, logging.ERROR)
   498                  raise
   499          [c.delete() for c in existing]
   500  
   501      def _default_scale(self, user, release):
   502          """Scale to default structure based on release type"""
   503          # if there is no SHA, assume a docker image is being promoted
   504          if not release.build.sha:
   505              structure = {'cmd': 1}
   506  
   507          # if a dockerfile exists without a procfile, assume docker workflow
   508          elif release.build.dockerfile and not release.build.procfile:
   509              structure = {'cmd': 1}
   510  
   511          # if a procfile exists without a web entry, assume docker workflow
   512          elif release.build.procfile and 'web' not in release.build.procfile:
   513              structure = {'cmd': 1}
   514  
   515          # default to heroku workflow
   516          else:
   517              structure = {'web': 1}
   518  
   519          self.scale(user, structure)
   520  
   521      def logs(self, log_lines=str(settings.LOG_LINES)):
   522          """Return aggregated log data for this application."""
   523          path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
   524          if not os.path.exists(path):
   525              raise EnvironmentError('Could not locate logs')
   526          data = subprocess.check_output(['tail', '-n', log_lines, path])
   527          return data
   528  
   529      def run(self, user, command):
   530          """Run a one-off command in an ephemeral app container."""
   531          # FIXME: remove the need for SSH private keys by using
   532          # a scheduler that supports one-off admin tasks natively
   533          if not settings.SSH_PRIVATE_KEY:
   534              raise EnvironmentError('Support for admin commands is not configured')
   535          if self.release_set.latest().build is None:
   536              raise EnvironmentError('No build associated with this release to run this command')
   537          # TODO: add support for interactive shell
   538          msg = "{} runs '{}'".format(user.username, command)
   539          log_event(self, msg)
   540          c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
   541  
   542          # create database record for run process
   543          c = Container.objects.create(owner=self.owner,
   544                                       app=self,
   545                                       release=self.release_set.latest(),
   546                                       type='run',
   547                                       num=c_num)
   548          image = c.release.image
   549  
   550          # check for backwards compatibility
   551          def _has_hostname(image):
   552              repo, tag = dockerutils.parse_repository_tag(image)
   553              return True if '/' in repo and '.' in repo.split('/')[0] else False
   554  
   555          if not _has_hostname(image):
   556              image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
   557                                        settings.REGISTRY_PORT,
   558                                        image)
   559          # SECURITY: shell-escape user input
   560          escaped_command = command.replace("'", "'\\''")
   561          return c.run(escaped_command)
   562  
   563  
   564  @python_2_unicode_compatible
   565  class Container(UuidAuditedModel):
   566      """
   567      Docker container used to securely host an application process.
   568      """
   569  
   570      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   571      app = models.ForeignKey('App')
   572      release = models.ForeignKey('Release')
   573      type = models.CharField(max_length=128, blank=False)
   574      num = models.PositiveIntegerField()
   575  
   576      @property
   577      def _scheduler(self):
   578          return self.app._scheduler
   579  
   580      @property
   581      def state(self):
   582          return self._scheduler.state(self.job_id).name
   583  
   584      def short_name(self):
   585          return "{}.{}.{}".format(self.app.id, self.type, self.num)
   586      short_name.short_description = 'Name'
   587  
   588      def __str__(self):
   589          return self.short_name()
   590  
   591      class Meta:
   592          get_latest_by = '-created'
   593          ordering = ['created']
   594  
   595      @property
   596      def job_id(self):
   597          version = "v{}".format(self.release.version)
   598          return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals())
   599  
   600      def _get_command(self):
   601          try:
   602              # if this is not procfile-based app, ensure they cannot break out
   603              # and run arbitrary commands on the host
   604              # FIXME: remove slugrunner's hardcoded entrypoint
   605              if self.release.build.dockerfile or not self.release.build.sha:
   606                  return "bash -c '{}'".format(self.release.build.procfile[self.type])
   607              else:
   608                  return 'start {}'.format(self.type)
   609          # if the key is not present or if a parent attribute is None
   610          except (KeyError, TypeError, AttributeError):
   611              # handle special case for Dockerfile deployments
   612              return '' if self.type == 'cmd' else 'start {}'.format(self.type)
   613  
   614      _command = property(_get_command)
   615  
   616      def clone(self, release):
   617          c = Container.objects.create(owner=self.owner,
   618                                       app=self.app,
   619                                       release=release,
   620                                       type=self.type,
   621                                       num=self.num)
   622          return c
   623  
   624      @close_db_connections
   625      def create(self):
   626          image = self.release.image
   627          kwargs = {'memory': self.release.config.memory,
   628                    'cpu': self.release.config.cpu,
   629                    'tags': self.release.config.tags}
   630          try:
   631              self._scheduler.create(
   632                  name=self.job_id,
   633                  image=image,
   634                  command=self._command,
   635                  **kwargs)
   636          except Exception as e:
   637              err = '{} (create): {}'.format(self.job_id, e)
   638              log_event(self.app, err, logging.ERROR)
   639              raise
   640  
   641      @close_db_connections
   642      def start(self):
   643          try:
   644              self._scheduler.start(self.job_id)
   645          except Exception as e:
   646              err = '{} (start): {}'.format(self.job_id, e)
   647              log_event(self.app, err, logging.WARNING)
   648              raise
   649  
   650      @close_db_connections
   651      def stop(self):
   652          try:
   653              self._scheduler.stop(self.job_id)
   654          except Exception as e:
   655              err = '{} (stop): {}'.format(self.job_id, e)
   656              log_event(self.app, err, logging.ERROR)
   657              raise
   658  
   659      @close_db_connections
   660      def destroy(self):
   661          try:
   662              self._scheduler.destroy(self.job_id)
   663          except Exception as e:
   664              err = '{} (destroy): {}'.format(self.job_id, e)
   665              log_event(self.app, err, logging.ERROR)
   666              raise
   667  
   668      def run(self, command):
   669          """Run a one-off command"""
   670          if self.release.build is None:
   671              raise EnvironmentError('No build associated with this release '
   672                                     'to run this command')
   673          image = self.release.image
   674          entrypoint = '/bin/bash'
   675          # if this is a procfile-based app, switch the entrypoint to slugrunner's default
   676          # FIXME: remove slugrunner's hardcoded entrypoint
   677          if self.release.build.procfile and \
   678             self.release.build.sha and not \
   679             self.release.build.dockerfile:
   680              entrypoint = '/runner/init'
   681              command = "'{}'".format(command)
   682          else:
   683              command = "-c '{}'".format(command)
   684          try:
   685              rc, output = self._scheduler.run(self.job_id, image, entrypoint, command)
   686              return rc, output
   687          except Exception as e:
   688              err = '{} (run): {}'.format(self.job_id, e)
   689              log_event(self.app, err, logging.ERROR)
   690              raise
   691  
   692  
   693  @python_2_unicode_compatible
   694  class Push(UuidAuditedModel):
   695      """
   696      Instance of a push used to trigger an application build
   697      """
   698      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   699      app = models.ForeignKey('App')
   700      sha = models.CharField(max_length=40)
   701  
   702      fingerprint = models.CharField(max_length=255)
   703      receive_user = models.CharField(max_length=255)
   704      receive_repo = models.CharField(max_length=255)
   705  
   706      ssh_connection = models.CharField(max_length=255)
   707      ssh_original_command = models.CharField(max_length=255)
   708  
   709      class Meta:
   710          get_latest_by = 'created'
   711          ordering = ['-created']
   712          unique_together = (('app', 'uuid'),)
   713  
   714      def __str__(self):
   715          return "{0}-{1}".format(self.app.id, self.sha[:7])
   716  
   717  
   718  @python_2_unicode_compatible
   719  class Build(UuidAuditedModel):
   720      """
   721      Instance of a software build used by runtime nodes
   722      """
   723  
   724      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   725      app = models.ForeignKey('App')
   726      image = models.CharField(max_length=256)
   727  
   728      # optional fields populated by builder
   729      sha = models.CharField(max_length=40, blank=True)
   730      procfile = JSONField(default={}, blank=True)
   731      dockerfile = models.TextField(blank=True)
   732  
   733      class Meta:
   734          get_latest_by = 'created'
   735          ordering = ['-created']
   736          unique_together = (('app', 'uuid'),)
   737  
   738      def create(self, user, *args, **kwargs):
   739          latest_release = self.app.release_set.latest()
   740          source_version = 'latest'
   741          if self.sha:
   742              source_version = 'git-{}'.format(self.sha)
   743          new_release = latest_release.new(user,
   744                                           build=self,
   745                                           config=latest_release.config,
   746                                           source_version=source_version)
   747          try:
   748              self.app.deploy(user, new_release)
   749              return new_release
   750          except RuntimeError:
   751              new_release.delete()
   752              raise
   753  
   754      def save(self, **kwargs):
   755          try:
   756              previous_build = self.app.build_set.latest()
   757              to_destroy = []
   758              for proctype in previous_build.procfile:
   759                  if proctype not in self.procfile:
   760                      for c in self.app.container_set.filter(type=proctype):
   761                          to_destroy.append(c)
   762              self.app._destroy_containers(to_destroy)
   763          except Build.DoesNotExist:
   764              pass
   765          return super(Build, self).save(**kwargs)
   766  
   767      def __str__(self):
   768          return "{0}-{1}".format(self.app.id, self.uuid[:7])
   769  
   770  
   771  @python_2_unicode_compatible
   772  class Config(UuidAuditedModel):
   773      """
   774      Set of configuration values applied as environment variables
   775      during runtime execution of the Application.
   776      """
   777  
   778      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   779      app = models.ForeignKey('App')
   780      values = JSONField(default={}, blank=True)
   781      memory = JSONField(default={}, blank=True)
   782      cpu = JSONField(default={}, blank=True)
   783      tags = JSONField(default={}, blank=True)
   784  
   785      class Meta:
   786          get_latest_by = 'created'
   787          ordering = ['-created']
   788          unique_together = (('app', 'uuid'),)
   789  
   790      def __str__(self):
   791          return "{}-{}".format(self.app.id, self.uuid[:7])
   792  
   793      def save(self, **kwargs):
   794          """merge the old config with the new"""
   795          try:
   796              previous_config = self.app.config_set.latest()
   797              for attr in ['cpu', 'memory', 'tags', 'values']:
   798                  # Guard against migrations from older apps without fixes to
   799                  # JSONField encoding.
   800                  try:
   801                      data = getattr(previous_config, attr).copy()
   802                  except AttributeError:
   803                      data = {}
   804                  try:
   805                      new_data = getattr(self, attr).copy()
   806                  except AttributeError:
   807                      new_data = {}
   808                  data.update(new_data)
   809                  # remove config keys if we provided a null value
   810                  [data.pop(k) for k, v in new_data.viewitems() if v is None]
   811                  setattr(self, attr, data)
   812          except Config.DoesNotExist:
   813              pass
   814          return super(Config, self).save(**kwargs)
   815  
   816  
   817  @python_2_unicode_compatible
   818  class Release(UuidAuditedModel):
   819      """
   820      Software release deployed by the application platform
   821  
   822      Releases contain a :class:`Build` and a :class:`Config`.
   823      """
   824  
   825      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
   826      app = models.ForeignKey('App')
   827      version = models.PositiveIntegerField()
   828      summary = models.TextField(blank=True, null=True)
   829  
   830      config = models.ForeignKey('Config')
   831      build = models.ForeignKey('Build', null=True)
   832  
   833      class Meta:
   834          get_latest_by = 'created'
   835          ordering = ['-created']
   836          unique_together = (('app', 'version'),)
   837  
   838      def __str__(self):
   839          return "{0}-v{1}".format(self.app.id, self.version)
   840  
   841      @property
   842      def image(self):
   843          return '{}:v{}'.format(self.app.id, str(self.version))
   844  
   845      def new(self, user, config, build, summary=None, source_version='latest'):
   846          """
   847          Create a new application release using the provided Build and Config
   848          on behalf of a user.
   849  
   850          Releases start at v1 and auto-increment.
   851          """
   852          # construct fully-qualified target image
   853          new_version = self.version + 1
   854          # create new release and auto-increment version
   855          release = Release.objects.create(
   856              owner=user, app=self.app, config=config,
   857              build=build, version=new_version, summary=summary)
   858          try:
   859              release.publish()
   860          except EnvironmentError as e:
   861              # If we cannot publish this app, just log and carry on
   862              log_event(self.app, e)
   863              pass
   864          return release
   865  
   866      def publish(self, source_version='latest'):
   867          if self.build is None:
   868              raise EnvironmentError('No build associated with this release to publish')
   869          source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
   870          source_image = '{}:{}'.format(self.build.image, source_tag)
   871          # IOW, this image did not come from the builder
   872          # FIXME: remove check for mock registry module
   873          if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
   874              # we assume that the image is not present on our registry,
   875              # so shell out a task to pull in the repository
   876              data = {
   877                  'src': self.build.image
   878              }
   879              requests.post(
   880                  '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
   881                                                      self.app.id),
   882                  data=data,
   883              )
   884              # update the source image to the repository we just imported
   885              source_image = self.app.id
   886              # if the image imported had a tag specified, use that tag as the source
   887              if ':' in self.build.image:
   888                  if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
   889                      source_image += self.build.image[self.build.image.rfind(':'):]
   890          publish_release(source_image,
   891                          self.config.values,
   892                          self.image)
   893  
   894      def previous(self):
   895          """
   896          Return the previous Release to this one.
   897  
   898          :return: the previous :class:`Release`, or None
   899          """
   900          releases = self.app.release_set
   901          if self.pk:
   902              releases = releases.exclude(pk=self.pk)
   903          try:
   904              # Get the Release previous to this one
   905              prev_release = releases.latest()
   906          except Release.DoesNotExist:
   907              prev_release = None
   908          return prev_release
   909  
   910      def rollback(self, user, version):
   911          if version < 1:
   912              raise EnvironmentError('version cannot be below 0')
   913          summary = "{} rolled back to v{}".format(user, version)
   914          prev = self.app.release_set.get(version=version)
   915          new_release = self.new(
   916              user,
   917              build=prev.build,
   918              config=prev.config,
   919              summary=summary,
   920              source_version='v{}'.format(version))
   921          try:
   922              self.app.deploy(user, new_release)
   923              return new_release
   924          except RuntimeError:
   925              new_release.delete()
   926              raise
   927  
   928      def save(self, *args, **kwargs):  # noqa
   929          if not self.summary:
   930              self.summary = ''
   931              prev_release = self.previous()
   932              # compare this build to the previous build
   933              old_build = prev_release.build if prev_release else None
   934              old_config = prev_release.config if prev_release else None
   935              # if the build changed, log it and who pushed it
   936              if self.version == 1:
   937                  self.summary += "{} created initial release".format(self.app.owner)
   938              elif self.build != old_build:
   939                  if self.build.sha:
   940                      self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
   941                  else:
   942                      self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
   943              # if the config data changed, log the dict diff
   944              if self.config != old_config:
   945                  dict1 = self.config.values
   946                  dict2 = old_config.values if old_config else {}
   947                  diff = dict_diff(dict1, dict2)
   948                  # try to be as succinct as possible
   949                  added = ', '.join(k for k in diff.get('added', {}))
   950                  added = 'added ' + added if added else ''
   951                  changed = ', '.join(k for k in diff.get('changed', {}))
   952                  changed = 'changed ' + changed if changed else ''
   953                  deleted = ', '.join(k for k in diff.get('deleted', {}))
   954                  deleted = 'deleted ' + deleted if deleted else ''
   955                  changes = ', '.join(i for i in (added, changed, deleted) if i)
   956                  if changes:
   957                      if self.summary:
   958                          self.summary += ' and '
   959                      self.summary += "{} {}".format(self.config.owner, changes)
   960                  # if the limits changed (memory or cpu), log the dict diff
   961                  changes = []
   962                  old_mem = old_config.memory if old_config else {}
   963                  diff = dict_diff(self.config.memory, old_mem)
   964                  if diff.get('added') or diff.get('changed') or diff.get('deleted'):
   965                      changes.append('memory')
   966                  old_cpu = old_config.cpu if old_config else {}
   967                  diff = dict_diff(self.config.cpu, old_cpu)
   968                  if diff.get('added') or diff.get('changed') or diff.get('deleted'):
   969                      changes.append('cpu')
   970                  if changes:
   971                      changes = 'changed limits for '+', '.join(changes)
   972                      self.summary += "{} {}".format(self.config.owner, changes)
   973                  # if the tags changed, log the dict diff
   974                  changes = []
   975                  old_tags = old_config.tags if old_config else {}
   976                  diff = dict_diff(self.config.tags, old_tags)
   977                  # try to be as succinct as possible
   978                  added = ', '.join(k for k in diff.get('added', {}))
   979                  added = 'added tag ' + added if added else ''
   980                  changed = ', '.join(k for k in diff.get('changed', {}))
   981                  changed = 'changed tag ' + changed if changed else ''
   982                  deleted = ', '.join(k for k in diff.get('deleted', {}))
   983                  deleted = 'deleted tag ' + deleted if deleted else ''
   984                  changes = ', '.join(i for i in (added, changed, deleted) if i)
   985                  if changes:
   986                      if self.summary:
   987                          self.summary += ' and '
   988                      self.summary += "{} {}".format(self.config.owner, changes)
   989              if not self.summary:
   990                  if self.version == 1:
   991                      self.summary = "{} created the initial release".format(self.owner)
   992                  else:
   993                      self.summary = "{} changed nothing".format(self.owner)
   994          super(Release, self).save(*args, **kwargs)
   995  
   996  
   997  @python_2_unicode_compatible
   998  class Domain(AuditedModel):
   999      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1000      app = models.ForeignKey('App')
  1001      domain = models.TextField(blank=False, null=False, unique=True)
  1002  
  1003      def __str__(self):
  1004          return self.domain
  1005  
  1006  
  1007  @python_2_unicode_compatible
  1008  class Certificate(AuditedModel):
  1009      """
  1010      Public and private key pair used to secure application traffic at the router.
  1011      """
  1012      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1013      # there is no upper limit on the size of an x.509 certificate
  1014      certificate = models.TextField(validators=[validate_certificate])
  1015      key = models.TextField()
  1016      # X.509 certificates allow any string of information as the common name.
  1017      common_name = models.TextField(unique=True)
  1018      expires = models.DateTimeField()
  1019  
  1020      def __str__(self):
  1021          return self.common_name
  1022  
  1023      def _get_certificate(self):
  1024          try:
  1025              return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
  1026          except crypto.Error as e:
  1027              raise SuspiciousOperation(e)
  1028  
  1029      def save(self, *args, **kwargs):
  1030          certificate = self._get_certificate()
  1031          if not self.common_name:
  1032              self.common_name = certificate.get_subject().CN
  1033          if not self.expires:
  1034              # convert openssl's expiry date format to Django's DateTimeField format
  1035              self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
  1036          return super(Certificate, self).save(*args, **kwargs)
  1037  
  1038  
  1039  @python_2_unicode_compatible
  1040  class Key(UuidAuditedModel):
  1041      """An SSH public key."""
  1042  
  1043      owner = models.ForeignKey(settings.AUTH_USER_MODEL)
  1044      id = models.CharField(max_length=128)
  1045      public = models.TextField(unique=True, validators=[validate_base64])
  1046      fingerprint = models.CharField(max_length=128)
  1047  
  1048      class Meta:
  1049          verbose_name = 'SSH Key'
  1050          unique_together = (('owner', 'fingerprint'))
  1051  
  1052      def __str__(self):
  1053          return "{}...{}".format(self.public[:18], self.public[-31:])
  1054  
  1055      def save(self, *args, **kwargs):
  1056          self.fingerprint = fingerprint(self.public)
  1057          return super(Key, self).save(*args, **kwargs)
  1058  
  1059  
  1060  # define update/delete callbacks for synchronizing
  1061  # models with the configuration management backend
  1062  
  1063  def _log_build_created(**kwargs):
  1064      if kwargs.get('created'):
  1065          build = kwargs['instance']
  1066          # log only to the controller; this event will be logged in the release summary
  1067          logger.info("{}: build {} created".format(build.app, build))
  1068  
  1069  
  1070  def _log_release_created(**kwargs):
  1071      if kwargs.get('created'):
  1072          release = kwargs['instance']
  1073          # log only to the controller; this event will be logged in the release summary
  1074          logger.info("{}: release {} created".format(release.app, release))
  1075          # append release lifecycle logs to the app
  1076          release.app.log(release.summary)
  1077  
  1078  
  1079  def _log_config_updated(**kwargs):
  1080      config = kwargs['instance']
  1081      # log only to the controller; this event will be logged in the release summary
  1082      logger.info("{}: config {} updated".format(config.app, config))
  1083  
  1084  
  1085  def _log_domain_added(**kwargs):
  1086      domain = kwargs['instance']
  1087      msg = "domain {} added".format(domain)
  1088      log_event(domain.app, msg)
  1089  
  1090  
  1091  def _log_domain_removed(**kwargs):
  1092      domain = kwargs['instance']
  1093      msg = "domain {} removed".format(domain)
  1094      log_event(domain.app, msg)
  1095  
  1096  
  1097  def _log_cert_added(**kwargs):
  1098      cert = kwargs['instance']
  1099      logger.info("cert {} added".format(cert))
  1100  
  1101  
  1102  def _log_cert_removed(**kwargs):
  1103      cert = kwargs['instance']
  1104      logger.info("cert {} removed".format(cert))
  1105  
  1106  
  1107  def _etcd_publish_key(**kwargs):
  1108      key = kwargs['instance']
  1109      _etcd_client.write('/deis/builder/users/{}/{}'.format(
  1110          key.owner.username, fingerprint(key.public)), key.public)
  1111  
  1112  
  1113  def _etcd_purge_key(**kwargs):
  1114      key = kwargs['instance']
  1115      try:
  1116          _etcd_client.delete('/deis/builder/users/{}/{}'.format(
  1117              key.owner.username, fingerprint(key.public)))
  1118      except KeyError:
  1119          pass
  1120  
  1121  
  1122  def _etcd_purge_user(**kwargs):
  1123      username = kwargs['instance'].username
  1124      try:
  1125          _etcd_client.delete(
  1126              '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
  1127      except KeyError:
  1128          # If _etcd_publish_key() wasn't called, there is no user dir to delete.
  1129          pass
  1130  
  1131  
  1132  def _etcd_create_app(**kwargs):
  1133      appname = kwargs['instance']
  1134      if kwargs['created']:
  1135          _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
  1136  
  1137  
  1138  def _etcd_purge_app(**kwargs):
  1139      appname = kwargs['instance']
  1140      try:
  1141          _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
  1142      except KeyError:
  1143          pass
  1144  
  1145  
  1146  def _etcd_publish_cert(**kwargs):
  1147      cert = kwargs['instance']
  1148      if kwargs['created']:
  1149          _etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
  1150          _etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)
  1151  
  1152  
  1153  def _etcd_purge_cert(**kwargs):
  1154      cert = kwargs['instance']
  1155      try:
  1156          _etcd_client.delete('/deis/certs/{}'.format(cert),
  1157                              prevExist=True, dir=True, recursive=True)
  1158      except KeyError:
  1159          pass
  1160  
  1161  
  1162  def _etcd_publish_config(**kwargs):
  1163      config = kwargs['instance']
  1164      # we purge all existing config when adding the newest instance. This is because
  1165      # deis config:unset would remove an existing value, but not delete the
  1166      # old config object
  1167      try:
  1168          _etcd_client.delete('/deis/config/{}'.format(config.app),
  1169                              prevExist=True, dir=True, recursive=True)
  1170      except KeyError:
  1171          pass
  1172      if kwargs['created']:
  1173          for k, v in config.values.iteritems():
  1174              _etcd_client.write(
  1175                  '/deis/config/{}/{}'.format(
  1176                      config.app,
  1177                      unicode(k).encode('utf-8').lower()),
  1178                  unicode(v).encode('utf-8'))
  1179  
  1180  
  1181  def _etcd_purge_config(**kwargs):
  1182      config = kwargs['instance']
  1183      try:
  1184          _etcd_client.delete('/deis/config/{}'.format(config.app),
  1185                              prevExist=True, dir=True, recursive=True)
  1186      except KeyError:
  1187          pass
  1188  
  1189  
  1190  def _etcd_publish_domains(**kwargs):
  1191      domain = kwargs['instance']
  1192      if kwargs['created']:
  1193          _etcd_client.write('/deis/domains/{}'.format(domain), domain.app)
  1194  
  1195  
  1196  def _etcd_purge_domains(**kwargs):
  1197      domain = kwargs['instance']
  1198      try:
  1199          _etcd_client.delete('/deis/domains/{}'.format(domain),
  1200                              prevExist=True, dir=True, recursive=True)
  1201      except KeyError:
  1202          pass
  1203  
  1204  
  1205  # Log significant app-related events
  1206  post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
  1207  post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
  1208  post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
  1209  post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
  1210  post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
  1211  post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
  1212  post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')
  1213  
  1214  
  1215  # automatically generate a new token on creation
  1216  @receiver(post_save, sender=get_user_model())
  1217  def create_auth_token(sender, instance=None, created=False, **kwargs):
  1218      if created:
  1219          Token.objects.create(user=instance)
  1220  
  1221  
  1222  _etcd_client = get_etcd_client()
  1223  
  1224  
  1225  if _etcd_client:
  1226      post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
  1227      post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
  1228      post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
  1229      post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
  1230      post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
  1231      post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
  1232      post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
  1233      post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
  1234      post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
  1235      post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models')
  1236      post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')