github.com/amrnt/deis@v1.3.1/controller/api/models.py

# -*- coding: utf-8 -*-

"""
Data models for the Deis API.
"""

from __future__ import unicode_literals
import base64
import etcd
import importlib
import logging
import os
import re
import subprocess
import time
import threading

from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django_fsm import FSMField, transition
from django_fsm.signals import post_transition
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
import requests
from rest_framework.authtoken.models import Token

from api import fields, utils
from registry import publish_release
from utils import dict_diff, fingerprint


logger = logging.getLogger(__name__)


def log_event(app, msg, level=logging.INFO):
    msg = "{}: {}".format(app.id, msg)
    logger.log(level, msg)  # django logger
    app.log(msg)  # local filesystem


def validate_base64(value):
    """Check that value contains only valid base64 characters."""
    try:
        base64.b64decode(value.split()[1])
    except Exception as e:
        raise ValidationError(e)


def validate_id_is_docker_compatible(value):
    """
    Check that the ID follows docker's image name constraints
    """
    match = re.match(r'^[a-z0-9-]+$', value)
    if not match:
        raise ValidationError("App IDs can only contain [a-z0-9-].")


def validate_app_structure(value):
    """Error if the dict values aren't ints >= 0."""
    try:
        for k, v in value.iteritems():
            if int(v) < 0:
                raise ValueError("Must be greater than or equal to zero")
    except ValueError, err:
        raise ValidationError(err)


def validate_reserved_names(value):
    """A value cannot use some reserved names."""
    if value in ['deis']:
        raise ValidationError('{} is a reserved name.'.format(value))


def validate_comma_separated(value):
    """Error if the value doesn't look like a list of hostnames or IP addresses
    separated by commas.
    """
    if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
        raise ValidationError(
            "{} should be a comma-separated list".format(value))


def validate_domain(value):
    """Error if the domain contains unexpected characters."""
    if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
        raise ValidationError('"{}" contains unexpected characters'.format(value))


class AuditedModel(models.Model):
    """Add created and updated fields to a model."""

    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True


class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""

    uuid = fields.UuidField('UUID', primary_key=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True


@python_2_unicode_compatible
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True, default=utils.generate_app_name,
                          validators=[validate_id_is_docker_compatible,
                                      validate_reserved_names])
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def _get_scheduler(self, *args, **kwargs):
        module_name = 'scheduler.' + settings.SCHEDULER_MODULE
        mod = importlib.import_module(module_name)

        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    _scheduler = property(_get_scheduler)

    @property
    def url(self):
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
                                              message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        for c in self.container_set.exclude(type='run'):
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.values('type').annotate(Count('pk')).order_by()
        self.structure = {v['type']: v['pk__count'] for v in vals}
        self.save()
        return changed
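
    # Illustrative only (not part of the original file): `structure` maps process
    # types to desired container counts. Assuming an app whose Procfile defines
    # `web` and `worker` entries, a call such as
    #
    #     app.scale(user, {'web': 3, 'worker': 1})
    #
    # creates or destroys containers until three `web` and one `worker` container
    # exist, then persists the resulting counts in `app.structure`.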

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        create_threads = []
        start_threads = []
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if set([c.state for c in to_add]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set([Container.UP]):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = []
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        [c.delete() for c in to_destroy if c.state == Container.DESTROYED]
        if set([c.state for c in to_destroy]) != set([Container.DESTROYED]):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application"""
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)

        # create new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.create))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that failed to create
        if len(new) > 0 and set([c.state for c in new]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            self._destroy_containers(new)
            raise RuntimeError(err)

        # start new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.start))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that didn't come up correctly
        if len(new) > 0 and set([c.state for c in new]) != set([Container.UP]):
            # report the deploy error
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

        # destroy old containers
        if existing:
            self._destroy_containers(existing)

        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)
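
    # Illustrative only (not part of the original file): with the rules above, a
    # build promoted from an external Docker image (no git SHA) is first scaled to
    # {'cmd': 1}, while a git push whose Procfile contains a `web` entry is first
    # scaled to {'web': 1}.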

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)


@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.
    """
    INITIALIZED = 'initialized'
    CREATED = 'created'
    UP = 'up'
    DOWN = 'down'
    DESTROYED = 'destroyed'
    CRASHED = 'crashed'
    ERROR = 'error'
    STATE_CHOICES = (
        (INITIALIZED, 'initialized'),
        (CREATED, 'created'),
        (UP, 'up'),
        (DOWN, 'down'),
        (DESTROYED, 'destroyed'),
        (CRASHED, 'crashed'),
        (ERROR, 'error'),
    )

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    release = models.ForeignKey('Release')
    type = models.CharField(max_length=128, blank=False)
    num = models.PositiveIntegerField()
    state = FSMField(default=INITIALIZED, choices=STATE_CHOICES,
                     protected=True, propagate=False)

    def short_name(self):
        return "{}.{}.{}".format(self.app.id, self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return self.short_name()

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']

    def _get_job_id(self):
        app = self.app.id
        release = self.release
        version = "v{}".format(release.version)
        num = self.num
        job_id = "{app}_{version}.{self.type}.{num}".format(**locals())
        return job_id

    _job_id = property(_get_job_id)
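
    # Illustrative only (not part of the original file): for an app "myapp" at
    # release v2, the first web container gets the job id "myapp_v2.web.1"; this
    # is the name handed to the scheduler backend in the transitions below.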

    def _get_scheduler(self):
        return self.app._scheduler

    _scheduler = property(_get_scheduler)

    def _get_command(self):
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            if self.release.build.dockerfile or not self.release.build.sha:
                return "bash -c '{}'".format(self.release.build.procfile[self.type])
            else:
                return 'start {}'.format(self.type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if self.type == 'cmd' else 'start {}'.format(self.type)

    _command = property(_get_command)

    def clone(self, release):
        c = Container.objects.create(owner=self.owner,
                                     app=self.app,
                                     release=release,
                                     type=self.type,
                                     num=self.num)
        return c

    @transition(field=state, source=INITIALIZED, target=CREATED, on_error=ERROR)
    def create(self):
        image = self.release.image
        kwargs = {'memory': self.release.config.memory,
                  'cpu': self.release.config.cpu,
                  'tags': self.release.config.tags}
        job_id = self._job_id
        try:
            self._scheduler.create(
                name=job_id,
                image=image,
                command=self._command,
                **kwargs)
        except Exception as e:
            err = '{} (create): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source=[CREATED, UP, DOWN], target=UP, on_error=CRASHED)
    def start(self):
        job_id = self._job_id
        try:
            self._scheduler.start(job_id)
        except Exception as e:
            err = '{} (start): {}'.format(job_id, e)
            log_event(self.app, err, logging.WARNING)
            raise

    @transition(field=state, source=UP, target=DOWN, on_error=ERROR)
    def stop(self):
        job_id = self._job_id
        try:
            self._scheduler.stop(job_id)
        except Exception as e:
            err = '{} (stop): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source='*', target=DESTROYED, on_error=ERROR)
    def destroy(self):
        job_id = self._job_id
        try:
            self._scheduler.destroy(job_id)
        except Exception as e:
            err = '{} (destroy): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    def run(self, command):
        """Run a one-off command"""
        if self.release.build is None:
            raise EnvironmentError('No build associated with this release '
                                   'to run this command')
        image = self.release.image
        job_id = self._job_id
        entrypoint = '/bin/bash'
        # if this is a procfile-based app, switch the entrypoint to slugrunner's default
        # FIXME: remove slugrunner's hardcoded entrypoint
        if self.release.build.procfile and \
           self.release.build.sha and not \
           self.release.build.dockerfile:
            entrypoint = '/runner/init'
            command = "'{}'".format(command)
        else:
            command = "-c '{}'".format(command)
        try:
            rc, output = self._scheduler.run(job_id, image, entrypoint, command)
            return rc, output
        except Exception as e:
            err = '{} (run): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise


@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField(max_length=40)

    fingerprint = models.CharField(max_length=255)
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)

    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])


@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default={}, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        initial = True if self.app.structure == {} else False
        try:
            self.app.deploy(user, new_release, initial=initial)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])


@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    values = JSONField(default={}, blank=True)
    memory = JSONField(default={}, blank=True)
    cpu = JSONField(default={}, blank=True)
    tags = JSONField(default={}, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])

    def save(self, **kwargs):
        """merge the old config with the new"""
        try:
            previous_config = self.app.config_set.latest()
            for attr in ['cpu', 'memory', 'tags', 'values']:
                # Guard against migrations from older apps without fixes to
                # JSONField encoding.
                try:
                    data = getattr(previous_config, attr).copy()
                except AttributeError:
                    data = {}
                try:
                    new_data = getattr(self, attr).copy()
                except AttributeError:
                    new_data = {}
                data.update(new_data)
                # remove config keys if we provided a null value
                [data.pop(k) for k, v in new_data.items() if v is None]
                setattr(self, attr, data)
        except Config.DoesNotExist:
            pass
        return super(Config, self).save(**kwargs)
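
# Illustrative only (not part of the original file): the merge in Config.save()
# means that saving a Config with values={'FOO': 'bar'} on top of a previous
# Config with values={'FOO': 'old', 'BAZ': 'qux'} yields
# {'FOO': 'bar', 'BAZ': 'qux'}, while an explicit null (values={'BAZ': None})
# removes that key entirely.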


@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)

    config = models.ForeignKey('Config')
    build = models.ForeignKey('Build', null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        return '{}:v{}'.format(self.app.id, str(self.version))

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.
        """
        # construct fully-qualified target image
        new_version = self.version + 1
        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary)
        try:
            release.publish()
        except EnvironmentError as e:
            # If we cannot publish this app, just log and carry on
            logger.info(e)
            pass
        return release

    def publish(self, source_version='latest'):
        if self.build is None:
            raise EnvironmentError('No build associated with this release to publish')
        source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
        source_image = '{}:{}'.format(self.build.image, source_tag)
        # IOW, this image did not come from the builder
        # FIXME: remove check for mock registry module
        if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
            # we assume that the image is not present on our registry,
            # so shell out a task to pull in the repository
            data = {
                'src': self.build.image
            }
            requests.post(
                '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
                                                    self.app.id),
                data=data,
            )
            # update the source image to the repository we just imported
            source_image = self.app.id
            # if the image imported had a tag specified, use that tag as the source
            if ':' in self.build.image:
                if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
                    source_image += self.build.image[self.build.image.rfind(':'):]
        publish_release(source_image,
                        self.config.values,
                        self.image)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def rollback(self, user, version):
        if version < 1:
            raise EnvironmentError('version cannot be below 1')
        summary = "{} rolled back to v{}".format(user, version)
        prev = self.app.release_set.get(version=version)
        new_release = self.new(
            user,
            build=prev.build,
            config=prev.config,
            summary=summary,
            source_version='v{}'.format(version))
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise
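
    # Illustrative only (not part of the original file): a rollback never rewinds
    # the version counter. If the latest release is v5, release.rollback(user, 3)
    # creates and deploys a new v6 release that reuses the build and config of v3.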

    def save(self, *args, **kwargs):  # noqa
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None
            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')
                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')
                if changes:
                    changes = 'changed limits for ' + ', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)


@python_2_unicode_compatible
class Domain(AuditedModel):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    domain = models.TextField(blank=False, null=False, unique=True)

    def __str__(self):
        return self.domain


@python_2_unicode_compatible
class Key(UuidAuditedModel):
    """An SSH public key."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=128)
    public = models.TextField(unique=True, validators=[validate_base64])

    class Meta:
        verbose_name = 'SSH Key'
        unique_together = (('owner', 'id'))

    def __str__(self):
        return "{}...{}".format(self.public[:18], self.public[-31:])


# define update/delete callbacks for synchronizing
# models with the configuration management backend

def _log_build_created(**kwargs):
    if kwargs.get('created'):
        build = kwargs['instance']
        log_event(build.app, "build {} created".format(build))


def _log_release_created(**kwargs):
    if kwargs.get('created'):
        release = kwargs['instance']
        log_event(release.app, "release {} created".format(release))
        # append release lifecycle logs to the app
        release.app.log(release.summary)


def _log_config_updated(**kwargs):
    config = kwargs['instance']
    log_event(config.app, "config {} updated".format(config))


def _log_domain_added(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} added".format(domain)
    log_event(domain.app, msg)
    # adding a domain does not create a release, so we have to log here
    domain.app.log(msg)


def _log_domain_removed(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} removed".format(domain)
    log_event(domain.app, msg)
    # removing a domain does not create a release, so we have to log here
    domain.app.log(msg)


def _etcd_publish_key(**kwargs):
    key = kwargs['instance']
    _etcd_client.write('/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public)), key.public)


def _etcd_purge_key(**kwargs):
    key = kwargs['instance']
    _etcd_client.delete('/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public)))


def _etcd_purge_user(**kwargs):
    username = kwargs['instance'].username
    try:
        _etcd_client.delete(
            '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
    except KeyError:
        # If _etcd_publish_key() wasn't called, there is no user dir to delete.
        pass


def _etcd_create_app(**kwargs):
    appname = kwargs['instance']
    if kwargs['created']:
        _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)


def _etcd_purge_app(**kwargs):
    appname = kwargs['instance']
    _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)


def _etcd_publish_domains(**kwargs):
    app = kwargs['instance'].app
    app_domains = app.domain_set.all()
    if app_domains:
        _etcd_client.write('/deis/domains/{}'.format(app),
                           ' '.join(str(d.domain) for d in app_domains))


def _etcd_purge_domains(**kwargs):
    app = kwargs['instance'].app
    app_domains = app.domain_set.all()
    if app_domains:
        _etcd_client.write('/deis/domains/{}'.format(app),
                           ' '.join(str(d.domain) for d in app_domains))
    else:
        _etcd_client.delete('/deis/domains/{}'.format(app))


# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')


# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)


# save FSM transitions as they happen
def _save_transition(**kwargs):
    kwargs['instance'].save()
    # close database connections after transition
    # to avoid leaking connections inside threads
    from django.db import connection
    connection.close()

post_transition.connect(_save_transition)

# wire up etcd publishing if we can connect
try:
    _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
    _etcd_client.get('/deis')
except etcd.EtcdException:
    logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
    _etcd_client = None

if _etcd_client:
    post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
    post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
    post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
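
# Illustrative only (not part of the original file): a minimal sketch of how these
# models fit together outside the REST views, assuming an existing `user` and a
# configured scheduler and registry. The image name is hypothetical.
#
#     app = App.objects.create(owner=user, id='myapp')
#     app.create()                                       # seed initial config and release v1
#     build = Build.objects.create(owner=user, app=app, image='deis/example-go')
#     release = build.create(user)                       # release v2, deployed with default scale
#     app.scale(user, {'cmd': 2})                        # scale to two containers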