# -*- coding: utf-8 -*-

"""
Data models for the Deis API.
"""

from __future__ import unicode_literals
import etcd
import importlib
import logging
import os
import re
import subprocess
import time
import threading

from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django_fsm import FSMField, transition
from django_fsm.signals import post_transition
from docker.utils import utils
from json_field.fields import JSONField
import requests
from rest_framework.authtoken.models import Token

from api import fields
from registry import publish_release
from utils import dict_diff, fingerprint


logger = logging.getLogger(__name__)


def log_event(app, msg, level=logging.INFO):
    msg = "{}: {}".format(app.id, msg)
    logger.log(level, msg)  # django logger
    app.log(msg)  # local filesystem


def validate_app_structure(value):
    """Error if the dict values aren't ints >= 0."""
    try:
        for k, v in value.iteritems():
            if int(v) < 0:
                raise ValueError("Must be greater than or equal to zero")
    except ValueError, err:
        raise ValidationError(err)


def validate_comma_separated(value):
    """Error if the value doesn't look like a list of hostnames or IP addresses
    separated by commas.
    """
    if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
        raise ValidationError(
            "{} should be a comma-separated list".format(value))


def validate_domain(value):
    """Error if the domain contains unexpected characters."""
    if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
        raise ValidationError('"{}" contains unexpected characters'.format(value))
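

# Illustrative use of the validators above (a sketch, not part of the original
# module; the process-type names and domains are hypothetical):
#
#   validate_app_structure({'web': 2, 'worker': 0})   # passes
#   validate_app_structure({'web': -1})               # raises ValidationError
#   validate_comma_separated('example.com,10.0.0.1')  # passes
#   validate_domain('bad_domain!')                    # raises ValidationError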


class AuditedModel(models.Model):
    """Add created and updated fields to a model."""

    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True


class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""

    uuid = fields.UuidField('UUID', primary_key=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True


@python_2_unicode_compatible
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True)
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def _get_scheduler(self, *args, **kwargs):
        module_name = 'scheduler.' + settings.SCHEDULER_MODULE
        mod = importlib.import_module(module_name)

        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    _scheduler = property(_get_scheduler)

    @property
    def url(self):
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
                                              message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        for c in self.container_set.exclude(type='run'):
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.values('type').annotate(Count('pk')).order_by()
        self.structure = {v['type']: v['pk__count'] for v in vals}
        self.save()
        return changed
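
    # A usage sketch for scale() (not from the original source; the process
    # types and counts are hypothetical). Assuming the latest release has a
    # Procfile with 'web' and 'worker' entries, a caller could request:
    #
    #   app.scale(user, {'web': 3, 'worker': 1})
    #
    # Unknown types (other than the special 'cmd' type) raise EnvironmentError,
    # and the resulting per-type counts are persisted back to app.structure.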

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        create_threads = []
        start_threads = []
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if set([c.state for c in to_add]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set([Container.UP]):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        destroy_threads = []
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        [c.delete() for c in to_destroy if c.state == Container.DESTROYED]
        if set([c.state for c in to_destroy]) != set([Container.DESTROYED]):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application"""
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)

        # create new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.create))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that failed to create
        if len(new) > 0 and set([c.state for c in new]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            self._destroy_containers(new)
            raise RuntimeError(err)

        # start new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.start))
        [t.start() for t in threads]
        [t.join() for t in threads]

        # check for containers that didn't come up correctly
        if len(new) > 0 and set([c.state for c in new]) != set([Container.UP]):
            # report the deploy error
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

        # destroy old containers
        if existing:
            self._destroy_containers(existing)

        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = utils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)


@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.
    """
    INITIALIZED = 'initialized'
    CREATED = 'created'
    UP = 'up'
    DOWN = 'down'
    DESTROYED = 'destroyed'
    CRASHED = 'crashed'
    ERROR = 'error'
    STATE_CHOICES = (
        (INITIALIZED, 'initialized'),
        (CREATED, 'created'),
        (UP, 'up'),
        (DOWN, 'down'),
        (DESTROYED, 'destroyed'),
        (CRASHED, 'crashed'),
        (ERROR, 'error'),
    )

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    release = models.ForeignKey('Release')
    type = models.CharField(max_length=128, blank=False)
    num = models.PositiveIntegerField()
    state = FSMField(default=INITIALIZED, choices=STATE_CHOICES,
                     protected=True, propagate=False)

    def short_name(self):
        return "{}.{}.{}".format(self.app.id, self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return self.short_name()

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']

    def _get_job_id(self):
        app = self.app.id
        release = self.release
        version = "v{}".format(release.version)
        num = self.num
        job_id = "{app}_{version}.{self.type}.{num}".format(**locals())
        return job_id

    _job_id = property(_get_job_id)
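
    # For illustration only (the app name is hypothetical): a container for app
    # 'myapp' at release v2, of type 'web' and number 1, gets the job id
    # 'myapp_v2.web.1', which is the name handed to the scheduler below.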

    def _get_scheduler(self):
        return self.app._scheduler

    _scheduler = property(_get_scheduler)

    def _get_command(self):
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            if self.release.build.dockerfile or not self.release.build.sha:
                return "bash -c '{}'".format(self.release.build.procfile[self.type])
            else:
                return 'start {}'.format(self.type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if self.type == 'cmd' else 'start {}'.format(self.type)

    _command = property(_get_command)

    def clone(self, release):
        c = Container.objects.create(owner=self.owner,
                                     app=self.app,
                                     release=release,
                                     type=self.type,
                                     num=self.num)
        return c

    @transition(field=state, source=INITIALIZED, target=CREATED, on_error=ERROR)
    def create(self):
        image = self.release.image
        kwargs = {'memory': self.release.config.memory,
                  'cpu': self.release.config.cpu,
                  'tags': self.release.config.tags}
        job_id = self._job_id
        try:
            self._scheduler.create(
                name=job_id,
                image=image,
                command=self._command,
                **kwargs)
        except Exception as e:
            err = '{} (create): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source=[CREATED, UP, DOWN], target=UP, on_error=CRASHED)
    def start(self):
        job_id = self._job_id
        try:
            self._scheduler.start(job_id)
        except Exception as e:
            err = '{} (start): {}'.format(job_id, e)
            log_event(self.app, err, logging.WARNING)
            raise

    @transition(field=state, source=UP, target=DOWN, on_error=ERROR)
    def stop(self):
        job_id = self._job_id
        try:
            self._scheduler.stop(job_id)
        except Exception as e:
            err = '{} (stop): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source='*', target=DESTROYED, on_error=ERROR)
    def destroy(self):
        job_id = self._job_id
        try:
            self._scheduler.destroy(job_id)
        except Exception as e:
            err = '{} (destroy): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    def run(self, command):
        """Run a one-off command"""
        if self.release.build is None:
            raise EnvironmentError('No build associated with this release '
                                   'to run this command')
        image = self.release.image
        job_id = self._job_id
        entrypoint = '/bin/bash'
        # if this is a procfile-based app, switch the entrypoint to slugrunner's default
        # FIXME: remove slugrunner's hardcoded entrypoint
        if self.release.build.procfile and \
                self.release.build.sha and not \
                self.release.build.dockerfile:
            entrypoint = '/runner/init'
            command = "'{}'".format(command)
        else:
            command = "-c '{}'".format(command)
        try:
            rc, output = self._scheduler.run(job_id, image, entrypoint, command)
            return rc, output
        except Exception as e:
            err = '{} (run): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise
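

# A sketch of the container state machine defined by the @transition decorators
# above (a summary, not code from the original module):
#
#   initialized --create()--> created --start()--> up --stop()--> down
#   (any state) --destroy()--> destroyed
#
# create() falls back to 'error' on failure and start() to 'crashed'; the
# post_transition handler wired up at the bottom of this module persists each
# state change to the database.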


@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField(max_length=40)

    fingerprint = models.CharField(max_length=255)
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)

    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])


@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default={}, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        initial = True if self.app.structure == {} else False
        try:
            self.app.deploy(user, new_release, initial=initial)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])


@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    values = JSONField(default={}, blank=True)
    memory = JSONField(default={}, blank=True)
    cpu = JSONField(default={}, blank=True)
    tags = JSONField(default={}, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])
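

# Purely illustrative shapes for the Config JSON fields above (every key and
# value here is hypothetical, not taken from the original source):
#
#   values = {'DEBUG': 'true'}     # environment variables for the release
#   memory = {'web': '512M'}       # per-process-type memory limits
#   cpu = {'web': 512}             # per-process-type cpu shares
#   tags = {'environ': 'prod'}     # scheduler placement constraints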


@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)

    config = models.ForeignKey('Config')
    build = models.ForeignKey('Build', null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        return '{}:v{}'.format(self.app.id, str(self.version))

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.
        """
        # construct fully-qualified target image
        new_version = self.version + 1
        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary)
        try:
            release.publish()
        except EnvironmentError as e:
            # If we cannot publish this app, just log and carry on
            logger.info(e)
            pass
        return release

    def publish(self, source_version='latest'):
        if self.build is None:
            raise EnvironmentError('No build associated with this release to publish')
        source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
        source_image = '{}:{}'.format(self.build.image, source_tag)
        # IOW, this image did not come from the builder
        # FIXME: remove check for mock registry module
        if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
            # we assume that the image is not present on our registry,
            # so shell out a task to pull in the repository
            data = {
                'src': self.build.image
            }
            requests.post(
                '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
                                                    self.app.id),
                data=data,
            )
            # update the source image to the repository we just imported
            source_image = self.app.id
            # if the image imported had a tag specified, use that tag as the source
            if ':' in self.build.image:
                if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
                    source_image += self.build.image[self.build.image.rfind(':'):]
        publish_release(source_image,
                        self.config.values,
                        self.image)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def rollback(self, user, version):
        if version < 1:
            raise EnvironmentError('version cannot be below 0')
        summary = "{} rolled back to v{}".format(user, version)
        prev = self.app.release_set.get(version=version)
        new_release = self.new(
            user,
            build=prev.build,
            config=prev.config,
            summary=summary,
            source_version='v{}'.format(version))
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise
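
    # Numbering example (the versions are hypothetical): if an app is at v5 and
    # a user rolls back to v2, rollback() creates a new release v6 that reuses
    # the build and config of v2; history is only ever appended to, never
    # rewritten.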

    def save(self, *args, **kwargs):  # noqa
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None
            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')
                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')
                if changes:
                    changes = 'changed limits for '+', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)
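

# The summary-building logic in Release.save() above relies on dict_diff() from
# utils, which is assumed to return a mapping with 'added', 'changed' and
# 'deleted' keys comparing the new config dict against the old one; only those
# key names are consumed here to build a human-readable summary.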


@python_2_unicode_compatible
class Domain(AuditedModel):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    domain = models.TextField(blank=False, null=False, unique=True)

    def __str__(self):
        return self.domain


@python_2_unicode_compatible
class Key(UuidAuditedModel):
    """An SSH public key."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=128)
    public = models.TextField(unique=True)

    class Meta:
        verbose_name = 'SSH Key'
        unique_together = (('owner', 'id'))

    def __str__(self):
        return "{}...{}".format(self.public[:18], self.public[-31:])


# define update/delete callbacks for synchronizing
# models with the configuration management backend

def _log_build_created(**kwargs):
    if kwargs.get('created'):
        build = kwargs['instance']
        log_event(build.app, "build {} created".format(build))


def _log_release_created(**kwargs):
    if kwargs.get('created'):
        release = kwargs['instance']
        log_event(release.app, "release {} created".format(release))
        # append release lifecycle logs to the app
        release.app.log(release.summary)


def _log_config_updated(**kwargs):
    config = kwargs['instance']
    log_event(config.app, "config {} updated".format(config))


def _log_domain_added(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} added".format(domain)
    log_event(domain.app, msg)
    # adding a domain does not create a release, so we have to log here
    domain.app.log(msg)


def _log_domain_removed(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} removed".format(domain)
    log_event(domain.app, msg)
    # removing a domain does not create a release, so we have to log here
    domain.app.log(msg)


def _etcd_publish_key(**kwargs):
    key = kwargs['instance']
    _etcd_client.write('/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public)), key.public)


def _etcd_purge_key(**kwargs):
    key = kwargs['instance']
    _etcd_client.delete('/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public)))


def _etcd_purge_user(**kwargs):
    username = kwargs['instance'].username
    try:
        _etcd_client.delete(
            '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
    except KeyError:
        # If _etcd_publish_key() wasn't called, there is no user dir to delete.
        pass


def _etcd_create_app(**kwargs):
    appname = kwargs['instance']
    if kwargs['created']:
        _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)


def _etcd_purge_app(**kwargs):
    appname = kwargs['instance']
    _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)


def _etcd_publish_domains(**kwargs):
    app = kwargs['instance'].app
    app_domains = app.domain_set.all()
    if app_domains:
        _etcd_client.write('/deis/domains/{}'.format(app),
                           ' '.join(str(d.domain) for d in app_domains))


def _etcd_purge_domains(**kwargs):
    app = kwargs['instance'].app
    app_domains = app.domain_set.all()
    if app_domains:
        _etcd_client.write('/deis/domains/{}'.format(app),
                           ' '.join(str(d.domain) for d in app_domains))
    else:
        _etcd_client.delete('/deis/domains/{}'.format(app))
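

# The callbacks above maintain an etcd layout along these lines (the paths are
# taken from the write/delete calls; the placeholder names are illustrative):
#
#   /deis/builder/users/<username>/<key-fingerprint>  -> SSH public key
#   /deis/services/<app-id>/                          -> directory created per app
#   /deis/domains/<app-id>                            -> space-separated custom domains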


# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')


# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)


# save FSM transitions as they happen
def _save_transition(**kwargs):
    kwargs['instance'].save()
    # close database connections after transition
    # to avoid leaking connections inside threads
    from django.db import connection
    connection.close()

post_transition.connect(_save_transition)

# wire up etcd publishing if we can connect
try:
    _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
    _etcd_client.get('/deis')
except etcd.EtcdException:
    logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
    _etcd_client = None

if _etcd_client:
    post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
    post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
    post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')