github.com/spg/deis@v1.7.3/controller/api/models.py

# -*- coding: utf-8 -*-

"""
Data models for the Deis API.
"""

from __future__ import unicode_literals
import base64
from datetime import datetime
import etcd
import importlib
import logging
import os
import re
import subprocess
import time
from threading import Thread

from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, SuspiciousOperation
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
from OpenSSL import crypto
import requests
from rest_framework.authtoken.models import Token

from api import fields, utils
from registry import publish_release
from utils import dict_diff, fingerprint


logger = logging.getLogger(__name__)


def close_db_connections(func, *args, **kwargs):
    """
    Decorator to explicitly close db connections during threaded execution

    Note this is necessary to work around:
    https://code.djangoproject.com/ticket/22420
    """
    def _close_db_connections(*args, **kwargs):
        ret = None
        try:
            ret = func(*args, **kwargs)
        finally:
            from django.db import connections
            for conn in connections.all():
                conn.close()
        return ret
    return _close_db_connections


def log_event(app, msg, level=logging.INFO):
    # controller needs to know which app this log comes from
    logger.log(level, "{}: {}".format(app.id, msg))
    app.log(msg)


def validate_base64(value):
    """Check that value contains only valid base64 characters."""
    try:
        base64.b64decode(value.split()[1])
    except Exception as e:
        raise ValidationError(e)


def validate_id_is_docker_compatible(value):
    """
    Check that the ID follows docker's image name constraints
    """
    match = re.match(r'^[a-z0-9-]+$', value)
    if not match:
        raise ValidationError("App IDs can only contain [a-z0-9-].")


def validate_app_structure(value):
    """Error if the dict values aren't ints >= 0."""
    try:
        if any(int(v) < 0 for v in value.viewvalues()):
            raise ValueError("Must be greater than or equal to zero")
    except ValueError as err:
        raise ValidationError(err)


def validate_reserved_names(value):
    """A value cannot use some reserved names."""
    if value in settings.DEIS_RESERVED_NAMES:
        raise ValidationError('{} is a reserved name.'.format(value))


def validate_comma_separated(value):
    """Error if the value doesn't look like a list of hostnames or IP addresses
    separated by commas.
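    For example, "foo.example.com,10.21.12.2" passes; values with spaces do not.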
    """
    if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
        raise ValidationError(
            "{} should be a comma-separated list".format(value))


def validate_domain(value):
    """Error if the domain contains unexpected characters."""
    if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
        raise ValidationError('"{}" contains unexpected characters'.format(value))


def validate_certificate(value):
    try:
        crypto.load_certificate(crypto.FILETYPE_PEM, value)
    except crypto.Error as e:
        raise ValidationError('Could not load certificate: {}'.format(e))


class AuditedModel(models.Model):
    """Add created and updated fields to a model."""

    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True


def select_app_name():
    """Select a unique randomly generated app name"""
    name = utils.generate_app_name()

    while App.objects.filter(id=name).exists():
        name = utils.generate_app_name()

    return name


class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""

    uuid = fields.UuidField('UUID', primary_key=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True


@python_2_unicode_compatible
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.SlugField(max_length=64, unique=True, default=select_app_name,
                          validators=[validate_id_is_docker_compatible,
                                      validate_reserved_names])
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    @property
    def _scheduler(self):
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    def __str__(self):
        return self.id

    @property
    def url(self):
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
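
        Messages are appended to a per-app file under settings.DEIS_LOG_DIR.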
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
                                              message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        try:
            # attempt to remove containers from the scheduler
            self._destroy_containers([c for c in self.container_set.exclude(type='run')])
        except RuntimeError:
            pass
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def restart(self, **kwargs):
        to_restart = self.container_set.all()
        if kwargs.get('type'):
            to_restart = to_restart.filter(type=kwargs.get('type'))
        if kwargs.get('num'):
            to_restart = to_restart.filter(num=kwargs.get('num'))
        self._restart_containers(to_restart)
        return to_restart

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure:
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        # iterate on a copy of the container_type keys
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.exclude(type='run').values(
            'type').annotate(Count('pk')).order_by()
        new_structure = structure.copy()
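        # overlay the per-type counts that actually exist in the database so the
        # saved structure reflects reality rather than just the requested values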
        new_structure.update({v['type']: v['pk__count'] for v in vals})
        self.structure = new_structure
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        if not to_add:
            return
        create_threads = [Thread(target=c.create) for c in to_add]
        start_threads = [Thread(target=c.start) for c in to_add]
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if any(c.state != 'created' for c in to_add):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set(['up']):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _restart_containers(self, to_restart):
        """Restarts containers via the scheduler"""
        if not to_restart:
            return
        stop_threads = [Thread(target=c.stop) for c in to_restart]
        start_threads = [Thread(target=c.start) for c in to_restart]
        [t.start() for t in stop_threads]
        [t.join() for t in stop_threads]
        if any(c.state != 'created' for c in to_restart):
            err = 'warning, some containers failed to stop'
            log_event(self, err, logging.WARNING)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if any(c.state != 'up' for c in to_restart):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        if not to_destroy:
            return
        destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        [c.delete() for c in to_destroy if c.state == 'destroyed']
        if any(c.state != 'destroyed' for c in to_destroy):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release):
        """Deploy a new release to this application"""
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)

        self._start_containers(new)

        # destroy old containers
        if existing:
            self._destroy_containers(existing)

        # perform default scaling if necessary
        if self.structure == {} and release.build is not None:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}

        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}

        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}

        # default to heroku workflow
        else:
            structure = {'web': 1}

        self.scale(user, structure)

    def logs(self, log_lines):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        data = subprocess.check_output(['tail', '-n', log_lines, path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1

        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)


@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    release = models.ForeignKey('Release')
    type = models.CharField(max_length=128, blank=False)
    num = models.PositiveIntegerField()

    @property
    def _scheduler(self):
        return self.app._scheduler

    @property
    def state(self):
        return self._scheduler.state(self._job_id).name

    def short_name(self):
        return "{}.{}.{}".format(self.app.id, self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return self.short_name()

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']

    def _get_job_id(self):
        app = self.app.id
        release = self.release
        version = "v{}".format(release.version)
        num = self.num
        job_id = "{app}_{version}.{self.type}.{num}".format(**locals())
        return job_id

    _job_id = property(_get_job_id)

    def _get_command(self):
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            if self.release.build.dockerfile or not self.release.build.sha:
                return "bash -c '{}'".format(self.release.build.procfile[self.type])
            else:
                return 'start {}'.format(self.type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if self.type == 'cmd' else 'start {}'.format(self.type)

    _command = property(_get_command)

    def clone(self, release):
        c = Container.objects.create(owner=self.owner,
                                     app=self.app,
                                     release=release,
                                     type=self.type,
                                     num=self.num)
        return c

    @close_db_connections
    def create(self):
        image = self.release.image
        kwargs = {'memory': self.release.config.memory,
                  'cpu': self.release.config.cpu,
                  'tags': self.release.config.tags}
        job_id = self._job_id
        try:
            self._scheduler.create(
                name=job_id,
                image=image,
                command=self._command,
                **kwargs)
        except Exception as e:
            err = '{} (create): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @close_db_connections
    def start(self):
        job_id = self._job_id
        try:
            self._scheduler.start(job_id)
        except Exception as e:
            err = '{} (start): {}'.format(job_id, e)
            log_event(self.app, err, logging.WARNING)
            raise

    @close_db_connections
    def stop(self):
        job_id = self._job_id
        try:
            self._scheduler.stop(job_id)
        except Exception as e:
            err = '{} (stop): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @close_db_connections
    def destroy(self):
        job_id = self._job_id
        try:
            self._scheduler.destroy(job_id)
        except Exception as e:
            err = '{} (destroy): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    def run(self, command):
        """Run a one-off command"""
        if self.release.build is None:
            raise EnvironmentError('No build associated with this release '
                                   'to run this command')
        image = self.release.image
        job_id = self._job_id
        entrypoint = '/bin/bash'
        # if this is a procfile-based app, switch the entrypoint to slugrunner's default
        # FIXME: remove slugrunner's hardcoded entrypoint
        if self.release.build.procfile and \
           self.release.build.sha and not \
           self.release.build.dockerfile:
            entrypoint = '/runner/init'
            command = "'{}'".format(command)
        else:
            command = "-c '{}'".format(command)
        try:
            rc, output = self._scheduler.run(job_id, image, entrypoint, command)
            return rc, output
        except Exception as e:
            err = '{} (run): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise


@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField(max_length=40)

    fingerprint = models.CharField(max_length=255)
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)

    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])


@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default={}, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def save(self, **kwargs):
        try:
            previous_build = self.app.build_set.latest()
            to_destroy = []
            for proctype in previous_build.procfile:
                if proctype not in self.procfile:
                    for c in self.app.container_set.filter(type=proctype):
                        to_destroy.append(c)
            self.app._destroy_containers(to_destroy)
        except Build.DoesNotExist:
            pass
        return super(Build, self).save(**kwargs)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])


@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    values = JSONField(default={}, blank=True)
    memory = JSONField(default={}, blank=True)
    cpu = JSONField(default={}, blank=True)
    tags = JSONField(default={}, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])

    def save(self, **kwargs):
        """merge the old config with the new"""
        try:
            previous_config = self.app.config_set.latest()
            for attr in ['cpu', 'memory', 'tags', 'values']:
                # Guard against migrations from older apps without fixes to
                # JSONField encoding.
                try:
                    data = getattr(previous_config, attr).copy()
                except AttributeError:
                    data = {}
                try:
                    new_data = getattr(self, attr).copy()
                except AttributeError:
                    new_data = {}
                data.update(new_data)
                # remove config keys if we provided a null value
                [data.pop(k) for k, v in new_data.viewitems() if v is None]
                setattr(self, attr, data)
        except Config.DoesNotExist:
            pass
        return super(Config, self).save(**kwargs)


@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)

    config = models.ForeignKey('Config')
    build = models.ForeignKey('Build', null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        return '{}:v{}'.format(self.app.id, str(self.version))

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.
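
        Returns the new Release; if publishing its image raises an
        EnvironmentError, the error is logged against the app and the
        release is still returned.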
        """
        # construct fully-qualified target image
        new_version = self.version + 1
        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary)
        try:
            release.publish()
        except EnvironmentError as e:
            # If we cannot publish this app, just log and carry on
            log_event(self.app, e)
            pass
        return release

    def publish(self, source_version='latest'):
        if self.build is None:
            raise EnvironmentError('No build associated with this release to publish')
        source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
        source_image = '{}:{}'.format(self.build.image, source_tag)
        # IOW, this image did not come from the builder
        # FIXME: remove check for mock registry module
        if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
            # we assume that the image is not present on our registry,
            # so shell out a task to pull in the repository
            data = {
                'src': self.build.image
            }
            requests.post(
                '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
                                                    self.app.id),
                data=data,
            )
            # update the source image to the repository we just imported
            source_image = self.app.id
            # if the image imported had a tag specified, use that tag as the source
            if ':' in self.build.image:
                if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
                    source_image += self.build.image[self.build.image.rfind(':'):]
        publish_release(source_image,
                        self.config.values,
                        self.image)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def rollback(self, user, version):
        if version < 1:
            raise EnvironmentError('version cannot be below 0')
        summary = "{} rolled back to v{}".format(user, version)
        prev = self.app.release_set.get(version=version)
        new_release = self.new(
            user,
            build=prev.build,
            config=prev.config,
            summary=summary,
            source_version='v{}'.format(version))
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def save(self, *args, **kwargs):  # noqa
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None
            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')
                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')
                if changes:
                    changes = 'changed limits for ' + ', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)


@python_2_unicode_compatible
class Domain(AuditedModel):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    domain = models.TextField(blank=False, null=False, unique=True)

    def __str__(self):
        return self.domain


@python_2_unicode_compatible
class Certificate(AuditedModel):
    """
    Public and private key pair used to secure application traffic at the router.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # there is no upper limit on the size of an x.509 certificate
    certificate = models.TextField(validators=[validate_certificate])
    key = models.TextField()
    # X.509 certificates allow any string of information as the common name.
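    # We therefore store it as an unconstrained, unique TextField rather than a
    # bounded CharField.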
    common_name = models.TextField(unique=True)
    expires = models.DateTimeField()

    def __str__(self):
        return self.common_name

    def _get_certificate(self):
        try:
            return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
        except crypto.Error as e:
            raise SuspiciousOperation(e)

    def save(self, *args, **kwargs):
        certificate = self._get_certificate()
        if not self.common_name:
            self.common_name = certificate.get_subject().CN
        if not self.expires:
            # convert openssl's expiry date format to Django's DateTimeField format
            self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
        return super(Certificate, self).save(*args, **kwargs)


@python_2_unicode_compatible
class Key(UuidAuditedModel):
    """An SSH public key."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=128)
    public = models.TextField(unique=True, validators=[validate_base64])
    fingerprint = models.CharField(max_length=128)

    class Meta:
        verbose_name = 'SSH Key'
        unique_together = (('owner', 'fingerprint'))

    def __str__(self):
        return "{}...{}".format(self.public[:18], self.public[-31:])

    def save(self, *args, **kwargs):
        self.fingerprint = fingerprint(self.public)
        return super(Key, self).save(*args, **kwargs)


# define update/delete callbacks for synchronizing
# models with the configuration management backend

def _log_build_created(**kwargs):
    if kwargs.get('created'):
        build = kwargs['instance']
        # log only to the controller; this event will be logged in the release summary
        logger.info("{}: build {} created".format(build.app, build))


def _log_release_created(**kwargs):
    if kwargs.get('created'):
        release = kwargs['instance']
        # log only to the controller; this event will be logged in the release summary
        logger.info("{}: release {} created".format(release.app, release))
        # append release lifecycle logs to the app
        release.app.log(release.summary)


def _log_config_updated(**kwargs):
    config = kwargs['instance']
    # log only to the controller; this event will be logged in the release summary
    logger.info("{}: config {} updated".format(config.app, config))


def _log_domain_added(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} added".format(domain)
    log_event(domain.app, msg)


def _log_domain_removed(**kwargs):
    domain = kwargs['instance']
    msg = "domain {} removed".format(domain)
    log_event(domain.app, msg)


def _log_cert_added(**kwargs):
    cert = kwargs['instance']
    logger.info("cert {} added".format(cert))


def _log_cert_removed(**kwargs):
    cert = kwargs['instance']
    logger.info("cert {} removed".format(cert))


def _etcd_publish_key(**kwargs):
    key = kwargs['instance']
    _etcd_client.write('/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public)), key.public)


def _etcd_purge_key(**kwargs):
    key = kwargs['instance']
    try:
        _etcd_client.delete('/deis/builder/users/{}/{}'.format(
            key.owner.username, fingerprint(key.public)))
    except KeyError:
        pass


def _etcd_purge_user(**kwargs):
    username = kwargs['instance'].username
    try:
        _etcd_client.delete(
            '/deis/builder/users/{}'.format(username), dir=True, recursive=True)
    except KeyError:
        # If _etcd_publish_key() wasn't called, there is no user dir to delete.
        pass


def _etcd_create_app(**kwargs):
    appname = kwargs['instance']
    if kwargs['created']:
        _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)


def _etcd_purge_app(**kwargs):
    appname = kwargs['instance']
    try:
        _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
    except KeyError:
        pass


def _etcd_publish_cert(**kwargs):
    cert = kwargs['instance']
    if kwargs['created']:
        _etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
        _etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)


def _etcd_purge_cert(**kwargs):
    cert = kwargs['instance']
    try:
        _etcd_client.delete('/deis/certs/{}'.format(cert),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass


def _etcd_publish_domains(**kwargs):
    domain = kwargs['instance']
    if kwargs['created']:
        _etcd_client.write('/deis/domains/{}'.format(domain), domain.app)


def _etcd_purge_domains(**kwargs):
    domain = kwargs['instance']
    try:
        _etcd_client.delete('/deis/domains/{}'.format(domain),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass


# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')


# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)

# wire up etcd publishing if we can connect
try:
    _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
    _etcd_client.get('/deis')
except etcd.EtcdException:
    logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
    _etcd_client = None

if _etcd_client:
    post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
    post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
    post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
    post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
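

# A minimal usage sketch of the models above, intended for a Django shell with
# the controller's settings loaded. The `admin` user, image name, and procfile
# contents are illustrative assumptions, not part of this module:
#
#     from api.models import App, Build
#     app = App.objects.create(owner=admin)    # id defaults to a generated name
#     app.create()                             # seeds the initial Config and Release v1
#     build = Build.objects.create(owner=admin, app=app, image='example/app',
#                                  procfile={'web': './run-web'})
#     build.create(admin)                      # cuts Release v2 and deploys it
#     app.scale(admin, {'web': 2})             # requires a build on the latest release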