github.com/datadog/cilium@v1.6.12/Vagrantfile (about)

     1  # -*- mode: ruby -*-
     2  # vi: set ft=ruby :
     3  
     4  # The source of truth for vagrant box versions.
     5  # Sets SERVER_BOX, SERVER_VERSION, NETNEXT_SERVER_BOX and NETNEXT_SERVER_VERSION
     6  # Accepts overrides from env variables
     7  require_relative 'vagrant_box_defaults.rb'
     8  $SERVER_BOX = (ENV['SERVER_BOX'] || $SERVER_BOX)
     9  $SERVER_VERSION= (ENV['SERVER_VERSION'] || $SERVER_VERSION)
    10  $NETNEXT_SERVER_BOX = (ENV['NETNEXT_SERVER_BOX'] || $NETNEXT_SERVER_BOX)
    11  $NETNEXT_SERVER_VERSION= (ENV['NETNEXT_SERVER_VERSION'] || $NETNEXT_SERVER_VERSION)
    12  
    13  if ENV['NETNEXT'] == "true"
    14      $SERVER_BOX = $NETNEXT_SERVER_BOX
    15      $SERVER_VERSION = $NETNEXT_SERVER_VERSION
    16  end
    17  
# Multi-machine definitions below rely on Vagrant 2.x behavior.
Vagrant.require_version ">= 2.0.0"

# Guard: the VMs depend on environment prepared by contrib/vagrant/start.sh
# (NWORKERS, IP layout, CILIUM_TEMP scripts), so refuse a bare `vagrant up`
# unless start.sh marked the invocation with CILIUM_SCRIPT=true.
if ARGV.first == "up" && ENV['CILIUM_SCRIPT'] != 'true'
    raise Vagrant::Errors::VagrantError.new, <<END
Calling 'vagrant up' directly is not supported.  Instead, please run the following:
  export NWORKERS=n
  ./contrib/vagrant/start.sh
END
end

# Guard: IPv4 cannot be disabled (IPV4=0) with the supported k8s versions.
if ENV['IPV4'] == '0'
    raise Vagrant::Errors::VagrantError.new, <<END
Disabling IPv4 is currently not allowed until k8s 1.9 is released
END
end
    33  
# One-time bootstrap provisioner (runs privileged, see "bootstrap" provision
# below): relaxes journald rate limits so test logs are not dropped, restarts
# docker, makes the cilium checkout the login directory, and installs the
# bpf-map debugging tool.
$bootstrap = <<SCRIPT
echo "----------------------------------------------------------------"
export PATH=/home/vagrant/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games

echo "editing journald configuration"
sudo bash -c "echo RateLimitIntervalSec=1s >> /etc/systemd/journald.conf"
sudo bash -c "echo RateLimitBurst=10000 >> /etc/systemd/journald.conf"
echo "restarting systemd-journald"
sudo systemctl restart systemd-journald
echo "getting status of systemd-journald"
sudo service systemd-journald status
echo "done configuring journald"

sudo service docker restart
echo 'cd ~/go/src/github.com/cilium/cilium' >> /home/vagrant/.bashrc
sudo chown -R vagrant:vagrant /home/vagrant 2>/dev/null
curl -SsL https://github.com/cilium/bpf-map/releases/download/v1.0/bpf-map -o bpf-map
chmod +x bpf-map
mv bpf-map /usr/bin
SCRIPT
    54  
# Build provisioner (runs unprivileged on every `vagrant up`): compiles cilium
# from the synced checkout and removes stale binaries from the user's GOPATH
# so the freshly installed system binaries are the ones on PATH.
$build = <<SCRIPT
export PATH=/home/vagrant/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
~/go/src/github.com/cilium/cilium/common/build.sh
rm -fr ~/go/bin/cilium*
SCRIPT
    60  
# Install provisioner (runs on every `vagrant up`): installs the built
# artifacts, copies the systemd units for cilium and its dependencies, and
# makes sure the vagrant user belongs to the cilium group so it can talk to
# the cilium API socket.
$install = <<SCRIPT
sudo -E make -C /home/vagrant/go/src/github.com/cilium/cilium/ install

sudo mkdir -p /etc/sysconfig
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium-consul.service /lib/systemd/system
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium-docker.service /lib/systemd/system
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium-etcd.service /lib/systemd/system
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium.service /lib/systemd/system
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium-operator.service /lib/systemd/system
sudo cp /home/vagrant/go/src/github.com/cilium/cilium/contrib/systemd/cilium /etc/sysconfig

getent group cilium >/dev/null || sudo groupadd -r cilium
sudo usermod -a -G cilium vagrant
SCRIPT
    75  
# Optional provisioner, attached only when RUN_TEST_SUITE is set (see the
# master VM definition below): runs the runtime test suite inside the VM.
$testsuite = <<SCRIPT
sudo -E env PATH="${PATH}" make -C ~/go/src/github.com/cilium/cilium/ runtime-tests
SCRIPT
    79  
    80  $node_ip_base = ENV['IPV4_BASE_ADDR'] || ""
    81  $node_nfs_base_ip = ENV['IPV4_BASE_ADDR_NFS'] || ""
    82  $num_workers = (ENV['NWORKERS'] || 0).to_i
    83  $workers_ipv4_addrs = $num_workers.times.collect { |n| $node_ip_base + "#{n+(ENV['FIRST_IP_SUFFIX']).to_i+1}" }
    84  $workers_ipv4_addrs_nfs = $num_workers.times.collect { |n| $node_nfs_base_ip + "#{n+(ENV['FIRST_IP_SUFFIX_NFS']).to_i+1}" }
    85  $master_ip = ENV['MASTER_IPV4']
    86  $master_ipv6 = ENV['MASTER_IPV6_PUBLIC']
    87  $workers_ipv6_addrs_str = ENV['IPV6_PUBLIC_WORKERS_ADDRS'] || ""
    88  $workers_ipv6_addrs = $workers_ipv6_addrs_str.split(' ')
    89  
    90  # Create unique ID for use in vboxnet name so Jenkins pipeline can have concurrent builds.
    91  $job_name = ENV['JOB_BASE_NAME'] || "local"
    92  
    93  $build_number = ENV['BUILD_NUMBER'] || "0"
    94  $build_id = "#{$job_name}-#{$build_number}"
    95  
    96  # Only create the build_id_name for Jenkins environment so that
    97  # we can run VMs locally without having any the `build_id` in the name.
    98  if ENV['BUILD_NUMBER'] then
    99      $build_id_name = "-build-#{$build_id}"
   100  end
   101  
   102  if ENV['K8S'] then
   103      $vm_base_name = "k8s"
   104  else
   105      $vm_base_name = "runtime"
   106  end
   107  
   108  # Set locate to en_US.UTF-8
   109  ENV["LC_ALL"] = "en_US.UTF-8"
   110  ENV["LC_CTYPE"] = "en_US.UTF-8"
   111  
# We need this workaround since kube-proxy is not aware of multiple network
# interfaces. If we send a packet to a service IP that packet is sent
# to the default route, because the service IP is unknown by the linux routing
# table, with the source IP of the interface in the default routing table, even
# though the service IP should be routed to a different interface.
# This particular workaround is only needed for cilium, running on a pod on host
# network namespace, to reach out kube-api-server.
# NOTE(review): 192.168.34.12 is hard-coded here; presumably it must match the
# master's private-network address chosen by start.sh -- confirm before reuse.
$kube_proxy_workaround = <<SCRIPT
sudo iptables -t nat -A POSTROUTING -o enp0s8 ! -s 192.168.34.12 -j MASQUERADE
SCRIPT
   122  
Vagrant.configure(2) do |config|
    # Provisioners shared by every VM: "bootstrap" prepares journald/docker,
    # while "build" and "install" re-run on every `vagrant up`/`reload`.
    config.vm.provision "bootstrap", type: "shell", inline: $bootstrap
    config.vm.provision "build", type: "shell", run: "always", privileged: false, inline: $build
    config.vm.provision "install", type: "shell", run: "always", privileged: false, inline: $install
    # Box versions are pinned via vagrant_box_defaults.rb; no update nagging.
    config.vm.box_check_update = false

    config.vm.provider "virtualbox" do |vb|
        # Do not inherit DNS server from host, use proxy
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]

        # Prevent VirtualBox from interfering with host audio stack
        vb.customize ["modifyvm", :id, "--audio", "none"]

        # NOTE(review): the config.vm.* assignments below mutate the global
        # `config` scope even though they sit inside the virtualbox provider
        # block -- they apply regardless of provider.
        config.vm.box = $SERVER_BOX
        config.vm.box_version = $SERVER_VERSION
        # nil.to_i == 0 when VM_MEMORY/VM_CPUS are unset -- presumably
        # VirtualBox then falls back to box defaults; confirm with start.sh.
        vb.memory = ENV['VM_MEMORY'].to_i
        vb.cpus = ENV['VM_CPUS'].to_i
        if ENV["NFS"] then
            mount_type = "nfs"
            # Don't forget to enable these ports on your host before starting the VM
            # in order to have nfs working
            # iptables -I INPUT -p udp -s 192.168.34.0/24 --dport 111 -j ACCEPT
            # iptables -I INPUT -p udp -s 192.168.34.0/24 --dport 2049 -j ACCEPT
            # iptables -I INPUT -p udp -s 192.168.34.0/24 --dport 20048 -j ACCEPT
        else
            # Empty type string leaves Vagrant's default synced-folder choice.
            mount_type = ""
        end
        # The checkout itself is shared into the guest GOPATH.
        config.vm.synced_folder '.', '/home/vagrant/go/src/github.com/cilium/cilium', type: mount_type
        if ENV['USER_MOUNTS'] then
            # Allow multiple mounts divided by commas
            ENV['USER_MOUNTS'].split(",").each do |mnt|
                # Split "<to>=<from>"
                user_mount = mnt.split("=", 2)
                # Only one element, assume a path relative to home directories in both ends
                if user_mount.length == 1 then
                    user_mount_to = "/home/vagrant/" + user_mount[0]
                    user_mount_from = "~/" + user_mount[0]
                else
                    user_mount_to = user_mount[0]
                    # Remove "~/" prefix if any.
                    if user_mount_to.start_with?('~/') then
                        user_mount_to[0..1] = ''
                    end
                    # Add home directory prefix for non-absolute paths
                    if !user_mount_to.start_with?('/') then
                        user_mount_to = "/home/vagrant/" + user_mount_to
                    end
                    user_mount_from = user_mount[1]
                    # Add home prefix for host for any path in the project directory
                    # as it is already mounted.
                    if !user_mount_from.start_with?('/', '.', '~') then
                        user_mount_from = "~/" + user_mount_from
                    end
                end
                puts "Mounting host directory #{user_mount_from} as #{user_mount_to}"
                config.vm.synced_folder "#{user_mount_from}", "#{user_mount_to}", type: mount_type
            end
        end
    end

    # Master node: "<base>1" (runtime1 or k8s1), plus build id on Jenkins.
    master_vm_name = "#{$vm_base_name}1#{$build_id_name}"
    config.vm.define master_vm_name, primary: true do |cm|
        node_ip = "#{$master_ip}"
        # Expose the k8s API server to the host: guest 6443 -> host 7443.
        cm.vm.network "forwarded_port", guest: 6443, host: 7443
        cm.vm.network "private_network", ip: "#{$master_ip}",
            virtualbox__intnet: "cilium-test-#{$build_id}",
            :libvirt__guest_ipv6 => "yes",
            :libvirt__dhcp_enabled => false
        # With NFS or external IPv6, add a second private network (enp0s9)
        # and prefer its address as the node IP handed to the k8s scripts.
        if ENV["NFS"] || ENV["IPV6_EXT"] then
            if ENV['FIRST_IP_SUFFIX_NFS'] then
                $nfs_ipv4_master_addr = $node_nfs_base_ip + "#{ENV['FIRST_IP_SUFFIX_NFS']}"
            end
            cm.vm.network "private_network", ip: "#{$nfs_ipv4_master_addr}", bridge: "enp0s9"
            # Add IPv6 address this way or we get hit by a virtualbox bug
            cm.vm.provision "ipv6-config",
                type: "shell",
                run: "always",
                inline: "ip -6 a a #{$master_ipv6}/16 dev enp0s9"
            node_ip = "#{$nfs_ipv4_master_addr}"
            if ENV["IPV6_EXT"] then
                node_ip = "#{$master_ipv6}"
            end
        end
        cm.vm.hostname = "#{$vm_base_name}1"
        # CILIUM_TEMP points at generated per-node provisioning scripts
        # (written by start.sh); k8s installs are split in two parts around
        # the node config script.
        if ENV['CILIUM_TEMP'] then
           if ENV["K8S"] then
               k8sinstall = "#{ENV['CILIUM_TEMP']}/cilium-k8s-install-1st-part.sh"
               cm.vm.provision "k8s-install-master-part-1",
                   type: "shell",
                   run: "always",
                   env: {"node_ip" => node_ip},
                   privileged: true,
                   path: k8sinstall
           end
           script = "#{ENV['CILIUM_TEMP']}/node-1.sh"
           cm.vm.provision "config-install", type: "shell", privileged: true, run: "always", path: script
           # In k8s mode cilium needs etcd in order to run which was started in
           # the first part of the script. The 2nd part will install the
           # policies into kubernetes and cilium.
           if ENV["K8S"] then
               k8sinstall = "#{ENV['CILIUM_TEMP']}/cilium-k8s-install-2nd-part.sh"
               cm.vm.provision "k8s-install-master-part-2",
                   type: "shell",
                   run: "always",
                   env: {"node_ip" => node_ip},
                   privileged: true,
                   path: k8sinstall
           end
        end
        # Optionally run the runtime test suite on the master after setup.
        if ENV['RUN_TEST_SUITE'] then
           cm.vm.provision "testsuite", run: "always", type: "shell", privileged: false, inline: $testsuite
        end
    end

    # Worker nodes: "<base>2" .. "<base>{NWORKERS+1}", mirroring the master's
    # network/provisioning layout with per-worker addresses.
    $num_workers.times do |n|
        # n starts with 0
        node_vm_name = "#{$vm_base_name}#{n+2}#{$build_id_name}"
        # NOTE(review): node_hostname is computed but never used below; the
        # hostname is rebuilt inline when assigned.
        node_hostname = "#{$vm_base_name}#{n+2}"
        config.vm.define node_vm_name do |node|
            node_ip = $workers_ipv4_addrs[n]
            node.vm.network "private_network", ip: "#{node_ip}",
                virtualbox__intnet: "cilium-test-#{$build_id}",
                :libvirt__guest_ipv6 => 'yes',
                :libvirt__dhcp_enabled => false
            if ENV["NFS"] || ENV["IPV6_EXT"] then
                nfs_ipv4_addr = $workers_ipv4_addrs_nfs[n]
                node_ip = "#{nfs_ipv4_addr}"
                ipv6_addr = $workers_ipv6_addrs[n]
                node.vm.network "private_network", ip: "#{nfs_ipv4_addr}", bridge: "enp0s9"
                # Add IPv6 address this way or we get hit by a virtualbox bug
                node.vm.provision "ipv6-config",
                    type: "shell",
                    run: "always",
                    inline: "ip -6 a a #{ipv6_addr}/16 dev enp0s9"
                if ENV["IPV6_EXT"] then
                    node_ip = "#{ipv6_addr}"
                end
            end
            node.vm.hostname = "#{$vm_base_name}#{n+2}"
            if ENV['CILIUM_TEMP'] then
                if ENV["K8S"] then
                    k8sinstall = "#{ENV['CILIUM_TEMP']}/cilium-k8s-install-1st-part.sh"
                    node.vm.provision "k8s-install-node-part-1",
                        type: "shell",
                        run: "always",
                        env: {"node_ip" => node_ip},
                        privileged: true,
                        path: k8sinstall
                end
                script = "#{ENV['CILIUM_TEMP']}/node-#{n+2}.sh"
                node.vm.provision "config-install", type: "shell", privileged: true, run: "always", path: script
                if ENV["K8S"] then
                    k8sinstall = "#{ENV['CILIUM_TEMP']}/cilium-k8s-install-2nd-part.sh"
                    node.vm.provision "k8s-install-node-part-2",
                        type: "shell",
                        run: "always",
                        env: {"node_ip" => node_ip},
                        privileged: true,
                        path: k8sinstall
                end
            end
        end
    end
end