go.ligato.io/vpp-agent/v3@v3.5.0/tests/e2e/e2etest/microservice.go

package e2etest

import (
	"runtime"
	"testing"

	docker "github.com/fsouza/go-dockerclient"
	"github.com/go-errors/errors"
	"github.com/vishvananda/netns"
	nslinuxcalls "go.ligato.io/vpp-agent/v3/plugins/linux/nsplugin/linuxcalls"
)

const (
	msImage       = "busybox:1.31"
	msLabelKey    = "e2e.test.ms"
	MsNamePrefix  = "e2e-test-ms-"
	msStopTimeout = 1 // seconds
)

// Microservice represents a running microservice.
type Microservice struct {
	ComponentRuntime
	Pinger
	Diger

	ctx     *TestCtx
	name    string
	nsCalls nslinuxcalls.NetworkNamespaceAPI
}

// NewMicroservice creates and starts a new microservice container.
func NewMicroservice(
	ctx *TestCtx,
	msName string,
	nsCalls nslinuxcalls.NetworkNamespaceAPI,
	optMods ...MicroserviceOptModifier,
) (*Microservice, error) {
	// compute options
	opts := DefaultMicroserviceOpt(ctx, msName)
	for _, mod := range optMods {
		mod(opts)
	}

	// create the struct for the microservice
	ms := &Microservice{
		ComponentRuntime: opts.Runtime,
		ctx:              ctx,
		name:             msName,
		nsCalls:          nsCalls,
	}

	// Note: if the runtime doesn't implement the Pinger/Diger interface and a test uses it,
	// compilation still succeeds, but calling Ping/Dig will panic at runtime with
	// "runtime error: invalid memory address or nil pointer dereference".
	if pinger, ok := opts.Runtime.(Pinger); ok {
		ms.Pinger = pinger
	}
	if diger, ok := opts.Runtime.(Diger); ok {
		ms.Diger = diger
	}

	// get runtime-specific options and start the microservice in the runtime environment
	startOpts, err := opts.RuntimeStartOptions(ctx, opts)
	if err != nil {
		return nil, errors.Errorf("can't get microservice %s start options for runtime due to: %v", msName, err)
	}
	err = ms.Start(startOpts)
	if err != nil {
		return nil, errors.Errorf("can't start microservice %s due to: %v", msName, err)
	}
	return ms, nil
}

func (ms *Microservice) Stop(options ...interface{}) error {
	if err := ms.ComponentRuntime.Stop(options...); err != nil {
		// not cleaning up additionally after a failed attempt to stop this test topology
		// component, because that would lock out further inspection of the component (i.e.
		// why it won't stop).
		return err
	}
	// cleanup
	delete(ms.ctx.microservices, ms.name)
	return nil
}

// MicroserviceStartOptionsForContainerRuntime translates MicroserviceOpt to options for the
// ComponentRuntime.Start(option) method implemented by ContainerRuntime.
func MicroserviceStartOptionsForContainerRuntime(ctx *TestCtx, options interface{}) (interface{}, error) {
	opts, ok := options.(*MicroserviceOpt)
	if !ok {
		return nil, errors.Errorf("expected MicroserviceOpt but got %+v", options)
	}

	msLabel := MsNamePrefix + opts.Name
	createOpts := &docker.CreateContainerOptions{
		Context: ctx.ctx,
		Name:    msLabel,
		Config: &docker.Config{
			Image: msImage,
			Labels: map[string]string{
				msLabelKey: opts.Name,
			},
			//Entrypoint:
			Env: []string{"MICROSERVICE_LABEL=" + msLabel},
			Cmd: []string{"tail", "-f", "/dev/null"},
		},
		HostConfig: &docker.HostConfig{
			// networking is configured via VPP in E2E tests
			NetworkMode: "none",
		},
	}

	if opts.ContainerOptsHook != nil {
		opts.ContainerOptsHook(createOpts)
	}

	return &ContainerStartOptions{
		ContainerOptions: createOpts,
		Pull:             true,
	}, nil
}

// TODO: this is runtime specific -> integrate it into the runtime concept
func removeDanglingMicroservices(t *testing.T, dockerClient *docker.Client) {
	// remove any running microservices prior to starting a new test
	containers, err := dockerClient.ListContainers(docker.ListContainersOptions{
		All: true,
		Filters: map[string][]string{
			"label": {msLabelKey},
		},
	})
	if err != nil {
		t.Fatalf("failed to list existing microservices: %v", err)
	}
	for _, container := range containers {
		err = dockerClient.RemoveContainer(docker.RemoveContainerOptions{
			ID:    container.ID,
			Force: true,
		})
		if err != nil {
			t.Fatalf("failed to remove existing microservice: %v", err)
		} else {
			t.Logf("removed existing microservice: %s", container.Labels[msLabelKey])
		}
	}
}

// TODO: this is runtime specific -> integrate it into the runtime concept
// EnterNetNs enters the **network** namespace of the microservice (other namespaces
// remain unchanged). Leave using the returned callback.
func (ms *Microservice) EnterNetNs() (exitNetNs func()) {
	ms.ctx.t.Helper()
	origns, err := netns.Get()
	if err != nil {
		ms.ctx.t.Fatalf("failed to obtain current network namespace: %v", err)
	}
	nsHandle, err := ms.nsCalls.GetNamespaceFromPid(ms.PID())
	if err != nil {
		ms.ctx.t.Fatalf("failed to obtain handle for network namespace of microservice '%s': %v",
			ms.name, err)
	}
	defer nsHandle.Close()

	// network namespace membership is per OS thread, so pin this goroutine to its
	// thread until the returned callback switches back
	runtime.LockOSThread()
	err = ms.nsCalls.SetNamespace(nsHandle)
	if err != nil {
		ms.ctx.t.Fatalf("failed to enter network namespace of microservice '%s': %v",
			ms.name, err)
	}
	return func() {
		if err := ms.nsCalls.SetNamespace(origns); err != nil {
			ms.ctx.t.Fatalf("failed to switch back to the original network namespace: %v", err)
		}
		origns.Close()
		runtime.UnlockOSThread()
	}
}
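
A minimal usage sketch for the API above, not part of the original file: it starts a microservice with a customized container config, runs checks inside its network namespace, and stops it. It assumes MicroserviceOptModifier is a func(*MicroserviceOpt) (inferred from how optMods are applied in NewMicroservice), that nslinuxcalls.NewSystemHandler exists as the system implementation of NetworkNamespaceAPI, and the alternative image tag is purely illustrative.

func exampleMicroserviceUsage(ctx *TestCtx) {
	// assumption: the nsplugin linuxcalls package provides this constructor for the
	// system (non-mocked) namespace handler
	nsCalls := nslinuxcalls.NewSystemHandler()

	// optional modifier overriding the container image via ContainerOptsHook
	// (MicroserviceOptModifier signature assumed to be func(*MicroserviceOpt))
	withAltImage := func(o *MicroserviceOpt) {
		o.ContainerOptsHook = func(cc *docker.CreateContainerOptions) {
			cc.Config.Image = "busybox:1.32" // hypothetical alternative image
		}
	}

	ms, err := NewMicroservice(ctx, "client", nsCalls, withAltImage)
	if err != nil {
		ctx.t.Fatalf("starting microservice failed: %v", err)
	}
	defer ms.Stop()

	// inspect the microservice's network namespace (e.g. its interfaces), then
	// return to the original namespace via the callback
	exitNetNs := ms.EnterNetNs()
	defer exitNetNs()
	// ... netlink / interface assertions would go here ...
}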