sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/sidecar/censor_test.go

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sidecar

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"testing"

	"github.com/google/go-cmp/cmp"
	"sigs.k8s.io/prow/pkg/gcsupload"
	"sigs.k8s.io/prow/pkg/pod-utils/wrapper"

	"sigs.k8s.io/prow/pkg/secretutil"
	"sigs.k8s.io/prow/pkg/testutil"
)

func TestCensor(t *testing.T) {
	preamble := func() string {
		return `In my younger and more vulnerable years my father gave me some advice that I’ve been turning over in my mind ever since.`
	}

	var testCases = []struct {
		name          string
		input, output string
		secrets       []string
		bufferSize    int
	}{
		{
			name:       "input smaller than buffer size",
			input:      preamble()[:100],
			secrets:    []string{"younger", "my"},
			output:     "In XX XXXXXXX and more vulnerable years XX father gave me some advice that I’ve been turning over ",
			bufferSize: 200,
		},
		{
			name:       "input larger than buffer size, not a multiple",
			input:      preamble()[:100],
			secrets:    []string{"younger", "my"},
			output:     "In XX XXXXXXX and more vulnerable years XX father gave me some advice that I’ve been turning over ",
			bufferSize: 16,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			censorer := secretutil.NewCensorer()
			censorer.Refresh(testCase.secrets...)
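			// The expected outputs above replace each secret with an equal-length run
			// of 'X' characters, so censoring preserves the overall length of the data
			// regardless of how the input size compares to bufferSize.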
			input := io.NopCloser(bytes.NewBufferString(testCase.input))
			outputSink := &bytes.Buffer{}
			output := nopWriteCloser(outputSink)
			if err := censor(input, output, censorer, testCase.bufferSize); err != nil {
				t.Fatalf("expected no error from censor, got %v", err)
			}
			if diff := cmp.Diff(outputSink.String(), testCase.output); diff != "" {
				t.Fatalf("got incorrect output after censoring: %v", diff)
			}
		})
	}
}

func nopWriteCloser(w io.Writer) io.WriteCloser {
	return &nopCloser{Writer: w}
}

type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error { return nil }

const inputDir = "testdata/input"

func copyTestData(t *testing.T) string {
	tempDir := t.TempDir()
	if err := filepath.Walk(inputDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		relpath, _ := filepath.Rel(inputDir, path) // Rel only errors when path is not relative to inputDir, which cannot happen here
		dest := filepath.Join(tempDir, relpath)
		if info.IsDir() {
			return os.MkdirAll(dest, info.Mode())
		}
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			link, err := os.Readlink(path)
			if err != nil {
				t.Fatalf("failed to read input link: %v", err)
			}
			return os.Symlink(link, dest)
		}
		if info.Name() == "link" {
			link, err := os.ReadFile(path)
			if err != nil {
				t.Fatalf("failed to read input link: %v", err)
			}
			return os.Symlink(string(link), dest)
		}
		out, err := os.Create(dest)
		if err != nil {
			return err
		}
		defer func() {
			if err := out.Close(); err != nil {
				t.Fatalf("could not close output file: %v", err)
			}
		}()
		in, err := os.Open(path)
		if err != nil {
			return err
		}
		defer func() {
			if err := in.Close(); err != nil {
				t.Fatalf("could not close input file: %v", err)
			}
		}()
		if _, err := io.Copy(out, in); err != nil {
			return err
		}
		return nil
	}); err != nil {
		t.Fatalf("failed to copy input to temp dir: %v", err)
	}
	return tempDir
}

const (
	artifactPath = "artifacts"
	logPath      = "logs"
)

func optionsForTestData(location string) Options {
	return Options{
		GcsOptions: &gcsupload.Options{
			Items: []string{filepath.Join(location, artifactPath)},
		},
		Entries: []wrapper.Options{
			{ProcessLog: filepath.Join(location, logPath, "one.log")},
			{ProcessLog: filepath.Join(location, logPath, "two.log")},
			{ProcessLog: filepath.Join(location, logPath, "three.log")},
		},
		CensoringOptions: &CensoringOptions{
			SecretDirectories:  []string{"testdata/secrets"},
			ExcludeDirectories: []string{"**/exclude"},
			IniFilenames:       []string{".awscred"},
		},
	}
}

// TestCensorRobustnessForCorruptArchive tests that all possible artifacts are censored even in
// the presence of a corrupt archive (i.e. that the censoring does not bail out too soon).
func TestCensorRobustnessForCorruptArchive(t *testing.T) {
	// copy input to a temp dir so we don't touch the golden input files
	tempDir := copyTestData(t)
	// also, tar up part of the input - it's not trivial to diff two tarballs while caring
	// only about file content, not metadata, so this test tars up the archive from the
	// input and untars it after the fact for simple diffs and updates
	archiveDir := filepath.Join(tempDir, artifactPath, "archive")

	// create a corrupt archive as well to test for resiliency
	corruptArchiveFile := filepath.Join(tempDir, artifactPath, "corrupt.tar.gz")
	if err := archive(archiveDir, corruptArchiveFile); err != nil {
		t.Fatalf("failed to archive input: %v", err)
	}
	file, err := os.OpenFile(corruptArchiveFile, os.O_RDWR, 0666)
	if err != nil {
		t.Fatalf("failed to open archived input: %v", err)
	}
	raw, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("failed to read archived input: %v", err)
	}
	// the byte at offset 3 of a gzip header is the FLG field, which advertises optional
	// header sections; overwriting it with a value that claims sections the archive does
	// not actually contain means the data is still detected as gzip, but reading the
	// archive becomes impossible.
	// ref: https://datatracker.ietf.org/doc/html/rfc1952#page-5
	raw[3] = 0x6
	if n, err := file.WriteAt(raw, 0); err != nil || n != len(raw) {
		t.Fatalf("failed to write corrupted archive: wrote %d (of %d) bytes, err: %v", n, len(raw), err)
	}
	options := optionsForTestData(tempDir)

	// we expect censoring to fail on the corrupt archive
	expectedError := fmt.Sprintf("could not censor archive %s: could not unpack archive: could not read archive: unexpected EOF", corruptArchiveFile)
	if diff := cmp.Diff(expectedError, options.censor().Error()); diff != "" {
		t.Errorf("censor() did not end with expected error:\n%s", diff)
	}

	if err := os.Remove(corruptArchiveFile); err != nil {
		t.Fatalf("failed to remove archive: %v", err)
	}

	testutil.CompareWithFixtureDir(t, "testdata/output", tempDir)
}
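// corruptGzipFlagSketch is a hypothetical, stand-alone illustration of the
// corruption trick used above; it is not part of the code under test. Per
// RFC 1952, bytes 0-1 of a gzip stream are the magic number, byte 2 the
// compression method, and byte 3 the FLG field. Overwriting FLG so that it
// advertises optional header sections that were never written leaves the data
// detectable as gzip, but decoding it fails.
func corruptGzipFlagSketch() error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("some payload")); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	raw := buf.Bytes()
	// claim FHCRC and FEXTRA sections that the stream does not contain
	raw[3] = 0x6
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		// for small payloads the misparsed header may already fail here
		return err
	}
	defer zr.Close()
	// otherwise the failure surfaces while reading the (misparsed) body
	_, err = io.ReadAll(zr)
	return err
}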
func TestCensorIntegration(t *testing.T) {
	// copy input to a temp dir so we don't touch the golden input files
	tempDir := copyTestData(t)
	// also, tar up part of the input - it's not trivial to diff two tarballs while caring
	// only about file content, not metadata, so this test tars up the archive from the
	// input and untars it after the fact for simple diffs and updates
	archiveDir := filepath.Join(tempDir, artifactPath, "archive")
	archiveFile := filepath.Join(tempDir, artifactPath, "archive.tar.gz")
	if err := archive(archiveDir, archiveFile); err != nil {
		t.Fatalf("failed to archive input: %v", err)
	}

	bufferSize := 1
	options := optionsForTestData(tempDir)

	// this is smaller than the size of any secret, so it exercises our buffer-size calculation
	options.CensoringOptions.CensoringBufferSize = &bufferSize

	if err := options.censor(); err != nil {
		t.Fatalf("got an error from censoring: %v", err)
	}

	if err := unarchive(archiveFile, archiveDir); err != nil {
		t.Fatalf("failed to unarchive input: %v", err)
	}
	if err := os.Remove(archiveFile); err != nil {
		t.Fatalf("failed to remove archive: %v", err)
	}

	testutil.CompareWithFixtureDir(t, "testdata/output", tempDir)
}

func TestArchiveMatchesTar(t *testing.T) {
	tempDir := t.TempDir()
	archiveOutput := filepath.Join(tempDir, "archive.tar.gz")
	archiveDir := "testdata/archives"
	archiveInputs := filepath.Join(archiveDir, "archive/")
	if err := archive(archiveInputs, archiveOutput); err != nil {
		t.Fatalf("failed to archive input: %v", err)
	}
	tarOutput := t.TempDir()
	cmd := exec.Command("tar", "-C", tarOutput, "-xzvf", archiveOutput)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("could not run tar: %v:\n %s", err, string(out))
	}
	testutil.CompareWithFixtureDir(t, tarOutput, archiveInputs)
}

func TestUnarchive(t *testing.T) {
	unarchiveOutput := t.TempDir()
	archiveDir := "testdata/archives"
	archiveInputs := filepath.Join(archiveDir, "archive/")
	archiveFile := filepath.Join(archiveDir, "archive.tar.gz")
	if err := unarchive(archiveFile, unarchiveOutput); err != nil {
		t.Fatalf("failed to unarchive input: %v", err)
	}
	testutil.CompareWithFixtureDir(t, archiveInputs, unarchiveOutput)
}

func TestUnarchiveMatchesTar(t *testing.T) {
	unarchiveOutput := t.TempDir()
	archiveDir := "testdata/archives"
	archiveFile := filepath.Join(archiveDir, "archive.tar.gz")
	if err := unarchive(archiveFile, unarchiveOutput); err != nil {
		t.Fatalf("failed to unarchive input: %v", err)
	}
	tarOutput := t.TempDir()
	cmd := exec.Command("tar", "-C", tarOutput, "-xzvf", archiveFile)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("could not run tar: %v:\n %s", err, string(out))
	}
	testutil.CompareWithFixtureDir(t, tarOutput, unarchiveOutput)
}

func TestRoundTrip(t *testing.T) {
	tempDir := t.TempDir()
	archiveOutput := filepath.Join(tempDir, "archive.tar.gz")
	unarchiveOutput := filepath.Join(tempDir, "archive/")
	archiveDir := "testdata/archives"
	archiveInputs := filepath.Join(archiveDir, "archive/")
	if err := archive(archiveInputs, archiveOutput); err != nil {
		t.Fatalf("failed to archive input: %v", err)
	}
	if err := unarchive(archiveOutput, unarchiveOutput); err != nil {
		t.Fatalf("failed to unarchive input: %v", err)
	}
	testutil.CompareWithFixtureDir(t, archiveInputs, unarchiveOutput)
}

func TestLoadDockerCredentials(t *testing.T) {
	expected := []string{"a", "b", "c", "d", "e", "f"}
	dockercfgraw := []byte(`{
  "registry": {
    "password": "a",
    "auth": "b"
  },
  "other": {
    "password": "c",
    "auth": "d"
  },
  "third": {
    "auth": "e"
  },
  "fourth": {
    "password": "f"
  }
}`)
	dockerconfigjsonraw := []byte(`{
  "auths": {
    "registry": {
      "password": "a",
      "auth": "b"
    },
    "other": {
      "password": "c",
      "auth": "d"
    },
    "third": {
      "auth": "e"
    },
    "fourth": {
      "password": "f"
    }
  }
}`)
	malformed := []byte(`notreallyjson`)

	if _, err := loadDockercfgAuths(malformed); err == nil {
		t.Error("dockercfg: expected loading malformed data to error, but it did not")
	}
	if _, err := loadDockerconfigJsonAuths(malformed); err == nil {
		t.Error("dockerconfigjson: expected loading malformed data to error, but it did not")
	}

	actual, err := loadDockercfgAuths(dockercfgraw)
	if err != nil {
		t.Errorf("dockercfg: expected loading data not to error, but it did: %v", err)
	}
	sort.Strings(actual)
	if diff := cmp.Diff(actual, expected); diff != "" {
		t.Errorf("dockercfg: got incorrect values: %s", diff)
	}

	actual, err = loadDockerconfigJsonAuths(dockerconfigjsonraw)
	if err != nil {
		t.Errorf("dockerconfigjson: expected loading data not to error, but it did: %v", err)
	}
	sort.Strings(actual)
	if diff := cmp.Diff(actual, expected); diff != "" {
		t.Errorf("dockerconfigjson: got incorrect values: %s", diff)
	}
}
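// dockercfgSecretsSketch is a hypothetical, stand-alone illustration of how
// secret-bearing fields can be pulled out of the legacy .dockercfg shape used
// in the fixture above, where registries are keyed at the top level (the
// .dockerconfigjson shape only adds an "auths" wrapper around the same map).
// It is not the loadDockercfgAuths implementation under test.
func dockercfgSecretsSketch(raw []byte) ([]string, error) {
	var cfg map[string]struct {
		Password string `json:"password"`
		Auth     string `json:"auth"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	var values []string
	for _, entry := range cfg {
		// only non-empty fields are worth registering with a censorer
		if entry.Password != "" {
			values = append(values, entry.Password)
		}
		if entry.Auth != "" {
			values = append(values, entry.Auth)
		}
	}
	return values, nil
}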
false", 385 options: CensoringOptions{ 386 IncludeDirectories: []string{"/tmp/**/*"}, 387 }, 388 path: "/usr/bin/bash", 389 expected: false, 390 }, 391 { 392 name: "matching include censors", 393 options: CensoringOptions{ 394 IncludeDirectories: []string{"/usr/**/*"}, 395 }, 396 path: "/usr/bin/bash", 397 expected: true, 398 }, 399 { 400 name: "matching include and exclude does not censor", 401 options: CensoringOptions{ 402 IncludeDirectories: []string{"/usr/**/*"}, 403 ExcludeDirectories: []string{"/usr/bin/**/*"}, 404 }, 405 path: "/usr/bin/bash", 406 expected: false, 407 }, 408 { 409 name: "matching exclude does not censor", 410 options: CensoringOptions{ 411 ExcludeDirectories: []string{"/usr/bin/**/*"}, 412 }, 413 path: "/usr/bin/bash", 414 expected: false, 415 }, 416 } 417 for _, testCase := range testCases { 418 t.Run(testCase.name, func(t *testing.T) { 419 should, err := shouldCensor(testCase.options, testCase.path) 420 if err != nil { 421 t.Fatalf("%s: got an error from shouldCensor: %v", testCase.name, err) 422 } 423 if should != testCase.expected { 424 t.Errorf("%s: expected %v, got %v", testCase.name, testCase.expected, should) 425 } 426 }) 427 } 428 }