github.com/rudderlabs/rudder-go-kit@v0.30.0/filemanager/filemanager_test.go (about)

     1  package filemanager_test
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"flag"
     7  	"fmt"
     8  	"io"
     9  	"log"
    10  	"net/http"
    11  	"os"
    12  	"os/signal"
    13  	"path"
    14  	"path/filepath"
    15  	"regexp"
    16  	"strings"
    17  	"syscall"
    18  	"testing"
    19  	"time"
    20  
    21  	"cloud.google.com/go/storage"
    22  	jsoniter "github.com/json-iterator/go"
    23  	"github.com/minio/minio-go/v7"
    24  	"github.com/minio/minio-go/v7/pkg/credentials"
    25  	"github.com/ory/dockertest/v3"
    26  	"github.com/stretchr/testify/assert"
    27  	"github.com/stretchr/testify/require"
    28  	"google.golang.org/api/option"
    29  
    30  	"github.com/Azure/azure-storage-blob-go/azblob"
    31  
    32  	"github.com/rudderlabs/rudder-go-kit/config"
    33  	"github.com/rudderlabs/rudder-go-kit/filemanager"
    34  	"github.com/rudderlabs/rudder-go-kit/httputil"
    35  	"github.com/rudderlabs/rudder-go-kit/logger"
    36  )
    37  
    38  var (
    39  	AzuriteEndpoint, gcsURL, minioEndpoint, azureSASTokens string
    40  	base64Secret                                           = base64.StdEncoding.EncodeToString([]byte(secretAccessKey))
    41  	bucket                                                 = "filemanager-test-1"
    42  	region                                                 = "us-east-1"
    43  	accessKeyId                                            = "MYACCESSKEY"
    44  	secretAccessKey                                        = "MYSECRETKEY"
    45  	hold                                                   bool
    46  	regexRequiredSuffix                                    = regexp.MustCompile(".json.gz$")
    47  	fileList                                               []string
    48  )
    49  
    50  func TestMain(m *testing.M) {
    51  	config.Reset()
    52  	logger.Reset()
    53  
    54  	os.Exit(run(m))
    55  }
    56  
    57  // run spins up the MinIO, Azurite and fake GCS containers, prepares the test data and runs the tests.
    58  func run(m *testing.M) int {
    59  	flag.BoolVar(&hold, "hold", false, "hold environment clean-up after test execution until Ctrl+C is provided")
    60  	flag.Parse()
    61  
    62  	// docker pool setup
    63  	pool, err := dockertest.NewPool("")
    64  	if err != nil {
    65  		panic(fmt.Errorf("Could not connect to docker: %s", err))
    66  	}
    67  
    68  	// running minio container on docker
    69  	minioResource, err := pool.RunWithOptions(&dockertest.RunOptions{
    70  		Repository: "minio/minio",
    71  		Tag:        "latest",
    72  		Cmd:        []string{"server", "/data"},
    73  		Env: []string{
    74  			fmt.Sprintf("MINIO_ACCESS_KEY=%s", accessKeyId),
    75  			fmt.Sprintf("MINIO_SECRET_KEY=%s", secretAccessKey),
    76  			fmt.Sprintf("MINIO_SITE_REGION=%s", region),
    77  		},
    78  	})
    79  	if err != nil {
    80  		panic(fmt.Errorf("Could not start resource: %s", err))
    81  	}
    82  	defer func() {
    83  		if err := pool.Purge(minioResource); err != nil {
    84  			log.Printf("Could not purge resource: %s", err)
    85  		}
    86  	}()
    87  
    88  	minioEndpoint = fmt.Sprintf("localhost:%s", minioResource.GetPort("9000/tcp"))
    89  
    90  	// check if minio server is up & running.
    91  	if err := pool.Retry(func() error {
    92  		url := fmt.Sprintf("http://%s/minio/health/live", minioEndpoint)
    93  		resp, err := http.Get(url)
    94  		if err != nil {
    95  			return err
    96  		}
    97  		defer func() { httputil.CloseResponse(resp) }()
    98  
    99  		if resp.StatusCode != http.StatusOK {
   100  			return fmt.Errorf("status code not OK")
   101  		}
   102  		return nil
   103  	}); err != nil {
   104  		log.Fatalf("Could not connect to docker: %s", err)
   105  	}
   106  	fmt.Println("minio is up & running properly")
   107  
   108  	useSSL := false
   109  	minioClient, err := minio.New(minioEndpoint, &minio.Options{
   110  		Creds:  credentials.NewStaticV4(accessKeyId, secretAccessKey, ""),
   111  		Secure: useSSL,
   112  	})
   113  	if err != nil {
   114  		panic(err)
   115  	}
   116  	fmt.Println("minioClient created successfully")
   117  
   118  	// creating bucket inside minio where testing will happen.
   119  	err = minioClient.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: region})
   120  	if err != nil {
   121  		panic(err)
   122  	}
   123  	fmt.Println("bucket created successfully")
   124  
   125  	// Running Azure emulator, Azurite.
   126  	AzuriteResource, err := pool.RunWithOptions(&dockertest.RunOptions{
   127  		Repository: "mcr.microsoft.com/azure-storage/azurite",
   128  		Tag:        "latest",
   129  		Env: []string{
   130  			fmt.Sprintf("AZURITE_ACCOUNTS=%s:%s", accessKeyId, base64Secret),
   131  			fmt.Sprintf("DefaultEndpointsProtocol=%s", "http"),
   132  		},
   133  	})
   134  	if err != nil {
   135  		log.Fatalf("Could not start azure resource: %s", err)
   136  	}
   137  	defer func() {
   138  		if err := pool.Purge(AzuriteResource); err != nil {
   139  			log.Printf("Could not purge resource: %s", err)
   140  		}
   141  	}()
   142  	AzuriteEndpoint = fmt.Sprintf("localhost:%s", AzuriteResource.GetPort("10000/tcp"))
   143  	fmt.Println("Azurite endpoint", AzuriteEndpoint)
   144  	fmt.Println("azurite resource successfully created")
   145  
   146  	azureSASTokens, err = createAzureSASTokens()
   147  	if err != nil {
   148  		log.Fatalf("Could not create azure sas tokens: %s", err)
   149  	}
   150  
   151  	// Running GCS emulator
   152  	GCSResource, err := pool.RunWithOptions(&dockertest.RunOptions{
   153  		Repository: "fsouza/fake-gcs-server",
   154  		Tag:        "1.45.2",
   155  		Cmd:        []string{"-scheme", "http", "-backend", "memory", "-location", "us-east-1"},
   156  	})
   157  	if err != nil {
   158  		log.Fatalf("Could not start resource: %s", err)
   159  	}
   160  	defer func() {
   161  		if err := pool.Purge(GCSResource); err != nil {
   162  			log.Printf("Could not purge resource: %s", err)
   163  		}
   164  	}()
   165  
   166  	GCSEndpoint := fmt.Sprintf("localhost:%s", GCSResource.GetPort("4443/tcp"))
   167  	fmt.Println("GCS test server successfully created with endpoint: ", GCSEndpoint)
   168  	gcsURL = fmt.Sprintf("http://%s/storage/v1/", GCSEndpoint)
   169  	os.Setenv("STORAGE_EMULATOR_HOST", GCSEndpoint)
   170  	os.Setenv("RSERVER_WORKLOAD_IDENTITY_TYPE", "GKE")
   171  
   172  	for i := 0; i < 10; i++ {
   173  
   174  		if err := func() error {
   175  			client, err := storage.NewClient(context.TODO(), option.WithEndpoint(gcsURL))
   176  			if err != nil {
   177  				return fmt.Errorf("failed to create client: %w", err)
   178  			}
   179  			bkt := client.Bucket(bucket)
   180  			err = bkt.Create(context.Background(), "test", &storage.BucketAttrs{Name: bucket})
   181  			if err != nil {
   182  				return fmt.Errorf("failed to create bucket: %w", err)
   183  			}
   184  			return nil
   185  		}(); err != nil {
   186  			if i == 9 {
   187  				log.Fatalf("Could not connect to docker: %s", err)
   188  			}
   189  			time.Sleep(time.Second)
   190  			continue
   191  		}
   192  		fmt.Println("bucket created successfully")
   193  		break
   194  	}
   195  
   196  	// get the list of files under `goldenDirectory`; they are used as fixtures for testing the filemanager.
   197  	searchDir := "./goldenDirectory"
   198  	err = filepath.Walk(searchDir, func(path string, f os.FileInfo, err error) error {
   199  		if regexRequiredSuffix.MatchString(path) {
   200  			fileList = append(fileList, path)
   201  		}
   202  		return nil
   203  	})
   204  	if err != nil {
   205  		panic(err)
   206  	}
   207  	if len(fileList) == 0 {
   208  		panic("file list empty, no data to test.")
   209  	}
   210  	fmt.Println("files list: ", fileList)
   211  
   212  	code := m.Run()
   213  	blockOnHold()
   214  	return code
   215  }
   216  
   217  func createAzureSASTokens() (string, error) {
   218  	credential, err := azblob.NewSharedKeyCredential(accessKeyId, base64Secret)
   219  	if err != nil {
   220  		return "", err
   221  	}
   222  
   223  	sasQueryParams, err := azblob.AccountSASSignatureValues{
   224  		Protocol:      azblob.SASProtocolHTTPSandHTTP,
   225  		ExpiryTime:    time.Now().UTC().Add(1 * time.Hour),
   226  		Permissions:   azblob.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true}.String(),
   227  		Services:      azblob.AccountSASServices{Blob: true}.String(),
   228  		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
   229  	}.NewSASQueryParameters(credential)
   230  	if err != nil {
   231  		return "", err
   232  	}
   233  
   234  	return sasQueryParams.Encode(), nil
   235  }
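
// Illustrative sketch only (not referenced by the tests): the SAS token produced by
// createAzureSASTokens is consumed through the AZURE_BLOB destination config, using the
// same keys as the SAS-token test case below. The helper name exampleAzureSASConfig is
// hypothetical and exists purely to show the expected config shape.
func exampleAzureSASConfig(sasToken string) map[string]interface{} {
	return map[string]interface{}{
		"containerName":  bucket,
		"prefix":         "some-prefix",
		"accountName":    accessKeyId,
		"useSASTokens":   true,
		"sasToken":       sasToken,
		"endPoint":       AzuriteEndpoint,
		"forcePathStyle": true,
		"disableSSL":     true,
	}
}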
   236  
   237  func TestFileManager(t *testing.T) {
   238  	tests := []struct {
   239  		name          string
   240  		skip          string
   241  		destName      string
   242  		config        map[string]interface{}
   243  		otherPrefixes []string
   244  	}{
   245  		{
   246  			name:          "testing s3manager functionality",
   247  			destName:      "S3",
   248  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   249  			config: map[string]interface{}{
   250  				"bucketName":       bucket,
   251  				"accessKeyID":      accessKeyId,
   252  				"accessKey":        secretAccessKey,
   253  				"enableSSE":        false,
   254  				"prefix":           "some-prefix",
   255  				"endPoint":         minioEndpoint,
   256  				"s3ForcePathStyle": true,
   257  				"disableSSL":       true,
   258  				"region":           region,
   259  			},
   260  		},
   261  		{
   262  			name:          "testing minio functionality",
   263  			destName:      "MINIO",
   264  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   265  			config: map[string]interface{}{
   266  				"bucketName":       bucket,
   267  				"accessKeyID":      accessKeyId,
   268  				"secretAccessKey":  secretAccessKey,
   269  				"enableSSE":        false,
   270  				"prefix":           "some-prefix",
   271  				"endPoint":         minioEndpoint,
   272  				"s3ForcePathStyle": true,
   273  				"disableSSL":       true,
   274  				"region":           region,
   275  			},
   276  		},
   277  		{
   278  			name:          "testing digital ocean functionality",
   279  			destName:      "DIGITAL_OCEAN_SPACES",
   280  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   281  			config: map[string]interface{}{
   282  				"bucketName":     bucket,
   283  				"accessKeyID":    accessKeyId,
   284  				"accessKey":      secretAccessKey,
   285  				"prefix":         "some-prefix",
   286  				"endPoint":       minioEndpoint,
   287  				"forcePathStyle": true,
   288  				"disableSSL":     true,
   289  				"region":         region,
   290  				"enableSSE":      false,
   291  			},
   292  		},
   293  		{
   294  			name:          "testing Azure blob storage filemanager functionality with account keys configured",
   295  			destName:      "AZURE_BLOB",
   296  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   297  			config: map[string]interface{}{
   298  				"containerName":  bucket,
   299  				"prefix":         "some-prefix",
   300  				"accountName":    accessKeyId,
   301  				"accountKey":     base64Secret,
   302  				"endPoint":       AzuriteEndpoint,
   303  				"forcePathStyle": true,
   304  				"disableSSL":     true,
   305  			},
   306  		},
   307  		{
   308  			name:          "testing Azure blob storage filemanager functionality with sas tokens configured",
   309  			destName:      "AZURE_BLOB",
   310  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   311  			config: map[string]interface{}{
   312  				"containerName":  bucket,
   313  				"prefix":         "some-prefix",
   314  				"accountName":    accessKeyId,
   315  				"useSASTokens":   true,
   316  				"sasToken":       azureSASTokens,
   317  				"endPoint":       AzuriteEndpoint,
   318  				"forcePathStyle": true,
   319  				"disableSSL":     true,
   320  			},
   321  		},
   322  		{
   323  			name:          "testing GCS filemanager functionality",
   324  			destName:      "GCS",
   325  			otherPrefixes: []string{"other-prefix-1", "other-prefix-2"},
   326  			config: map[string]interface{}{
   327  				"bucketName": bucket,
   328  				"prefix":     "some-prefix",
   329  				"endPoint":   gcsURL,
   330  				"disableSSL": true,
   331  				"jsonReads":  true,
   332  			},
   333  		},
   334  	}
   335  
   336  	for _, tt := range tests {
   337  
   338  		t.Run(tt.name, func(t *testing.T) {
   339  			if tt.skip != "" {
   340  				t.Skip(tt.skip)
   341  			}
   342  			fm, err := filemanager.New(&filemanager.Settings{
   343  				Provider: tt.destName,
   344  				Config:   tt.config,
   345  				Logger:   logger.NOP,
   346  			})
   347  			if err != nil {
   348  				t.Fatal(err)
   349  			}
   350  			prefix := tt.config["prefix"].(string)
   351  
   352  			var prefixes []string
   353  			if prefix != "" {
   354  				prefixes = append(prefixes, prefix)
   355  			}
   356  			prefixes = append(prefixes, tt.otherPrefixes...)
   357  
   358  			// upload all files
   359  			uploadOutputs := make([]filemanager.UploadedFile, 0)
   360  			for _, file := range fileList {
   361  				filePtr, err := os.Open(file)
   362  				require.NoError(t, err, "error while opening testData file to upload")
   363  				uploadOutput, err := fm.Upload(context.TODO(), filePtr, tt.otherPrefixes...)
   364  				require.NoError(t, err, "error while uploading file")
   365  				paths := append([]string{}, prefixes...)
   366  				paths = append(paths, path.Base(file))
   367  				require.Equal(t, path.Join(paths...),
   368  					uploadOutput.ObjectName)
   369  				uploadOutputs = append(uploadOutputs, uploadOutput)
   370  				filePtr.Close()
   371  			}
   372  
   373  			// list files using ListFilesWithPrefix
   374  			originalFileObject := make([]*filemanager.FileInfo, 0)
   375  			originalFileNames := make(map[string]int)
   376  			fileListNames := make(map[string]int)
   377  
   378  			session := fm.ListFilesWithPrefix(context.TODO(), path.Join(prefixes...), "", 1)
   379  			for i := 0; i < len(fileList); i++ {
   380  				files, err := session.Next()
   381  				require.NoError(t, err, "expected no error while listing files")
   382  				require.Equal(t, 1, len(files), "number of files should be 1")
   383  				originalFileObject = append(originalFileObject, files[0])
   384  				originalFileNames[files[0].Key]++
   385  				paths := append([]string{}, prefixes...)
   386  				paths = append(paths, path.Base(fileList[i]))
   387  				fileListNames[path.Join(paths...)]++
   388  			}
   389  			require.Equal(t, len(fileList), len(originalFileObject), "actual number of files different than expected")
   390  			for fileListName, count := range fileListNames {
   391  				require.Equal(t, count, originalFileNames[fileListName], "files different than expected when listed")
   392  			}
   393  
   394  			tempFm, err := filemanager.New(&filemanager.Settings{
   395  				Provider: tt.destName,
   396  				Config:   tt.config,
   397  				Logger:   logger.NOP,
   398  			})
   399  			if err != nil {
   400  				t.Fatal(err)
   401  			}
   402  
   403  			iteratorMap := make(map[string]int)
   404  			iteratorCount := 0
   405  			iter := filemanager.IterateFilesWithPrefix(context.TODO(), path.Join(prefixes...), "", int64(len(fileList)), tempFm)
   406  			for iter.Next() {
   407  				iteratorFile := iter.Get().Key
   408  				iteratorMap[iteratorFile]++
   409  				iteratorCount++
   410  			}
   411  			require.NoError(t, iter.Err(), "no error expected while iterating files")
   412  			require.Equal(t, len(fileList), iteratorCount, "actual number of files different than expected")
   413  			for fileListName, count := range fileListNames {
   414  				require.Equal(t, count, iteratorMap[fileListName], "files different than expected when iterated")
   415  			}
   416  
   417  			// based on the obtained location, get object name by calling GetObjectNameFromLocation
   418  			objectName, err := fm.GetObjectNameFromLocation(uploadOutputs[0].Location)
   419  			require.NoError(t, err, "no error expected")
   420  			require.Equal(t, uploadOutputs[0].ObjectName, objectName, "actual object name different than expected")
   421  
   422  			// also get download key from file location by calling GetDownloadKeyFromFileLocation
   423  			expectedKey := uploadOutputs[0].ObjectName
   424  			key := fm.GetDownloadKeyFromFileLocation(uploadOutputs[0].Location)
   425  			require.Equal(t, expectedKey, key, "actual object key different than expected")
   426  
   427  			// get prefix based on config
   428  			splitString := strings.Split(uploadOutputs[0].ObjectName, "/")
   429  			var expectedPrefix string
   430  			if len(splitString) > 1 {
   431  				expectedPrefix = splitString[0]
   432  			}
   433  			actualPrefix := fm.Prefix()
   434  			require.Equal(t, expectedPrefix, actualPrefix, "actual prefix different than expected")
   435  
   436  			// download one of the files & assert that it matches the original local copy.
   437  			filePtr, err := os.Open(fileList[0])
   438  			if err != nil {
   439  				t.Fatalf("error: %s while opening file: %s", err, fileList[0])
   440  			}
   441  			originalFile, err := io.ReadAll(filePtr)
   442  			if err != nil {
   443  				t.Fatalf("error: %s while reading file: %s", err, fileList[0])
   444  			}
   445  			filePtr.Close()
   446  
   447  			DownloadedFileName := path.Join(t.TempDir(), "TmpDownloadedFile")
   448  
   449  			// fail to download the file with cancelled context
   450  			filePtr, err = os.OpenFile(DownloadedFileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
   451  			if err != nil {
   452  				t.Fatalf("error while creating file to download data: %s", err)
   453  			}
   454  			ctx, cancel := context.WithCancel(context.TODO())
   455  			cancel()
   456  			err = fm.Download(ctx, filePtr, key)
   457  			require.Error(t, err, "expected error while downloading file")
   458  			filePtr.Close()
   459  
   460  			filePtr, err = os.OpenFile(DownloadedFileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
   461  			if err != nil {
   462  				t.Fatalf("error while creating file to download data: %s", err)
   463  			}
   464  			err = fm.Download(context.TODO(), filePtr, key)
   465  
   466  			require.NoError(t, err, "expected no error")
   467  			filePtr.Close()
   468  			filePtr, err = os.OpenFile(DownloadedFileName, os.O_RDWR, 0o644)
   469  			if err != nil {
   470  				t.Fatalf("error while opening downloaded file for reading: %s", err)
   471  			}
   472  			downloadedFile, err := io.ReadAll(filePtr)
   473  			if err != nil {
   474  				t.Fatalf("error while reading downloaded file: %s", err)
   475  			}
   476  			filePtr.Close()
   477  
   478  			ans := strings.Compare(string(originalFile), string(downloadedFile))
   479  			require.Equal(t, 0, ans, "downloaded file different than actual file")
   480  
   481  			// fail to delete the file with cancelled context
   482  			ctx, cancel = context.WithCancel(context.TODO())
   483  			cancel()
   484  			err = fm.Delete(ctx, []string{key})
   485  			require.Error(t, err, "expected error while deleting file")
   486  
   487  			// delete that file
   488  			err = fm.Delete(context.TODO(), []string{key})
   489  			require.NoError(t, err, "expected no error while deleting object")
   490  			// list files again & assert that the deleted file is no longer present.
   491  			fmNew, err := filemanager.New(&filemanager.Settings{
   492  				Provider: tt.destName,
   493  				Config:   tt.config,
   494  				Logger:   logger.NOP,
   495  			})
   496  			if err != nil {
   497  				t.Fatal(err)
   498  			}
   499  			newFileObject, err := fmNew.ListFilesWithPrefix(context.TODO(), "", "", 1000).Next()
   500  			if err != nil {
   501  				t.Fatalf("error while getting new file object: %s", err)
   502  			}
   503  			require.Equal(t, len(originalFileObject)-1, len(newFileObject), "expected the new file list to be exactly one file shorter than the original")
   504  		})
   505  
   506  		t.Run(tt.name, func(t *testing.T) {
   507  			if tt.skip != "" {
   508  				t.Skip(tt.skip)
   509  			}
   510  			fm, err := filemanager.New(&filemanager.Settings{
   511  				Provider: tt.destName,
   512  				Config:   tt.config,
   513  				Logger:   logger.NOP,
   514  			})
   515  			if err != nil {
   516  				t.Fatal(err)
   517  			}
   518  
   519  			// fail to upload the file with a cancelled context
   520  			file := fileList[0]
   521  			filePtr, err := os.Open(file)
   522  			require.NoError(t, err, "error while opening testData file to upload")
   523  			ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
   524  			cancel()
   525  			_, err = fm.Upload(ctx, filePtr)
   526  			require.Error(t, err, "expected error while uploading file")
   527  			filePtr.Close()
   528  
   529  			// the MinIO client does not fail listing when the context is already cancelled, so skip these assertions for MINIO
   530  			if tt.destName != "MINIO" {
   531  				// fail to fetch file list
   532  				ctx1, cancel := context.WithTimeout(context.TODO(), time.Second*5)
   533  				cancel()
   534  				_, err = fm.ListFilesWithPrefix(ctx1, "", "", 1000).Next()
   535  				require.Error(t, err, "expected error while listing files")
   536  
   537  				iter := filemanager.IterateFilesWithPrefix(ctx1, "", "", 1000, fm)
   538  				next := iter.Next()
   539  				require.Equal(t, false, next, "next should be false when context is cancelled")
   540  				err = iter.Err()
   541  				require.Error(t, err, "expected error while iterating files")
   542  			}
   543  		})
   544  
   545  	}
   546  }
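
// Minimal usage sketch of the filemanager API exercised above (upload, key derivation,
// download, delete). It assumes the MinIO container started in run() is still up; the
// function name and the hard-coded S3 config mirror the first test case and are
// illustrative only, they are not part of the test suite.
func exampleUploadDownloadRoundTrip(ctx context.Context) error {
	fm, err := filemanager.New(&filemanager.Settings{
		Provider: "S3",
		Config: map[string]interface{}{
			"bucketName":       bucket,
			"accessKeyID":      accessKeyId,
			"accessKey":        secretAccessKey,
			"endPoint":         minioEndpoint,
			"s3ForcePathStyle": true,
			"disableSSL":       true,
			"region":           region,
		},
		Logger: logger.NOP,
	})
	if err != nil {
		return err
	}

	// upload one of the golden files under an extra prefix
	f, err := os.Open(fileList[0])
	if err != nil {
		return err
	}
	defer f.Close()
	uploaded, err := fm.Upload(ctx, f, "example-prefix")
	if err != nil {
		return err
	}

	// derive the object key from the upload location and download the object back
	out, err := os.CreateTemp("", "downloaded")
	if err != nil {
		return err
	}
	defer out.Close()
	key := fm.GetDownloadKeyFromFileLocation(uploaded.Location)
	if err := fm.Download(ctx, out, key); err != nil {
		return err
	}

	// clean up the uploaded object
	return fm.Delete(ctx, []string{key})
}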
   547  
   548  func TestGCSManager_unsupported_credentials(t *testing.T) {
   549  	var config map[string]interface{}
   550  	err := jsoniter.Unmarshal(
   551  		[]byte(`{
   552  			"project": "my-project",
   553  			"location": "US",
   554  			"bucketName": "my-bucket",
   555  			"prefix": "rudder",
   556  			"namespace": "namespace",
   557  			"credentials":"{\"installed\":{\"client_id\":\"1234.apps.googleusercontent.com\",\"project_id\":\"project_id\",\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\",\"token_uri\":\"https://oauth2.googleapis.com/token\",\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\",\"client_secret\":\"client_secret\",\"redirect_uris\":[\"urn:ietf:wg:oauth:2.0:oob\",\"http://localhost\"]}}",
   558  			"syncFrequency": "1440",
   559  			"syncStartAt": "09:00"
   560  		}`),
   561  		&config,
   562  	)
   563  	assert.NoError(t, err)
   564  	manager, err := filemanager.NewGCSManager(config, logger.NOP, func() time.Duration { return time.Minute })
   565  	assert.NoError(t, err)
   566  	_, err = manager.ListFilesWithPrefix(context.TODO(), "", "/tests", 100).Next()
   567  	assert.NotNil(t, err)
   568  	assert.Contains(t, err.Error(), "client_credentials.json file is not supported")
   569  }
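
// For contrast with the rejected "installed" (OAuth client) credentials above: a Google
// Cloud service-account key, which is the credential shape such managers are normally
// configured with, looks roughly like the sketch below. All values are placeholders for
// illustration, not working credentials, and this variable is not used by the tests.
var exampleServiceAccountCredentials = `{
	"type": "service_account",
	"project_id": "my-project",
	"private_key_id": "<key-id>",
	"private_key": "-----BEGIN PRIVATE KEY-----\n<key-material>\n-----END PRIVATE KEY-----\n",
	"client_email": "uploader@my-project.iam.gserviceaccount.com"
}`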
   570  
   571  func blockOnHold() {
   572  	if !hold {
   573  		return
   574  	}
   575  
   576  	log.Println("Test on hold, before cleanup")
   577  	log.Println("Press Ctrl+C to exit")
   578  
   579  	c := make(chan os.Signal, 1)
   580  	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
   581  
   582  	<-c
   583  	close(c)
   584  }
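
// Illustrative invocation (the exact command line is an assumption, adjust as needed):
//
//	go test ./filemanager/... -args -hold
//
// With -hold set, blockOnHold keeps the docker containers (MinIO, Azurite, fake GCS)
// alive after the tests finish, until Ctrl+C or SIGTERM is received, which allows
// inspecting the emulators' state manually.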