github.com/psiphon-Labs/psiphon-tunnel-core@v2.0.28+incompatible/psiphon/dataStoreRecovery_test.go (about)

     1  //go:build !PSIPHON_USE_BADGER_DB && !PSIPHON_USE_FILES_DB
     2  // +build !PSIPHON_USE_BADGER_DB,!PSIPHON_USE_FILES_DB
     3  
     4  /*
     5   * Copyright (c) 2019, Psiphon Inc.
     6   * All rights reserved.
     7   *
     8   * This program is free software: you can redistribute it and/or modify
     9   * it under the terms of the GNU General Public License as published by
    10   * the Free Software Foundation, either version 3 of the License, or
    11   * (at your option) any later version.
    12   *
    13   * This program is distributed in the hope that it will be useful,
    14   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    15   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    16   * GNU General Public License for more details.
    17   *
    18   * You should have received a copy of the GNU General Public License
    19   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    20   *
    21   */
    22  
    23  package psiphon
    24  
    25  import (
    26  	"context"
    27  	"fmt"
    28  	"io/ioutil"
    29  	"os"
    30  	"path/filepath"
    31  	"strings"
    32  	"sync"
    33  	"testing"
    34  
    35  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
    36  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
    37  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
    38  )
    39  
// Set canTruncateOpenDataStore to false on platforms, such as Windows, where
// the OS doesn't allow an open memory-mapped file to be truncated. This will
// skip the associated test cases: TestBoltResiliency truncates the boltdb
// file while the datastore is still open to simulate runtime corruption, and
// that operation is not possible everywhere.
var canTruncateOpenDataStore = true
    44  
    45  func TestBoltResiliency(t *testing.T) {
    46  
    47  	testDataDirName, err := ioutil.TempDir("", "psiphon-bolt-recovery-test")
    48  	if err != nil {
    49  		t.Fatalf("TempDir failed: %s", err)
    50  	}
    51  	defer os.RemoveAll(testDataDirName)
    52  
    53  	SetEmitDiagnosticNotices(true, true)
    54  
    55  	clientConfigJSON := `
    56      {
    57          "ClientPlatform" : "",
    58          "ClientVersion" : "0",
    59          "SponsorId" : "0",
    60          "PropagationChannelId" : "0",
    61          "ConnectionWorkerPoolSize" : 10,
    62          "EstablishTunnelTimeoutSeconds" : 1,
    63          "EstablishTunnelPausePeriodSeconds" : 1
    64      }`
    65  
    66  	clientConfig, err := LoadConfig([]byte(clientConfigJSON))
    67  	if err != nil {
    68  		t.Fatalf("LoadConfig failed: %s", err)
    69  	}
    70  
    71  	clientConfig.DataRootDirectory = testDataDirName
    72  
    73  	err = clientConfig.Commit(false)
    74  	if err != nil {
    75  		t.Fatalf("Commit failed: %s", err)
    76  	}
    77  
    78  	serverEntryCount := 100
    79  
    80  	noticeCandidateServers := make(chan struct{}, 1)
    81  	noticeExiting := make(chan struct{}, 1)
    82  	noticeResetDatastore := make(chan struct{}, 1)
    83  	noticeDatastoreFailed := make(chan struct{}, 1)
    84  
    85  	SetNoticeWriter(NewNoticeReceiver(
    86  		func(notice []byte) {
    87  
    88  			noticeType, payload, err := GetNotice(notice)
    89  			if err != nil {
    90  				return
    91  			}
    92  
    93  			printNotice := false
    94  
    95  			switch noticeType {
    96  			case "CandidateServers":
    97  				count := int(payload["count"].(float64))
    98  				if count != serverEntryCount {
    99  					t.Fatalf("unexpected server entry count: %d", count)
   100  				}
   101  				select {
   102  				case noticeCandidateServers <- struct{}{}:
   103  				default:
   104  				}
   105  			case "Exiting":
   106  				select {
   107  				case noticeExiting <- struct{}{}:
   108  				default:
   109  				}
   110  			case "Alert":
   111  				message := payload["message"].(string)
   112  				var channel chan struct{}
   113  				if strings.Contains(message, "tryDatastoreOpenDB: reset") {
   114  					channel = noticeResetDatastore
   115  				} else if strings.Contains(message, "datastore has failed") {
   116  					channel = noticeDatastoreFailed
   117  				}
   118  				if channel != nil {
   119  					select {
   120  					case channel <- struct{}{}:
   121  					default:
   122  					}
   123  				}
   124  			}
   125  
   126  			if printNotice {
   127  				fmt.Printf("%s\n", string(notice))
   128  			}
   129  		}))
   130  
   131  	drainNoticeChannel := func(channel chan struct{}) {
   132  		for {
   133  			select {
   134  			case channel <- struct{}{}:
   135  			default:
   136  				return
   137  			}
   138  		}
   139  	}
   140  
   141  	drainNoticeChannels := func() {
   142  		drainNoticeChannel(noticeCandidateServers)
   143  		drainNoticeChannel(noticeExiting)
   144  		drainNoticeChannel(noticeResetDatastore)
   145  		drainNoticeChannel(noticeDatastoreFailed)
   146  	}
   147  
   148  	// Paving sufficient server entries, then truncating the datastore file to
   149  	// remove some server entry data, then iterating over all server entries (to
   150  	// produce the CandidateServers output) triggers datastore corruption
   151  	// detection and, at start up, reset/recovery.
   152  
   153  	paveServerEntries := func() {
   154  		for i := 0; i < serverEntryCount; i++ {
   155  
   156  			n := 16
   157  			fields := make(protocol.ServerEntryFields)
   158  			fields["ipAddress"] = fmt.Sprintf("127.0.0.%d", i+1)
   159  			fields["sshPort"] = 2222
   160  			fields["sshUsername"] = prng.HexString(n)
   161  			fields["sshPassword"] = prng.HexString(n)
   162  			fields["sshHostKey"] = prng.HexString(n)
   163  			fields["capabilities"] = []string{"SSH", "ssh-api-requests"}
   164  			fields["region"] = "US"
   165  			fields["configurationVersion"] = 1
   166  
   167  			fields.SetLocalSource(protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
   168  			fields.SetLocalTimestamp(
   169  				common.TruncateTimestampToHour(common.GetCurrentTimestamp()))
   170  
   171  			err = StoreServerEntry(fields, true)
   172  			if err != nil {
   173  				t.Fatalf("StoreServerEntry failed: %s", err)
   174  			}
   175  		}
   176  	}
   177  
   178  	startController := func() func() {
   179  		controller, err := NewController(clientConfig)
   180  		if err != nil {
   181  			t.Fatalf("NewController failed: %s", err)
   182  		}
   183  		ctx, cancelFunc := context.WithCancel(context.Background())
   184  		controllerWaitGroup := new(sync.WaitGroup)
   185  		controllerWaitGroup.Add(1)
   186  		go func() {
   187  			defer controllerWaitGroup.Done()
   188  			controller.Run(ctx)
   189  		}()
   190  		return func() {
   191  			cancelFunc()
   192  			controllerWaitGroup.Wait()
   193  		}
   194  	}
   195  
   196  	truncateDataStore := func() {
   197  		filename := filepath.Join(testDataDirName, "ca.psiphon.PsiphonTunnel.tunnel-core", "datastore", "psiphon.boltdb")
   198  		file, err := os.OpenFile(filename, os.O_RDWR, 0666)
   199  		if err != nil {
   200  			t.Fatalf("OpenFile failed: %s", err)
   201  		}
   202  		defer file.Close()
   203  		fileInfo, err := file.Stat()
   204  		if err != nil {
   205  			t.Fatalf("Stat failed: %s", err)
   206  		}
   207  		err = file.Truncate(fileInfo.Size() / 4)
   208  		if err != nil {
   209  			t.Fatalf("Truncate failed: %s", err)
   210  		}
   211  		err = file.Sync()
   212  		if err != nil {
   213  			t.Fatalf("Sync failed: %s", err)
   214  		}
   215  	}
   216  
   217  	// Populate datastore with 100 server entries.
   218  
   219  	err = OpenDataStore(clientConfig)
   220  	if err != nil {
   221  		t.Fatalf("OpenDataStore failed: %s", err)
   222  	}
   223  
   224  	paveServerEntries()
   225  
   226  	stopController := startController()
   227  
   228  	<-noticeCandidateServers
   229  
   230  	stopController()
   231  
   232  	CloseDataStore()
   233  
   234  	drainNoticeChannels()
   235  
   236  	// Truncate datastore file before running controller; expect a datastore
   237  	// "reset" notice on OpenDataStore.
   238  
   239  	t.Logf("test: recover from datastore corrupted before opening")
   240  
   241  	truncateDataStore()
   242  
   243  	err = OpenDataStore(clientConfig)
   244  	if err != nil {
   245  		t.Fatalf("OpenDataStore failed: %s", err)
   246  	}
   247  
   248  	<-noticeResetDatastore
   249  
   250  	if !canTruncateOpenDataStore {
   251  		CloseDataStore()
   252  		return
   253  	}
   254  
   255  	paveServerEntries()
   256  
   257  	// Truncate datastore while running the controller. First, complete one
   258  	// successful data scan (CandidateServers). The next scan should trigger a
   259  	// datastore "failed" notice.
   260  
   261  	t.Logf("test: detect corrupt datastore while running")
   262  
   263  	stopController = startController()
   264  
   265  	<-noticeCandidateServers
   266  
   267  	truncateDataStore()
   268  
   269  	<-noticeDatastoreFailed
   270  
   271  	<-noticeExiting
   272  
   273  	stopController()
   274  
   275  	CloseDataStore()
   276  
   277  	drainNoticeChannels()
   278  
   279  	// Restart successfully after previous failure shutdown.
   280  
   281  	t.Logf("test: after restart, recover from datastore corrupted while running")
   282  
   283  	err = OpenDataStore(clientConfig)
   284  	if err != nil {
   285  		t.Fatalf("OpenDataStore failed: %s", err)
   286  	}
   287  
   288  	<-noticeResetDatastore
   289  
   290  	paveServerEntries()
   291  
   292  	stopController = startController()
   293  
   294  	<-noticeCandidateServers
   295  
   296  	stopController()
   297  
   298  	CloseDataStore()
   299  }