github.com/Psiphon-Labs/psiphon-tunnel-core@v2.0.28+incompatible/psiphon/common/osl/osl.go

     1  /*
     2   * Copyright (c) 2016, Psiphon Inc.
     3   * All rights reserved.
     4   *
     5   * This program is free software: you can redistribute it and/or modify
     6   * it under the terms of the GNU General Public License as published by
     7   * the Free Software Foundation, either version 3 of the License, or
     8   * (at your option) any later version.
     9   *
    10   * This program is distributed in the hope that it will be useful,
    11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13   * GNU General Public License for more details.
    14   *
    15   * You should have received a copy of the GNU General Public License
    16   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17   *
    18   */
    19  
    20  // Package osl implements the Obfuscated Server List (OSL) mechanism. This
    21  // mechanism is a method of distributing server lists only to clients that
    22  // demonstrate certain behavioral traits. Clients are seeded with Server
    23  // List Obfuscation Keys (SLOKs) as they meet the configured criteria. These
    24  // keys are stored and later combined to assemble keys to decrypt out-of-band
    25  // distributed OSL files that contain server lists.
    26  //
    27  // This package contains the core routines used in psiphond (to track client
    28  // traits and issue SLOKs), clients (to manage SLOKs and decrypt OSLs), and
    29  // automation (to create OSLs for distribution).
    30  package osl
    31  
    32  import (
    33  	"crypto/aes"
    34  	"crypto/cipher"
    35  	"crypto/hmac"
    36  	"crypto/md5"
    37  	"crypto/sha256"
    38  	"encoding/base64"
    39  	"encoding/binary"
    40  	"encoding/hex"
    41  	"encoding/json"
    42  	std_errors "errors"
    43  	"fmt"
    44  	"io"
    45  	"net"
    46  	"net/url"
    47  	"path"
    48  	"path/filepath"
    49  	"sort"
    50  	"strings"
    51  	"sync"
    52  	"sync/atomic"
    53  	"time"
    54  
    55  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
    56  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/nacl/secretbox"
    57  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
    58  	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/sss"
    59  )
    60  
    61  const (
    62  	KEY_LENGTH_BYTES    = 32
    63  	REGISTRY_FILENAME   = "osl-registry"
    64  	OSL_FILENAME_FORMAT = "osl-%s"
    65  )
    66  
    67  // Config is an OSL configuration, which consists of a list of schemes.
    68  // The Reload function supports hot reloading of rules data while the
    69  // process is running.
    70  type Config struct {
    71  	common.ReloadableFile
    72  
    73  	Schemes []*Scheme
    74  }
    75  
    76  // Scheme defines an OSL seeding and distribution strategy. SLOKs to
    77  // decrypt OSLs are issued based on client network activity -- defined
    78  // in the SeedSpecs -- and time. OSLs are created for periods of time
    79  // and can be decrypted by clients that are seeded with a sufficient
    80  // selection of SLOKs for that time period. Distribution of server
    81  // entries to OSLs is delegated to automation.
    82  type Scheme struct {
    83  
    84  	// Epoch is the start time of the scheme, the start time of the
    85  	// first OSL and when SLOKs will first be issued. It must be
    86  	// specified in UTC and must be a multiple of SeedPeriodNanoseconds.
    87  	Epoch string
    88  
    89  	// Regions is a list of client country codes this scheme applies to.
    90  	// If empty, the scheme applies to all regions.
    91  	Regions []string
    92  
    93  	// PropagationChannelIDs is a list of client propagation channel IDs
    94  	// this scheme applies to. Propagation channel IDs are an input
    95  	// to SLOK key derivation.
    96  	PropagationChannelIDs []string
    97  
    98  	// MasterKey is the base random key used for SLOK key derivation. It
    99  	// must be unique for each scheme. It must be 32 random bytes, base64
   100  	// encoded.
   101  	MasterKey []byte
   102  
   103  	// SeedSpecs is the set of different client network activity patterns
   104  	// that will result in issuing SLOKs. For a given time period, a distinct
   105  	// SLOK is issued for each SeedSpec.
   106  	// Duplicate subnets may appear in multiple SeedSpecs.
   107  	SeedSpecs []*SeedSpec
   108  
   109  	// SeedSpecThreshold is the threshold scheme for combining SLOKs to
   110  	// decrypt an OSL. For any fixed time period, at least K (threshold) of
   111  	// N (total) SLOKs from the N SeedSpecs must be seeded for a client to be
   112  	// able to reassemble the OSL key.
   113  	// Limitation: thresholds must be at least 2.
   114  	SeedSpecThreshold int
   115  
   116  	// SeedPeriodNanoseconds is the time period granularity of SLOKs.
   117  	// New SLOKs are issued every SeedPeriodNanoseconds. Client progress
   118  	// towards activity levels is reset at the end of each period.
   119  	SeedPeriodNanoseconds int64
   120  
   121  	// KeySplits is the time period threshold scheme layered on top of the
   122  	// SeedSpecThreshold scheme for combining SLOKs to decrypt an OSL.
   123  	// There must be at least one level. For one level, any K (threshold) of
   124  	// N (total) SeedSpec SLOK groups must be sufficiently seeded for a client
   125  	// to be able to reassemble the OSL key. When an additional level is
   126  	// specified, then K' of N' groups of N of K SeedSpec SLOK groups must be
   127  	// sufficiently seeded. And so on. The first level in the list is the
   128  	// lowest level. The time period for OSLs is determined by the totals in
   129  	// the KeySplits.
   130  	//
   131  	// Example:
   132  	//
   133  	//   SeedSpecs = <3 specs>
   134  	//   SeedSpecThreshold = 2
   135  	//   SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
   136  	//   SeedPeriodKeySplits = [{10, 7}, {60, 5}]
   137  	//
   138  	//   In this scheme, up to 3 distinct SLOKs, one per spec, are issued
   139  	//   every 100 milliseconds.
   140  	//
   141  	//   Distinct OSLs are paved for every minute (60 seconds). Each OSL
   142  	//   key is split such that, for those 60 seconds, a client must seed
   143  	//   2/3 spec SLOKs for 7 of 10 consecutive 100 ms. time periods within
   144  	//   a second, for any 5 of 60 seconds within the minute.
   145  	//
   146  	SeedPeriodKeySplits []KeySplit
   147  
   148  	// The following fields are ephemeral state.
   149  
   150  	epoch                 time.Time
   151  	subnetLookups         []common.SubnetLookup
   152  	derivedSLOKCacheMutex sync.RWMutex
   153  	derivedSLOKCache      map[slokReference]*SLOK
   154  }
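
// An illustrative JSON encoding of a scheme matching the worked example in
// the comment above (a minimal sketch; the base64 key values are placeholders
// and the subnets are documentation ranges, not real configuration):
//
//	{
//	  "Schemes": [{
//	    "Epoch": "2021-01-01T00:00:00Z",
//	    "Regions": [],
//	    "PropagationChannelIDs": ["<propagation channel ID>"],
//	    "MasterKey": "<32 random bytes, base64>",
//	    "SeedSpecs": [
//	      {"Description": "spec-1", "ID": "<32 random bytes, base64>",
//	       "UpstreamSubnets": ["192.0.2.0/24"],
//	       "Targets": {"BytesRead": 1048576, "BytesWritten": 1048576,
//	                   "PortForwardDurationNanoseconds": 60000000000}},
//	      {"Description": "spec-2", "ID": "<32 random bytes, base64>",
//	       "UpstreamSubnets": ["198.51.100.0/24"],
//	       "Targets": {"BytesRead": 1048576, "BytesWritten": 1048576,
//	                   "PortForwardDurationNanoseconds": 60000000000}},
//	      {"Description": "spec-3", "ID": "<32 random bytes, base64>",
//	       "UpstreamSubnets": ["203.0.113.0/24"],
//	       "Targets": {"BytesRead": 1048576, "BytesWritten": 1048576,
//	                   "PortForwardDurationNanoseconds": 60000000000}}
//	    ],
//	    "SeedSpecThreshold": 2,
//	    "SeedPeriodNanoseconds": 100000000,
//	    "SeedPeriodKeySplits": [{"Total": 10, "Threshold": 7},
//	                            {"Total": 60, "Threshold": 5}]
//	  }]
//	}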
   155  
   156  // SeedSpec defines a client traffic pattern that results in a seeded SLOK.
   157  // For each time period, a unique SLOK is issued to a client that meets the
   158  // traffic levels specified in Targets. All upstream port forward traffic to
   159  // UpstreamSubnets is counted towards the targets.
   160  //
   161  // ID is a SLOK key derivation component and must be 32 random bytes, base64
   162  // encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
   163  // for JSON config file comments.
   164  type SeedSpec struct {
   165  	Description     string
   166  	ID              []byte
   167  	UpstreamSubnets []string
   168  	Targets         TrafficValues
   169  }
   170  
   171  // TrafficValues defines a client traffic level that seeds a SLOK.
   172  // BytesRead and BytesWritten are the minimum bytes transferred counts to
   173  // seed a SLOK. Both UDP and TCP data will be counted towards these totals.
   174  // PortForwardDurationNanoseconds is the duration that a TCP or UDP port
   175  // forward is active (not connected, in the UDP case). All threshold
   176  // settings must be met to seed a SLOK; any threshold may be set to 0 to
   177  // be trivially satisfied.
   178  type TrafficValues struct {
   179  	BytesRead                      int64
   180  	BytesWritten                   int64
   181  	PortForwardDurationNanoseconds int64
   182  }
   183  
   184  // KeySplit defines a secret key splitting scheme where the secret is split
   185  // into N (total) shares, and any K (threshold) of the N shares must be
   186  // known to reconstruct the split secret.
   187  type KeySplit struct {
   188  	Total     int
   189  	Threshold int
   190  }
   191  
   192  // ClientSeedState tracks the progress of a client towards seeding SLOKs
   193  // across all schemes the client qualifies for.
   194  type ClientSeedState struct {
   195  	propagationChannelID string
   196  	seedProgress         []*ClientSeedProgress
   197  	mutex                sync.Mutex
   198  	signalIssueSLOKs     chan struct{}
   199  	issuedSLOKs          map[string]*SLOK
   200  	payloadSLOKs         []*SLOK
   201  }
   202  
   203  // ClientSeedProgress tracks client progress towards seeding SLOKs for
   204  // a particular scheme.
   205  type ClientSeedProgress struct {
   206  	// Note: 64-bit ints used with atomic operations are placed
   207  	// at the start of struct to ensure 64-bit alignment.
   208  	// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
   209  	progressSLOKTime int64
   210  	scheme           *Scheme
   211  	trafficProgress  []*TrafficValues
   212  }
   213  
   214  // ClientSeedPortForward maps a client port forward, which is relaying
   215  // traffic to a specific upstream address, to all seed state progress
   216  // counters for SeedSpecs with subnets containing the upstream address.
   217  // As traffic is relayed through the port forwards, the bytes transferred
   218  // and duration count towards the progress of these SeedSpecs and
   219  // associated SLOKs.
   220  type ClientSeedPortForward struct {
   221  	state              *ClientSeedState
   222  	progressReferences []progressReference
   223  }
   224  
   225  // progressReference points to a particular ClientSeedProgress and
   226  // TrafficValues to update with traffic events for a
   227  // ClientSeedPortForward.
   228  type progressReference struct {
   229  	seedProgressIndex    int
   230  	trafficProgressIndex int
   231  }
   232  
   233  // slokReference uniquely identifies a SLOK by specifying all the fields
   234  // used to derive the SLOK secret key and ID.
   235  // Note: SeedSpecID is not a []byte as slokReference is used as a map key.
   236  type slokReference struct {
   237  	PropagationChannelID string
   238  	SeedSpecID           string
   239  	Time                 time.Time
   240  }
   241  
   242  // SLOK is a seeded SLOK issued to a client. The client will store the
   243  // SLOK in its local database; look it up by ID when checking which OSLs it
   244  // can reassemble keys for; and use the key material to reassemble OSL
   245  // file keys.
   246  type SLOK struct {
   247  	ID  []byte
   248  	Key []byte
   249  }
   250  
   251  // SeedPayload is the list of seeded SLOKs sent to a client.
   252  type SeedPayload struct {
   253  	SLOKs []*SLOK
   254  }
   255  
   256  // NewConfig initializes a Config with the settings in the specified
   257  // file.
   258  func NewConfig(filename string) (*Config, error) {
   259  
   260  	config := &Config{}
   261  
   262  	config.ReloadableFile = common.NewReloadableFile(
   263  		filename,
   264  		true,
   265  		func(fileContent []byte, _ time.Time) error {
   266  			newConfig, err := LoadConfig(fileContent)
   267  			if err != nil {
   268  				return errors.Trace(err)
   269  			}
   270  			// Modify actual traffic rules only after validation
   271  			config.Schemes = newConfig.Schemes
   272  			return nil
   273  		})
   274  
   275  	_, err := config.Reload()
   276  	if err != nil {
   277  		return nil, errors.Trace(err)
   278  	}
   279  
   280  	return config, nil
   281  }
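
// A minimal usage sketch (the file path is a placeholder):
//
//	config, err := NewConfig("/etc/psiphond/osl-config")
//	if err != nil {
//	    // handle error
//	}
//	// To hot reload after the file changes (Reload is provided by
//	// common.ReloadableFile):
//	_, err = config.Reload()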
   282  
   283  // LoadConfig loads, validates, and initializes a JSON encoded OSL
   284  // configuration.
   285  func LoadConfig(configJSON []byte) (*Config, error) {
   286  
   287  	var config Config
   288  	err := json.Unmarshal(configJSON, &config)
   289  	if err != nil {
   290  		return nil, errors.Trace(err)
   291  	}
   292  
   293  	var previousEpoch time.Time
   294  
   295  	for _, scheme := range config.Schemes {
   296  
   297  		if scheme == nil {
   298  			return nil, errors.TraceNew("invalid scheme")
   299  		}
   300  
   301  		epoch, err := time.Parse(time.RFC3339, scheme.Epoch)
   302  		if err != nil {
   303  			return nil, errors.Tracef("invalid epoch format: %s", err)
   304  		}
   305  
   306  		if epoch.UTC() != epoch {
   307  			return nil, errors.TraceNew("invalid epoch timezone")
   308  		}
   309  
   310  		if epoch.Round(time.Duration(scheme.SeedPeriodNanoseconds)) != epoch {
   311  			return nil, errors.TraceNew("invalid epoch period")
   312  		}
   313  
   314  		if epoch.Before(previousEpoch) {
   315  			return nil, errors.TraceNew("invalid epoch order")
   316  		}
   317  
   318  		previousEpoch = epoch
   319  
   320  		scheme.epoch = epoch
   321  		scheme.subnetLookups = make([]common.SubnetLookup, len(scheme.SeedSpecs))
   322  		scheme.derivedSLOKCache = make(map[slokReference]*SLOK)
   323  
   324  		if len(scheme.MasterKey) != KEY_LENGTH_BYTES {
   325  			return nil, errors.TraceNew("invalid master key")
   326  		}
   327  
   328  		for index, seedSpec := range scheme.SeedSpecs {
   329  
   330  			if seedSpec == nil {
   331  				return nil, errors.TraceNew("invalid seed spec")
   332  			}
   333  
   334  			if len(seedSpec.ID) != KEY_LENGTH_BYTES {
   335  				return nil, errors.TraceNew("invalid seed spec ID")
   336  			}
   337  
   338  			// TODO: check that subnets do not overlap, as required by SubnetLookup
   339  			subnetLookup, err := common.NewSubnetLookup(seedSpec.UpstreamSubnets)
   340  			if err != nil {
   341  				return nil, errors.Tracef("invalid upstream subnets: %s", err)
   342  			}
   343  
   344  			scheme.subnetLookups[index] = subnetLookup
   345  		}
   346  
   347  		if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
   348  			return nil, errors.TraceNew("invalid seed spec key split")
   349  		}
   350  
   351  		if len(scheme.SeedPeriodKeySplits) < 1 {
   352  			return nil, errors.TraceNew("invalid seed period key split count")
   353  		}
   354  
   355  		for _, keySplit := range scheme.SeedPeriodKeySplits {
   356  			if !isValidShamirSplit(keySplit.Total, keySplit.Threshold) {
   357  				return nil, errors.TraceNew("invalid seed period key split")
   358  			}
   359  		}
   360  	}
   361  
   362  	return &config, nil
   363  }
   364  
   365  // NewClientSeedState creates a new client seed state to track
   366  // client progress towards seeding SLOKs. psiphond maintains one
   367  // ClientSeedState for each connected client.
   368  //
   369  // A signal is sent on signalIssueSLOKs when sufficient progress
   370  // has been made that a new SLOK *may* be issued. psiphond will
   371  // receive the signal and then call GetClientSeedPayload/IssueSLOKs
   372  // to issue SLOKs, generate payload, and send to the client. The
   373  // sender will not block sending to signalIssueSLOKs; the channel
   374  // should be appropriately buffered.
   375  func (config *Config) NewClientSeedState(
   376  	clientRegion, propagationChannelID string,
   377  	signalIssueSLOKs chan struct{}) *ClientSeedState {
   378  
   379  	config.ReloadableFile.RLock()
   380  	defer config.ReloadableFile.RUnlock()
   381  
   382  	state := &ClientSeedState{
   383  		propagationChannelID: propagationChannelID,
   384  		signalIssueSLOKs:     signalIssueSLOKs,
   385  		issuedSLOKs:          make(map[string]*SLOK),
   386  		payloadSLOKs:         nil,
   387  	}
   388  
   389  	for _, scheme := range config.Schemes {
   390  
   391  		// All matching schemes are selected.
   392  		// Note: this implementation assumes a few simple schemes. For more
   393  		// schemes with many propagation channel IDs or region filters, use
   394  		// maps for more efficient lookup.
   395  		if scheme.epoch.Before(time.Now().UTC()) &&
   396  			common.Contains(scheme.PropagationChannelIDs, propagationChannelID) &&
   397  			(len(scheme.Regions) == 0 || common.Contains(scheme.Regions, clientRegion)) {
   398  
   399  			// Empty progress is initialized up front for all seed specs. Once
   400  			// created, the progress structure is read-only (the slice, not the
   401  			// TrafficValues fields); this permits lock-free operation.
   402  			trafficProgress := make([]*TrafficValues, len(scheme.SeedSpecs))
   403  			for index := 0; index < len(scheme.SeedSpecs); index++ {
   404  				trafficProgress[index] = &TrafficValues{}
   405  			}
   406  
   407  			seedProgress := &ClientSeedProgress{
   408  				scheme:           scheme,
   409  				progressSLOKTime: getSLOKTime(scheme.SeedPeriodNanoseconds),
   410  				trafficProgress:  trafficProgress,
   411  			}
   412  
   413  			state.seedProgress = append(state.seedProgress, seedProgress)
   414  		}
   415  	}
   416  
   417  	return state
   418  }
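
// A minimal sketch of the psiphond-side seeding flow (variables are
// placeholders; error handling and connection lifecycle are omitted):
//
//	signal := make(chan struct{}, 1) // buffered; senders never block
//	state := config.NewClientSeedState(clientRegion, propagationChannelID, signal)
//
//	// For each upstream port forward:
//	portForward := state.NewClientSeedPortForward(upstreamIPAddress)
//	if portForward != nil {
//	    // As traffic is relayed:
//	    portForward.UpdateProgress(bytesRead, bytesWritten, durationNanoseconds)
//	}
//
//	// When a signal is received on the channel:
//	payload := state.GetSeedPayload() // send payload.SLOKs to the client
//	state.ClearSeedPayload()          // after the client acknowledges receipt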
   419  
   420  // Hibernate clears references to short-lived objects (currently,
   421  // signalIssueSLOKs) so that a ClientSeedState can be stored for
   422  // later resumption without blocking garbage collection of the
   423  // short-lived objects.
   424  //
   425  // The ClientSeedState will still hold references to its Config;
   426  // the caller is responsible for discarding hibernated seed states
   427  // when the config changes.
   428  //
   429  // The caller should ensure that all ClientSeedPortForwards
   430  // associated with this ClientSeedState are closed before
   431  // hibernation.
   432  func (state *ClientSeedState) Hibernate() {
   433  	state.mutex.Lock()
   434  	defer state.mutex.Unlock()
   435  
   436  	state.signalIssueSLOKs = nil
   437  }
   438  
   439  // Resume resumes a hibernated ClientSeedState by resetting the required
   440  // objects (currently, signalIssueSLOKs) cleared by Hibernate.
   441  func (state *ClientSeedState) Resume(
   442  	signalIssueSLOKs chan struct{}) {
   443  
   444  	state.mutex.Lock()
   445  	defer state.mutex.Unlock()
   446  
   447  	state.signalIssueSLOKs = signalIssueSLOKs
   448  }
   449  
   450  // NewClientSeedPortForward creates a new client port forward
   451  // traffic progress tracker. Port forward progress reported to the
   452  // ClientSeedPortForward is added to seed state progress for all
   453  // seed specs containing upstreamIPAddress in their subnets.
   454  // The return value will be nil when activity for upstreamIPAddress
   455  // does not count towards any progress.
   456  // NewClientSeedPortForward may be invoked concurrently by many
   457  // psiphond port forward establishment goroutines.
   458  func (state *ClientSeedState) NewClientSeedPortForward(
   459  	upstreamIPAddress net.IP) *ClientSeedPortForward {
   460  
   461  	// Concurrency: access to ClientSeedState is unsynchronized
   462  	// but references only read-only fields.
   463  
   464  	if len(state.seedProgress) == 0 {
   465  		return nil
   466  	}
   467  
   468  	var progressReferences []progressReference
   469  
   470  	// Determine which seed spec subnets contain upstreamIPAddress
   471  	// and point to the progress for each. When progress is reported,
   472  	// it is added directly to all of these TrafficValues instances.
   473  	// Assumes each seedProgress.trafficProgress entry corresponds 1-to-1
   474  	// with the seedProgress.scheme.subnetLookups entry at the same index.
   475  	// Note: this implementation assumes a small number of schemes and
   476  	// seed specs. For larger numbers, instead of N SubnetLookups, create
   477  	// a single SubnetLookup which returns, for a given IP address, all
   478  	// matching subnets and associated seed specs.
   479  	for seedProgressIndex, seedProgress := range state.seedProgress {
   480  		for trafficProgressIndex, subnetLookup := range seedProgress.scheme.subnetLookups {
   481  			if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
   482  				progressReferences = append(
   483  					progressReferences,
   484  					progressReference{
   485  						seedProgressIndex:    seedProgressIndex,
   486  						trafficProgressIndex: trafficProgressIndex,
   487  					})
   488  			}
   489  		}
   490  	}
   491  
   492  	if progressReferences == nil {
   493  		return nil
   494  	}
   495  
   496  	return &ClientSeedPortForward{
   497  		state:              state,
   498  		progressReferences: progressReferences,
   499  	}
   500  }
   501  
   502  func (state *ClientSeedState) sendIssueSLOKsSignal() {
   503  	state.mutex.Lock()
   504  	defer state.mutex.Unlock()
   505  
   506  	if state.signalIssueSLOKs != nil {
   507  		select {
   508  		case state.signalIssueSLOKs <- struct{}{}:
   509  		default:
   510  		}
   511  	}
   512  }
   513  
   514  // UpdateProgress adds port forward bytes transferred and duration to
   515  // all seed spec progresses associated with the port forward.
   516  // If UpdateProgress is invoked after the SLOK time period has rolled
   517  // over, any pending seeded SLOKs are issued and all progress is reset.
   518  // UpdateProgress may be invoked concurrently by many psiphond port
   519  // relay goroutines. The implementation of UpdateProgress prioritizes
   520  // not blocking port forward relaying; a consequence of this lock-free
   521  // design is that progress reported at the exact time of SLOK time period
   522  // rollover may be dropped.
   523  func (portForward *ClientSeedPortForward) UpdateProgress(
   524  	bytesRead, bytesWritten, durationNanoseconds int64) {
   525  
   526  	// Concurrency: non-blocking -- unsynchronized access to ClientSeedState
   527  	// is limited to read-only fields, atomics, or channels, except in the case
   528  	// of a time period rollover, in which case a mutex is acquired.
   529  
   530  	for _, progressReference := range portForward.progressReferences {
   531  
   532  		seedProgress := portForward.state.seedProgress[progressReference.seedProgressIndex]
   533  		trafficProgress := seedProgress.trafficProgress[progressReference.trafficProgressIndex]
   534  
   535  		slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
   536  
   537  		// If the SLOK time period has changed since progress was last recorded,
   538  		// call issueSLOKs which will issue any SLOKs for that past time period
   539  		// and then clear all progress. Progress will then be recorded for the
   540  		// current time period.
   541  		// As it acquires the state mutex, issueSLOKs may stall other port
   542  		// forwards for this client. The delay is minimized by SLOK caching,
   543  		// which avoids redundant crypto operations.
   544  		if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
   545  			portForward.state.mutex.Lock()
   546  			portForward.state.issueSLOKs()
   547  			portForward.state.mutex.Unlock()
   548  
   549  			// Call to issueSLOKs may have issued new SLOKs. Note that
   550  			// this will only happen if the time period rolls over with
   551  			// sufficient progress pending while the signalIssueSLOKs
   552  			// receiver did not call IssueSLOKs soon enough.
   553  			portForward.state.sendIssueSLOKsSignal()
   554  		}
   555  
   556  		// Add directly to the permanent TrafficValues progress accumulators
   557  		// for the state's seed specs. Concurrently, other port forwards may
   558  		// be adding to the same accumulators. Also concurrently, another
   559  		// goroutine may be invoking issueSLOKs, which zeros all the accumulators.
   560  		// As a consequence, progress may be dropped at the exact time of
   561  		// time period rollover.
   562  
   563  		seedSpec := seedProgress.scheme.SeedSpecs[progressReference.trafficProgressIndex]
   564  
   565  		alreadyExceedsTargets := trafficProgress.exceeds(&seedSpec.Targets)
   566  
   567  		atomic.AddInt64(&trafficProgress.BytesRead, bytesRead)
   568  		atomic.AddInt64(&trafficProgress.BytesWritten, bytesWritten)
   569  		atomic.AddInt64(&trafficProgress.PortForwardDurationNanoseconds, durationNanoseconds)
   570  
   571  		// With the target newly met for a SeedSpec, a new
   572  		// SLOK *may* be issued.
   573  		if !alreadyExceedsTargets && trafficProgress.exceeds(&seedSpec.Targets) {
   574  			portForward.state.sendIssueSLOKsSignal()
   575  		}
   576  	}
   577  }
   578  
   579  func (lhs *TrafficValues) exceeds(rhs *TrafficValues) bool {
   580  	return atomic.LoadInt64(&lhs.BytesRead) >= atomic.LoadInt64(&rhs.BytesRead) &&
   581  		atomic.LoadInt64(&lhs.BytesWritten) >= atomic.LoadInt64(&rhs.BytesWritten) &&
   582  		atomic.LoadInt64(&lhs.PortForwardDurationNanoseconds) >=
   583  			atomic.LoadInt64(&rhs.PortForwardDurationNanoseconds)
   584  }
   585  
   586  // issueSLOKs checks client progress against each candidate seed spec
   587  // and seeds SLOKs when the client traffic levels are achieved. After
   588  // checking progress, and if the SLOK time period has changed since
   589  // progress was last recorded, progress is reset. Partial, insufficient
   590  // progress is intentionally dropped when the time period rolls over.
   591  // Derived SLOKs are cached to avoid redundant CPU intensive operations.
   592  // All issued SLOKs are retained in the client state for the duration
   593  // of the client's session.
   594  func (state *ClientSeedState) issueSLOKs() {
   595  
   596  	// Concurrency: the caller must lock state.mutex.
   597  
   598  	if len(state.seedProgress) == 0 {
   599  		return
   600  	}
   601  
   602  	for _, seedProgress := range state.seedProgress {
   603  
   604  		progressSLOKTime := time.Unix(0, seedProgress.progressSLOKTime)
   605  
   606  		for index, trafficProgress := range seedProgress.trafficProgress {
   607  
   608  			seedSpec := seedProgress.scheme.SeedSpecs[index]
   609  
   610  			if trafficProgress.exceeds(&seedSpec.Targets) {
   611  
   612  				ref := &slokReference{
   613  					PropagationChannelID: state.propagationChannelID,
   614  					SeedSpecID:           string(seedSpec.ID),
   615  					Time:                 progressSLOKTime,
   616  				}
   617  
   618  				seedProgress.scheme.derivedSLOKCacheMutex.RLock()
   619  				slok, ok := seedProgress.scheme.derivedSLOKCache[*ref]
   620  				seedProgress.scheme.derivedSLOKCacheMutex.RUnlock()
   621  				if !ok {
   622  					slok = seedProgress.scheme.deriveSLOK(ref)
   623  					seedProgress.scheme.derivedSLOKCacheMutex.Lock()
   624  					seedProgress.scheme.derivedSLOKCache[*ref] = slok
   625  					seedProgress.scheme.derivedSLOKCacheMutex.Unlock()
   626  				}
   627  
   628  				// Previously issued SLOKs are not re-added to
   629  				// the payload.
   630  				if state.issuedSLOKs[string(slok.ID)] == nil {
   631  					state.issuedSLOKs[string(slok.ID)] = slok
   632  					state.payloadSLOKs = append(state.payloadSLOKs, slok)
   633  				}
   634  			}
   635  		}
   636  
   637  		slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
   638  
   639  		if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
   640  			atomic.StoreInt64(&seedProgress.progressSLOKTime, slokTime)
   641  			// The progress structure is not reset or modified; instead
   642  			// the mapped accumulator values are zeroed. Concurrently, port
   643  			// forward relay goroutines continue to add to these accumulators.
   644  			for _, trafficProgress := range seedProgress.trafficProgress {
   645  				atomic.StoreInt64(&trafficProgress.BytesRead, 0)
   646  				atomic.StoreInt64(&trafficProgress.BytesWritten, 0)
   647  				atomic.StoreInt64(&trafficProgress.PortForwardDurationNanoseconds, 0)
   648  			}
   649  		}
   650  	}
   651  }
   652  
   653  func getSLOKTime(seedPeriodNanoseconds int64) int64 {
   654  	return time.Now().UTC().Truncate(time.Duration(seedPeriodNanoseconds)).UnixNano()
   655  }
   656  
   657  // GetSeedPayload issues any pending SLOKs and returns the accumulated
   658  // SLOKs for a given client. psiphond calls this when it receives a signal
   659  // on signalIssueSLOKs, which is the trigger to check for new SLOKs.
   660  // Note: caller must not modify the SLOKs in SeedPayload.SLOKs
   661  // as these are shared data.
   662  func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
   663  
   664  	state.mutex.Lock()
   665  	defer state.mutex.Unlock()
   666  
   667  	if len(state.seedProgress) == 0 {
   668  		return &SeedPayload{}
   669  	}
   670  
   671  	state.issueSLOKs()
   672  
   673  	sloks := make([]*SLOK, len(state.payloadSLOKs))
   674  	for index, slok := range state.payloadSLOKs {
   675  		sloks[index] = slok
   676  	}
   677  
   678  	return &SeedPayload{
   679  		SLOKs: sloks,
   680  	}
   681  }
   682  
   683  // ClearSeedPayload resets the accumulated SLOK payload (but not SLOK
   684  // progress). psiphond calls this after the client has acknowledged
   685  // receipt of a payload.
   686  func (state *ClientSeedState) ClearSeedPayload() {
   687  
   688  	state.mutex.Lock()
   689  	defer state.mutex.Unlock()
   690  
   691  	state.payloadSLOKs = nil
   692  }
   693  
   694  // deriveSLOK produces SLOK secret keys and IDs using HKDF-Expand
   695  // defined in https://tools.ietf.org/html/rfc5869.
   696  func (scheme *Scheme) deriveSLOK(ref *slokReference) *SLOK {
   697  
   698  	timeBytes := make([]byte, 8)
   699  	binary.LittleEndian.PutUint64(timeBytes, uint64(ref.Time.UnixNano()))
   700  
   701  	key := deriveKeyHKDF(
   702  		scheme.MasterKey,
   703  		[]byte(ref.PropagationChannelID),
   704  		[]byte(ref.SeedSpecID),
   705  		timeBytes)
   706  
   707  	// TODO: is ID derivation cryptographically sound?
   708  	id := deriveKeyHKDF(
   709  		scheme.MasterKey,
   710  		key)
   711  
   712  	return &SLOK{
   713  		ID:  id,
   714  		Key: key,
   715  	}
   716  }
   717  
   718  // GetOSLDuration returns the total time duration of an OSL,
   719  // which is a function of the scheme's SeedPeriodNanoseconds,
   720  // the duration of a single SLOK, and the scheme's SeedPeriodKeySplits,
   721  // the number of SLOKs associated with an OSL.
   722  func (scheme *Scheme) GetOSLDuration() time.Duration {
   723  	slokTimePeriodsPerOSL := 1
   724  	for _, keySplit := range scheme.SeedPeriodKeySplits {
   725  		slokTimePeriodsPerOSL *= keySplit.Total
   726  	}
   727  
   728  	return time.Duration(
   729  		int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds)
   730  }
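
// For example, with the scheme documented above (SeedPeriodNanoseconds of
// 100 ms and SeedPeriodKeySplits of [{10, 7}, {60, 5}]), the OSL duration is
// 100 ms * 10 * 60 = 60 seconds.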
   731  
   732  // PaveFile describes an OSL data file to be paved to an out-of-band
   733  // distribution drop site. There are two types of files: a registry,
   734  // which describes how to assemble keys for OSLs, and the encrypted
   735  // OSL files.
   736  type PaveFile struct {
   737  	Name     string
   738  	Contents []byte
   739  }
   740  
   741  // Registry describes a set of OSL files.
   742  type Registry struct {
   743  	FileSpecs []*OSLFileSpec
   744  }
   745  
   746  // An OSLFileSpec includes an ID which is used to reference the
   747  // OSL file and describes the key splits used to divide the OSL
   748  // file key along with the SLOKs required to reassemble those keys.
   749  //
   750  // The MD5Sum field is a checksum of the contents of the OSL file
   751  // to be used to skip redownloading previously downloaded files.
   752  // MD5 is not cryptographically secure and this checksum is not
   753  // relied upon for OSL verification. MD5 is used for compatibility
   754  // with out-of-band distribution hosts.
   755  type OSLFileSpec struct {
   756  	ID        []byte
   757  	KeyShares *KeyShares
   758  	MD5Sum    []byte
   759  }
   760  
   761  // KeyShares is a tree data structure which describes the
   762  // key splits used to divide a secret key. BoxedShares are encrypted
   763  // shares of the key, and at least Threshold decrypted BoxedShares
   764  // are required to reconstruct the secret key. The keys for BoxedShares
   765  // are either SLOKs (referenced by SLOK ID) or random keys that are
   766  // themselves split as described in child KeyShares.
   767  type KeyShares struct {
   768  	Threshold   int
   769  	BoxedShares [][]byte
   770  	SLOKIDs     [][]byte
   771  	KeyShares   []*KeyShares
   772  }
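
// For the scheme documented above (3 SeedSpecs, SeedSpecThreshold 2,
// SeedPeriodKeySplits [{10, 7}, {60, 5}]), each OSL's KeyShares tree has the
// following shape (a sketch derived from divideKey and
// divideKeyWithSeedSpecSLOKs below):
//
//	root:        Threshold 5, 60 BoxedShares, 60 child KeyShares
//	each child:  Threshold 7, 10 BoxedShares, 10 child KeyShares
//	each leaf:   Threshold 2, 3 BoxedShares, 3 SLOKIDs (one per SeedSpec)
//
// The leaves cover 600 consecutive SLOK time periods (60 seconds at 100 ms
// per period), matching the OSL duration.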
   773  
   774  type PaveLogInfo struct {
   775  	FileName             string
   776  	SchemeIndex          int
   777  	PropagationChannelID string
   778  	OSLID                string
   779  	OSLTime              time.Time
   780  	OSLDuration          time.Duration
   781  	ServerEntryCount     int
   782  }
   783  
   784  // Pave creates the full set of OSL files, for all schemes in the
   785  // configuration, to be dropped in an out-of-band distribution site.
   786  // Only OSLs for the propagation channel ID associated with the
   787  // distribution site are paved. This function is used by automation.
   788  //
   789  // The Name component of each file relates to the values returned by
   790  // the client functions GetRegistryURL and GetOSLFileURL.
   791  //
   792  // Pave returns a pave file for the entire registry of all OSLs from
   793  // epoch to endTime, and a pave file for each OSL. paveServerEntries is
   794  // a map from hex-encoded OSL IDs to server entries to pave into that OSL.
   795  // When entries are found, the OSL will contain those entries, newline
   796  // separated. Otherwise the OSL will still be issued, but be empty (unless
   797  // the scheme is in omitEmptyOSLsSchemes). The server entries are paved
   798  // in string value sort order, ensuring that the OSL content remains
   799  // constant as long as the same _set_ of server entries is input.
   800  //
   801  // If startTime is specified and is after the epoch, the paved OSLs begin
   802  // with the first OSL period at or after startTime.
   803  //
   804  // As OSLs outside the epoch-endTime range will no longer appear in
   805  // the registry, Pave is intended to be used to create the full set
   806  // of OSLs for a distribution site; i.e., not incrementally.
   807  //
   808  // Automation is responsible for consistently distributing server entries
   809  // to OSLs in the case where OSLs are repaved in subsequent calls.
   810  func (config *Config) Pave(
   811  	startTime time.Time,
   812  	endTime time.Time,
   813  	propagationChannelID string,
   814  	signingPublicKey string,
   815  	signingPrivateKey string,
   816  	paveServerEntries map[string][]string,
   817  	omitMD5SumsSchemes []int,
   818  	omitEmptyOSLsSchemes []int,
   819  	logCallback func(*PaveLogInfo)) ([]*PaveFile, error) {
   820  
   821  	config.ReloadableFile.RLock()
   822  	defer config.ReloadableFile.RUnlock()
   823  
   824  	var paveFiles []*PaveFile
   825  
   826  	registry := &Registry{}
   827  
   828  	for schemeIndex, scheme := range config.Schemes {
   829  		if common.Contains(scheme.PropagationChannelIDs, propagationChannelID) {
   830  
   831  			omitMD5Sums := common.ContainsInt(omitMD5SumsSchemes, schemeIndex)
   832  
   833  			omitEmptyOSLs := common.ContainsInt(omitEmptyOSLsSchemes, schemeIndex)
   834  
   835  			oslDuration := scheme.GetOSLDuration()
   836  
   837  			oslTime := scheme.epoch
   838  
   839  			if !startTime.IsZero() && !startTime.Before(scheme.epoch) {
   840  				for oslTime.Before(startTime) {
   841  					oslTime = oslTime.Add(oslDuration)
   842  				}
   843  			}
   844  
   845  			for !oslTime.After(endTime) {
   846  
   847  				firstSLOKTime := oslTime
   848  				fileKey, fileSpec, err := makeOSLFileSpec(
   849  					scheme, propagationChannelID, firstSLOKTime)
   850  				if err != nil {
   851  					return nil, errors.Trace(err)
   852  				}
   853  
   854  				hexEncodedOSLID := hex.EncodeToString(fileSpec.ID)
   855  
   856  				serverEntryCount := len(paveServerEntries[hexEncodedOSLID])
   857  
   858  				if serverEntryCount > 0 || !omitEmptyOSLs {
   859  
   860  					registry.FileSpecs = append(registry.FileSpecs, fileSpec)
   861  
   862  					serverEntries := append([]string(nil), paveServerEntries[hexEncodedOSLID]...)
   863  					sort.Strings(serverEntries)
   864  
   865  					// payload will be "" when nothing is found in serverEntries
   866  					payload := strings.Join(serverEntries, "\n")
   867  
   868  					serverEntriesPackage, err := common.WriteAuthenticatedDataPackage(
   869  						payload,
   870  						signingPublicKey,
   871  						signingPrivateKey)
   872  					if err != nil {
   873  						return nil, errors.Trace(err)
   874  					}
   875  
   876  					boxedServerEntries, err := box(fileKey, serverEntriesPackage)
   877  					if err != nil {
   878  						return nil, errors.Trace(err)
   879  					}
   880  
   881  					if !omitMD5Sums {
   882  						md5sum := md5.Sum(boxedServerEntries)
   883  						fileSpec.MD5Sum = md5sum[:]
   884  					}
   885  
   886  					fileName := fmt.Sprintf(
   887  						OSL_FILENAME_FORMAT, hexEncodedOSLID)
   888  
   889  					paveFiles = append(paveFiles, &PaveFile{
   890  						Name:     fileName,
   891  						Contents: boxedServerEntries,
   892  					})
   893  
   894  					if logCallback != nil {
   895  						logCallback(&PaveLogInfo{
   896  							FileName:             fileName,
   897  							SchemeIndex:          schemeIndex,
   898  							PropagationChannelID: propagationChannelID,
   899  							OSLID:                hexEncodedOSLID,
   900  							OSLTime:              oslTime,
   901  							OSLDuration:          oslDuration,
   902  							ServerEntryCount:     serverEntryCount,
   903  						})
   904  					}
   905  				}
   906  
   907  				oslTime = oslTime.Add(oslDuration)
   908  			}
   909  		}
   910  	}
   911  
   912  	registryJSON, err := json.Marshal(registry)
   913  	if err != nil {
   914  		return nil, errors.Trace(err)
   915  	}
   916  
   917  	registryPackage, err := common.WriteAuthenticatedDataPackage(
   918  		base64.StdEncoding.EncodeToString(registryJSON),
   919  		signingPublicKey,
   920  		signingPrivateKey)
   921  	if err != nil {
   922  		return nil, errors.Trace(err)
   923  	}
   924  
   925  	paveFiles = append(paveFiles, &PaveFile{
   926  		Name:     REGISTRY_FILENAME,
   927  		Contents: registryPackage,
   928  	})
   929  
   930  	return paveFiles, nil
   931  }
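
// A minimal sketch of automation invoking Pave (parameter values are
// illustrative placeholders):
//
//	paveFiles, err := config.Pave(
//	    time.Time{}, // zero startTime paves from the scheme epoch
//	    endTime,
//	    propagationChannelID,
//	    signingPublicKey,
//	    signingPrivateKey,
//	    paveServerEntries, // map from hex-encoded OSL ID to server entries
//	    nil,               // omitMD5SumsSchemes
//	    nil,               // omitEmptyOSLsSchemes
//	    nil)               // logCallback
//	if err != nil {
//	    // handle error
//	}
//	// Write each PaveFile's Contents to a file named Name at the drop site.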
   932  
   933  // CurrentOSLIDs returns a mapping from each propagation channel ID in the
   934  // specified scheme to the corresponding current time period, hex-encoded OSL ID.
   935  func (config *Config) CurrentOSLIDs(schemeIndex int) (map[string]string, error) {
   936  
   937  	config.ReloadableFile.RLock()
   938  	defer config.ReloadableFile.RUnlock()
   939  
   940  	if schemeIndex < 0 || schemeIndex >= len(config.Schemes) {
   941  		return nil, errors.TraceNew("invalid scheme index")
   942  	}
   943  
   944  	scheme := config.Schemes[schemeIndex]
   945  	now := time.Now().UTC()
   946  	oslDuration := scheme.GetOSLDuration()
   947  	oslTime := scheme.epoch.Add((now.Sub(scheme.epoch) / oslDuration) * oslDuration)
   948  
   949  	OSLIDs := make(map[string]string)
   950  	for _, propagationChannelID := range scheme.PropagationChannelIDs {
   951  		_, fileSpec, err := makeOSLFileSpec(scheme, propagationChannelID, oslTime)
   952  		if err != nil {
   953  			return nil, errors.Trace(err)
   954  		}
   955  		OSLIDs[propagationChannelID] = hex.EncodeToString(fileSpec.ID)
   956  	}
   957  
   958  	return OSLIDs, nil
   959  }
   960  
   961  // makeOSLFileSpec creates an OSL file key, splits it according to the
   962  // scheme's key splits, and sets the OSL ID as its first SLOK ID. The
   963  // returned key is used to encrypt the OSL payload and then discarded;
   964  // the key may be reassembled using the data in the KeyShares tree,
   965  // given sufficient SLOKs.
   966  func makeOSLFileSpec(
   967  	scheme *Scheme,
   968  	propagationChannelID string,
   969  	firstSLOKTime time.Time) ([]byte, *OSLFileSpec, error) {
   970  
   971  	ref := &slokReference{
   972  		PropagationChannelID: propagationChannelID,
   973  		SeedSpecID:           string(scheme.SeedSpecs[0].ID),
   974  		Time:                 firstSLOKTime,
   975  	}
   976  	firstSLOK := scheme.deriveSLOK(ref)
   977  	oslID := firstSLOK.ID
   978  
   979  	// Note: previously, fileKey was a random key. Now, the key
   980  	// is derived from the master key and OSL ID. This deterministic
   981  	// derivation ensures that repeated paves of the same OSL
   982  	// with the same ID and same content yields the same MD5Sum
   983  	// to avoid wasteful downloads.
   984  	//
   985  	// Similarly, the shareKeys generated in divideKey and the Shamir
   986  	// key splitting random polynomials are now both deterministically
   987  	// generated from a seeded CSPRNG. This ensures that the OSL
   988  	// registry remains identical for repeated paves of the same config
   989  	// and parameters.
   990  	//
   991  	// The split structure is added to the deterministic key
   992  	// derivation so that changes to the split configuration will not
   993  	// expose the same key material to different SLOK combinations.
   994  
   995  	splitStructure := make([]byte, 16*(1+len(scheme.SeedPeriodKeySplits)))
   996  	i := 0
   997  	binary.LittleEndian.PutUint64(splitStructure[i:], uint64(len(scheme.SeedSpecs)))
   998  	binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(scheme.SeedSpecThreshold))
   999  	i += 16
  1000  	for _, keySplit := range scheme.SeedPeriodKeySplits {
  1001  		binary.LittleEndian.PutUint64(splitStructure[i:], uint64(keySplit.Total))
  1002  		binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(keySplit.Threshold))
  1003  		i += 16
  1004  	}
  1005  
  1006  	fileKey := deriveKeyHKDF(
  1007  		scheme.MasterKey,
  1008  		splitStructure,
  1009  		[]byte("osl-file-key"),
  1010  		oslID)
  1011  
  1012  	splitKeyMaterialSeed := deriveKeyHKDF(
  1013  		scheme.MasterKey,
  1014  		splitStructure,
  1015  		[]byte("osl-file-split-key-material-seed"),
  1016  		oslID)
  1017  
  1018  	keyMaterialReader, err := newSeededKeyMaterialReader(splitKeyMaterialSeed)
  1019  	if err != nil {
  1020  		return nil, nil, errors.Trace(err)
  1021  	}
  1022  
  1023  	keyShares, err := divideKey(
  1024  		scheme,
  1025  		keyMaterialReader,
  1026  		fileKey,
  1027  		scheme.SeedPeriodKeySplits,
  1028  		propagationChannelID,
  1029  		&firstSLOKTime)
  1030  	if err != nil {
  1031  		return nil, nil, errors.Trace(err)
  1032  	}
  1033  
  1034  	fileSpec := &OSLFileSpec{
  1035  		ID:        oslID,
  1036  		KeyShares: keyShares,
  1037  	}
  1038  
  1039  	return fileKey, fileSpec, nil
  1040  }
  1041  
  1042  // divideKey recursively constructs a KeyShares tree.
  1043  func divideKey(
  1044  	scheme *Scheme,
  1045  	keyMaterialReader io.Reader,
  1046  	key []byte,
  1047  	keySplits []KeySplit,
  1048  	propagationChannelID string,
  1049  	nextSLOKTime *time.Time) (*KeyShares, error) {
  1050  
  1051  	keySplitIndex := len(keySplits) - 1
  1052  	keySplit := keySplits[keySplitIndex]
  1053  
  1054  	shares, err := shamirSplit(
  1055  		key,
  1056  		keySplit.Total,
  1057  		keySplit.Threshold,
  1058  		keyMaterialReader)
  1059  	if err != nil {
  1060  		return nil, errors.Trace(err)
  1061  	}
  1062  
  1063  	var boxedShares [][]byte
  1064  	var keyShares []*KeyShares
  1065  
  1066  	for _, share := range shares {
  1067  
  1068  		var shareKey [KEY_LENGTH_BYTES]byte
  1069  
  1070  		n, err := keyMaterialReader.Read(shareKey[:])
  1071  		if err == nil && n != len(shareKey) {
  1072  			err = std_errors.New("unexpected length")
  1073  		}
  1074  		if err != nil {
  1075  			return nil, errors.Trace(err)
  1076  		}
  1077  
  1078  		if keySplitIndex > 0 {
  1079  			keyShare, err := divideKey(
  1080  				scheme,
  1081  				keyMaterialReader,
  1082  				shareKey[:],
  1083  				keySplits[0:keySplitIndex],
  1084  				propagationChannelID,
  1085  				nextSLOKTime)
  1086  			if err != nil {
  1087  				return nil, errors.Trace(err)
  1088  			}
  1089  			keyShares = append(keyShares, keyShare)
  1090  		} else {
  1091  			keyShare, err := divideKeyWithSeedSpecSLOKs(
  1092  				scheme,
  1093  				keyMaterialReader,
  1094  				shareKey[:],
  1095  				propagationChannelID,
  1096  				nextSLOKTime)
  1097  			if err != nil {
  1098  				return nil, errors.Trace(err)
  1099  			}
  1100  			keyShares = append(keyShares, keyShare)
  1101  
  1102  			*nextSLOKTime = nextSLOKTime.Add(time.Duration(scheme.SeedPeriodNanoseconds))
  1103  		}
  1104  		boxedShare, err := box(shareKey[:], share)
  1105  		if err != nil {
  1106  			return nil, errors.Trace(err)
  1107  		}
  1108  		boxedShares = append(boxedShares, boxedShare)
  1109  	}
  1110  
  1111  	return &KeyShares{
  1112  		Threshold:   keySplit.Threshold,
  1113  		BoxedShares: boxedShares,
  1114  		SLOKIDs:     nil,
  1115  		KeyShares:   keyShares,
  1116  	}, nil
  1117  }
  1118  
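// divideKeyWithSeedSpecSLOKs constructs a leaf-level KeyShares node: key is
// split into one share per seed spec, with the scheme's SeedSpecThreshold,
// and each share is encrypted with the SLOK derived for that seed spec and
// the current *nextSLOKTime.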
  1119  func divideKeyWithSeedSpecSLOKs(
  1120  	scheme *Scheme,
  1121  	keyMaterialReader io.Reader,
  1122  	key []byte,
  1123  	propagationChannelID string,
  1124  	nextSLOKTime *time.Time) (*KeyShares, error) {
  1125  
  1126  	var boxedShares [][]byte
  1127  	var slokIDs [][]byte
  1128  
  1129  	shares, err := shamirSplit(
  1130  		key,
  1131  		len(scheme.SeedSpecs),
  1132  		scheme.SeedSpecThreshold,
  1133  		keyMaterialReader)
  1134  	if err != nil {
  1135  		return nil, errors.Trace(err)
  1136  	}
  1137  
  1138  	for index, seedSpec := range scheme.SeedSpecs {
  1139  
  1140  		ref := &slokReference{
  1141  			PropagationChannelID: propagationChannelID,
  1142  			SeedSpecID:           string(seedSpec.ID),
  1143  			Time:                 *nextSLOKTime,
  1144  		}
  1145  		slok := scheme.deriveSLOK(ref)
  1146  
  1147  		boxedShare, err := box(slok.Key, shares[index])
  1148  		if err != nil {
  1149  			return nil, errors.Trace(err)
  1150  		}
  1151  		boxedShares = append(boxedShares, boxedShare)
  1152  
  1153  		slokIDs = append(slokIDs, slok.ID)
  1154  	}
  1155  
  1156  	return &KeyShares{
  1157  		Threshold:   scheme.SeedSpecThreshold,
  1158  		BoxedShares: boxedShares,
  1159  		SLOKIDs:     slokIDs,
  1160  		KeyShares:   nil,
  1161  	}, nil
  1162  }
  1163  
  1164  // reassembleKey recursively traverses a KeyShares tree, determining
  1165  // whether there exist sufficient SLOKs to reassemble the root key and
  1166  // performing the key assembly as required.
  1167  func (keyShares *KeyShares) reassembleKey(lookup SLOKLookup, unboxKey bool) (bool, []byte, error) {
  1168  
  1169  	if (len(keyShares.SLOKIDs) > 0 && len(keyShares.KeyShares) > 0) ||
  1170  		(len(keyShares.SLOKIDs) > 0 && len(keyShares.SLOKIDs) != len(keyShares.BoxedShares)) ||
  1171  		(len(keyShares.KeyShares) > 0 && len(keyShares.KeyShares) != len(keyShares.BoxedShares)) {
  1172  		return false, nil, errors.TraceNew("unexpected KeyShares format")
  1173  	}
  1174  
  1175  	shareCount := 0
  1176  	var shares [][]byte
  1177  	if unboxKey {
  1178  		// Note: shamirCombine infers share indices from slice offsets, so a slot
  1179  		// is allocated for every BoxedShare and missing shares are left nil.
  1180  		shares = make([][]byte, len(keyShares.BoxedShares))
  1181  	}
  1182  	if len(keyShares.SLOKIDs) > 0 {
  1183  		for i := 0; i < len(keyShares.SLOKIDs) && shareCount < keyShares.Threshold; i++ {
  1184  			slokKey := lookup(keyShares.SLOKIDs[i])
  1185  			if slokKey == nil {
  1186  				continue
  1187  			}
  1188  			shareCount += 1
  1189  			if unboxKey {
  1190  				share, err := unbox(slokKey, keyShares.BoxedShares[i])
  1191  				if err != nil {
  1192  					return false, nil, errors.Trace(err)
  1193  				}
  1194  				shares[i] = share
  1195  			}
  1196  		}
  1197  	} else {
  1198  		for i := 0; i < len(keyShares.KeyShares) && shareCount < keyShares.Threshold; i++ {
  1199  			ok, key, err := keyShares.KeyShares[i].reassembleKey(lookup, unboxKey)
  1200  			if err != nil {
  1201  				return false, nil, errors.Trace(err)
  1202  			}
  1203  			if !ok {
  1204  				continue
  1205  			}
  1206  			shareCount += 1
  1207  			if unboxKey {
  1208  				share, err := unbox(key, keyShares.BoxedShares[i])
  1209  				if err != nil {
  1210  					return false, nil, errors.Trace(err)
  1211  				}
  1212  				shares[i] = share
  1213  			}
  1214  		}
  1215  	}
  1216  
  1217  	if shareCount < keyShares.Threshold {
  1218  		return false, nil, nil
  1219  	}
  1220  
  1221  	if !unboxKey {
  1222  		return true, nil, nil
  1223  	}
  1224  
  1225  	joinedKey := shamirCombine(shares)
  1226  
  1227  	return true, joinedKey, nil
  1228  }
  1229  
  1230  // GetOSLRegistryURL returns the URL for an OSL registry. Clients
  1231  // call this when fetching the registry from out-of-band
  1232  // distribution sites.
  1233  // Clients are responsible for tracking whether the remote file has
  1234  // changed or not before downloading.
  1235  func GetOSLRegistryURL(baseURL string) string {
  1236  	u, err := url.Parse(baseURL)
  1237  	if err != nil {
  1238  		return ""
  1239  	}
  1240  	u.Path = path.Join(u.Path, REGISTRY_FILENAME)
  1241  	return u.String()
  1242  }
  1243  
  1244  // GetOSLRegistryFilename returns an appropriate filename for
  1245  // the resumable download destination for the OSL registry.
  1246  func GetOSLRegistryFilename(baseDirectory string) string {
  1247  	return filepath.Join(baseDirectory, REGISTRY_FILENAME)
  1248  }
  1249  
  1250  // GetOSLFileURL returns the URL for an OSL file. Once the client
  1251  // has determined, from GetSeededOSLIDs, which OSLs it has sufficiently
  1252  // has determined, via the RegistryStreamer, which OSLs it has sufficiently
  1253  // Clients are responsible for tracking whether the remote file has
  1254  // changed or not before downloading.
  1255  func GetOSLFileURL(baseURL string, oslID []byte) string {
  1256  	u, err := url.Parse(baseURL)
  1257  	if err != nil {
  1258  		return ""
  1259  	}
  1260  	u.Path = path.Join(
  1261  		u.Path, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
  1262  	return u.String()
  1263  }
  1264  
  1265  // GetOSLFilename returns an appropriate filename for the resumable
  1266  // download destination for the OSL file.
  1267  func GetOSLFilename(baseDirectory string, oslID []byte) string {
  1268  	return filepath.Join(
  1269  		baseDirectory, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
  1270  }
  1271  
  1272  // SLOKLookup is a callback to lookup SLOK keys by ID.
  1273  type SLOKLookup func([]byte) []byte
  1274  
  1275  // RegistryStreamer authenticates and processes a JSON encoded OSL registry.
  1276  // The streamer processes the registry without loading the entire file
  1277  // into memory, parsing each OSL file spec in turn and returning those
  1278  // OSL file specs for which the client has sufficient SLOKs to reassemble
  1279  // the OSL key and decrypt.
  1280  //
  1281  // At this stage, SLOK reassembly simply does SLOK ID lookups and threshold
  1282  // counting and does not derive keys for every OSL. This allows the client
  1283  // to defer key derivation until NewOSLReader for cases where it has not
  1284  // already imported the OSL.
  1285  //
  1286  // The client's propagation channel ID is used implicitly: it determines the
  1287  // base URL used to download the registry and OSL files. If the client has
  1288  // seeded SLOKs from a propagation channel ID different than the one associated
  1289  // with its present base URL, they will not appear in the registry and not
  1290  // be used.
  1291  type RegistryStreamer struct {
  1292  	jsonDecoder *json.Decoder
  1293  	lookup      SLOKLookup
  1294  }
  1295  
  1296  // NewRegistryStreamer creates a new RegistryStreamer.
  1297  func NewRegistryStreamer(
  1298  	registryFileContent io.ReadSeeker,
  1299  	signingPublicKey string,
  1300  	lookup SLOKLookup) (*RegistryStreamer, error) {
  1301  
  1302  	payloadReader, err := common.NewAuthenticatedDataPackageReader(
  1303  		registryFileContent, signingPublicKey)
  1304  	if err != nil {
  1305  		return nil, errors.Trace(err)
  1306  	}
  1307  
  1308  	base64Decoder := base64.NewDecoder(base64.StdEncoding, payloadReader)
  1309  
  1310  	// A json.Decoder is used to stream the JSON payload, which
  1311  	// is expected to be of the following form, corresponding
  1312  	// to the Registry struct type:
  1313  	//
  1314  	// {"FileSpecs" : [{...}, {...}, ..., {...}]}
  1315  
  1316  	jsonDecoder := json.NewDecoder(base64Decoder)
  1317  
  1318  	err = expectJSONDelimiter(jsonDecoder, "{")
  1319  	if err != nil {
  1320  		return nil, errors.Trace(err)
  1321  	}
  1322  
  1323  	token, err := jsonDecoder.Token()
  1324  	if err != nil {
  1325  		return nil, errors.Trace(err)
  1326  	}
  1327  
  1328  	name, ok := token.(string)
  1329  
  1330  	if !ok {
  1331  		return nil, errors.Trace(
  1332  			fmt.Errorf("unexpected token type: %T", token))
  1333  	}
  1334  
  1335  	if name != "FileSpecs" {
  1336  		return nil, errors.Trace(
  1337  			fmt.Errorf("unexpected field name: %s", name))
  1338  	}
  1339  
  1340  	err = expectJSONDelimiter(jsonDecoder, "[")
  1341  	if err != nil {
  1342  		return nil, errors.Trace(err)
  1343  	}
  1344  
  1345  	return &RegistryStreamer{
  1346  		jsonDecoder: jsonDecoder,
  1347  		lookup:      lookup,
  1348  	}, nil
  1349  }
  1350  
  1351  // Next returns the next OSL file spec that the client
  1352  // has sufficient SLOKs to decrypt. The client calls
  1353  // NewOSLReader with the file spec to process that OSL.
  1354  // Next returns nil at EOF.
  1355  func (s *RegistryStreamer) Next() (*OSLFileSpec, error) {
  1356  
  1357  	for {
  1358  		if s.jsonDecoder.More() {
  1359  
  1360  			var fileSpec OSLFileSpec
  1361  			err := s.jsonDecoder.Decode(&fileSpec)
  1362  			if err != nil {
  1363  				return nil, errors.Trace(err)
  1364  			}
  1365  
  1366  			ok, _, err := fileSpec.KeyShares.reassembleKey(s.lookup, false)
  1367  			if err != nil {
  1368  				return nil, errors.Trace(err)
  1369  			}
  1370  
  1371  			if ok {
  1372  				return &fileSpec, nil
  1373  			}
  1374  
  1375  		} else {
  1376  
  1377  			// Expect the end of the FileSpecs array.
  1378  			err := expectJSONDelimiter(s.jsonDecoder, "]")
  1379  			if err != nil {
  1380  				return nil, errors.Trace(err)
  1381  			}
  1382  
  1383  			// Expect the end of the Registry object.
  1384  			err = expectJSONDelimiter(s.jsonDecoder, "}")
  1385  			if err != nil {
  1386  				return nil, errors.Trace(err)
  1387  			}
  1388  
  1389  			// Expect the end of the registry content.
  1390  			_, err = s.jsonDecoder.Token()
  1391  			if err != io.EOF {
  1392  				return nil, errors.Trace(err)
  1393  			}
  1394  
  1395  			return nil, nil
  1396  		}
  1397  	}
  1398  }
  1399  
  1400  func expectJSONDelimiter(jsonDecoder *json.Decoder, delimiter string) error {
  1401  	token, err := jsonDecoder.Token()
  1402  	if err != nil {
  1403  		return errors.Trace(err)
  1404  	}
  1405  
  1406  	delim, ok := token.(json.Delim)
  1407  
  1408  	if !ok {
  1409  		return errors.Tracef("unexpected token type: %T", token)
  1410  	}
  1411  
  1412  	if delim.String() != delimiter {
  1413  		return errors.Tracef("unexpected delimiter: %s", delim.String())
  1414  	}
  1415  
  1416  	return nil
  1417  }
  1418  
  1419  // NewOSLReader decrypts, authenticates and streams an OSL payload.
  1420  func NewOSLReader(
  1421  	oslFileContent io.ReadSeeker,
  1422  	fileSpec *OSLFileSpec,
  1423  	lookup SLOKLookup,
  1424  	signingPublicKey string) (io.Reader, error) {
  1425  
  1426  	ok, fileKey, err := fileSpec.KeyShares.reassembleKey(lookup, true)
  1427  	if err != nil {
  1428  		return nil, errors.Trace(err)
  1429  	}
  1430  	if !ok {
  1431  		return nil, errors.TraceNew("unseeded OSL")
  1432  	}
  1433  
  1434  	if len(fileKey) != KEY_LENGTH_BYTES {
  1435  		return nil, errors.TraceNew("invalid key length")
  1436  	}
  1437  
  1438  	var nonce [24]byte
  1439  	var key [KEY_LENGTH_BYTES]byte
  1440  	copy(key[:], fileKey)
  1441  
  1442  	unboxer, err := secretbox.NewOpenReadSeeker(oslFileContent, &nonce, &key)
  1443  	if err != nil {
  1444  		return nil, errors.Trace(err)
  1445  	}
  1446  
  1447  	return common.NewAuthenticatedDataPackageReader(
  1448  		unboxer,
  1449  		signingPublicKey)
  1450  }
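
// A minimal sketch of the client-side flow (download handling omitted; lookup
// returns the stored key for a given SLOK ID, or nil when not seeded):
//
//	streamer, err := NewRegistryStreamer(registryFile, signingPublicKey, lookup)
//	if err != nil {
//	    // handle error
//	}
//	for {
//	    fileSpec, err := streamer.Next()
//	    if err != nil {
//	        // handle error
//	    }
//	    if fileSpec == nil {
//	        break // no more OSLs the client can decrypt
//	    }
//	    // Download the OSL named by GetOSLFileURL(baseURL, fileSpec.ID), then:
//	    oslReader, err := NewOSLReader(oslFile, fileSpec, lookup, signingPublicKey)
//	    if err != nil {
//	        // handle error
//	    }
//	    // Read newline-separated server entries from oslReader.
//	}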
  1451  
  1452  // zeroReader reads an unlimited stream of zeroes.
  1453  type zeroReader struct {
  1454  }
  1455  
  1456  func (z *zeroReader) Read(p []byte) (int, error) {
  1457  	for i := 0; i < len(p); i++ {
  1458  		p[i] = 0
  1459  	}
  1460  	return len(p), nil
  1461  }
  1462  
  1463  // newSeededKeyMaterialReader constructs a CSPRNG using AES-CTR.
  1464  // The seed is the AES key and the IV is fixed and constant.
  1465  // Using the same seed will always produce the same output stream.
  1466  // The data stream is intended to be used to deterministically
  1467  // generate key material and is not intended as a general
  1468  // purpose CSPRNG.
  1469  func newSeededKeyMaterialReader(seed []byte) (io.Reader, error) {
  1470  
  1471  	if len(seed) != KEY_LENGTH_BYTES {
  1472  		return nil, errors.TraceNew("invalid key length")
  1473  	}
  1474  
  1475  	aesCipher, err := aes.NewCipher(seed)
  1476  	if err != nil {
  1477  		return nil, errors.Trace(err)
  1478  	}
  1479  
  1480  	var iv [aes.BlockSize]byte
  1481  
  1482  	return &cipher.StreamReader{
  1483  		S: cipher.NewCTR(aesCipher, iv[:]),
  1484  		R: new(zeroReader),
  1485  	}, nil
  1486  }
  1487  
  1488  // deriveKeyHKDF implements HKDF-Expand as defined in https://tools.ietf.org/html/rfc5869
  1489  // where masterKey = PRK, context = info, and L = 32; SHA-256 is used so HashLen = 32
  1490  func deriveKeyHKDF(masterKey []byte, context ...[]byte) []byte {
  1491  
  1492  	// TODO: use golang.org/x/crypto/hkdf?
  1493  
  1494  	mac := hmac.New(sha256.New, masterKey)
  1495  	for _, item := range context {
  1496  		mac.Write([]byte(item))
  1497  	}
  1498  	mac.Write([]byte{byte(0x01)})
  1499  	return mac.Sum(nil)
  1500  }
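
// For reference, the output equals the first 32 bytes of HKDF-Expand with
// SHA-256, PRK = masterKey, and info = the concatenated context. For example,
// assuming golang.org/x/crypto/hkdf (which this package does not use):
//
//	r := hkdf.Expand(sha256.New, masterKey, concatenatedContext)
//	okm := make([]byte, 32)
//	io.ReadFull(r, okm) // okm equals deriveKeyHKDF(masterKey, context...)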
  1501  
  1502  // isValidShamirSplit checks sss.Split constraints
  1503  func isValidShamirSplit(total, threshold int) bool {
  1504  	if total < 1 || total > 254 || threshold < 1 || threshold > total {
  1505  		return false
  1506  	}
  1507  	return true
  1508  }
  1509  
  1510  // shamirSplit is a helper wrapper for sss.Split
  1511  func shamirSplit(
  1512  	secret []byte,
  1513  	total, threshold int,
  1514  	randReader io.Reader) ([][]byte, error) {
  1515  
  1516  	if !isValidShamirSplit(total, threshold) {
  1517  		return nil, errors.TraceNew("invalid parameters")
  1518  	}
  1519  
  1520  	if threshold == 1 {
  1521  		// Special case: each share is simply the secret
  1522  		shares := make([][]byte, total)
  1523  		for i := 0; i < total; i++ {
  1524  			shares[i] = secret
  1525  		}
  1526  		return shares, nil
  1527  	}
  1528  
  1529  	shareMap, err := sss.SplitUsingReader(
  1530  		byte(total), byte(threshold), secret, randReader)
  1531  	if err != nil {
  1532  		return nil, errors.Trace(err)
  1533  	}
  1534  
  1535  	shares := make([][]byte, total)
  1536  	for i := 0; i < total; i++ {
  1537  		// Note: sss.Combine index starts at 1
  1538  		shares[i] = shareMap[byte(i)+1]
  1539  	}
  1540  
  1541  	return shares, nil
  1542  }
  1543  
  1544  // shamirCombine is a helper wrapper for sss.Combine
  1545  func shamirCombine(shares [][]byte) []byte {
  1546  
  1547  	if len(shares) == 1 {
  1548  		// Special case: each share is simply the secret
  1549  		return shares[0]
  1550  	}
  1551  
  1552  	// Convert a sparse list into a map
  1553  	shareMap := make(map[byte][]byte)
  1554  	for index, share := range shares {
  1555  		if share != nil {
  1556  			// Note: sss.Combine index starts at 1
  1557  			shareMap[byte(index)+1] = share
  1558  		}
  1559  	}
  1560  
  1561  	return sss.Combine(shareMap)
  1562  }
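
// A round-trip sketch: indices matter, as shamirCombine infers each share's
// 1-based index from its position in the slice, so omitted shares are left
// nil (randReader could be, e.g., crypto/rand.Reader or a seeded reader):
//
//	shares, err := shamirSplit(secret, 5, 3, randReader)
//	if err != nil {
//	    // handle error
//	}
//	partial := make([][]byte, 5)
//	partial[0], partial[2], partial[4] = shares[0], shares[2], shares[4]
//	recovered := shamirCombine(partial) // equals secret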
  1563  
  1564  // box is a helper wrapper for secretbox.Seal.
  1565  // A constant nonce is used, which is secure so long as
  1566  // each key is used to encrypt only one message.
  1567  func box(key, plaintext []byte) ([]byte, error) {
  1568  	if len(key) != KEY_LENGTH_BYTES {
  1569  		return nil, errors.TraceNew("invalid key length")
  1570  	}
  1571  	var nonce [24]byte
  1572  	var secretboxKey [KEY_LENGTH_BYTES]byte
  1573  	copy(secretboxKey[:], key)
  1574  	box := secretbox.Seal(nil, plaintext, &nonce, &secretboxKey)
  1575  	return box, nil
  1576  }
  1577  
  1578  // unbox is a helper wrapper for secretbox.Open
  1579  func unbox(key, box []byte) ([]byte, error) {
  1580  	if len(key) != KEY_LENGTH_BYTES {
  1581  		return nil, errors.TraceNew("invalid key length")
  1582  	}
  1583  	var nonce [24]byte
  1584  	var secretboxKey [KEY_LENGTH_BYTES]byte
  1585  	copy(secretboxKey[:], key)
  1586  	plaintext, ok := secretbox.Open(nil, box, &nonce, &secretboxKey)
  1587  	if !ok {
  1588  		return nil, errors.TraceNew("unbox failed")
  1589  	}
  1590  	return plaintext, nil
  1591  }
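
// A box/unbox round-trip sketch. As noted above, the constant nonce is safe
// only because each key is used to encrypt a single message:
//
//	key := deriveKeyHKDF(masterKey, []byte("example-context"))
//	boxed, err := box(key, plaintext)
//	if err != nil {
//	    // handle error
//	}
//	recovered, err := unbox(key, boxed) // recovered equals plaintext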