github.com/transparency-dev/armored-witness-applet@v0.1.1/trusted_applet/internal/storage/persistence.go (about)

     1  // Copyright 2022 The Armored Witness Applet authors. All Rights Reserved.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package storage
    16  
    17  import (
    18  	"errors"
    19  	"fmt"
    20  	"sync"
    21  
    22  	"github.com/transparency-dev/armored-witness-applet/trusted_applet/internal/storage/slots"
    23  	"github.com/transparency-dev/witness/omniwitness"
    24  	"google.golang.org/grpc/codes"
    25  	"google.golang.org/grpc/status"
    26  	"gopkg.in/yaml.v3"
    27  	"k8s.io/klog/v2"
    28  )
    29  
    30  const (
    31  	mappingConfigSlot = 0
    32  )
    33  
// SlotPersistence is an implementation of the witness Persistence
// interface based on Slots.
type SlotPersistence struct {
	// mu protects access to everything below.
	mu sync.RWMutex

	// part is the underlying storage partition we're using to persist
	// data.
	part *slots.Partition

	// directorySlot is a reference to the zeroth slot in a partition.
	// This slot is used to maintain a mapping of log ID to slot index
	// where state for that log is stored.
	directorySlot *slots.Slot
	// directoryWriteToken is the token received when we read the mapping config
	// from directorySlot above. It'll be used when we want to store an updated
	// mapping config.
	directoryWriteToken uint32

	// idToSlot maintains the mapping from LogID to slot index used to store
	// checkpoints from that log.
	idToSlot slotMap

	// freeSlots is a list of unused slot indices available to be mapped to logIDs.
	freeSlots []uint
}
    60  
// slotMap defines the structure of the mapping config stored in slot zero:
// a map from log ID to the index of the slot holding that log's state.
type slotMap map[string]uint
    63  
    64  // NewSlotPersistence creates a new SlotPersistence instance.
    65  // As per the Persistence interface, Init must be called before it's used to
    66  // read or write any data.
    67  func NewSlotPersistence(part *slots.Partition) *SlotPersistence {
    68  	return &SlotPersistence{
    69  		part:     part,
    70  		idToSlot: make(map[string]uint),
    71  	}
    72  }
    73  
    74  // Init sets up the persistence layer. This should be idempotent,
    75  // and will be called once per process startup.
    76  func (p *SlotPersistence) Init() error {
    77  	p.mu.Lock()
    78  	defer p.mu.Unlock()
    79  
    80  	s, err := p.part.Open(mappingConfigSlot)
    81  	if err != nil {
    82  		return fmt.Errorf("failed to open mapping slot: %v", err)
    83  	}
    84  	p.directorySlot = s
    85  	if err := p.populateMap(); err != nil {
    86  		return fmt.Errorf("failed to populate logID → slot map: %v", err)
    87  	}
    88  	return nil
    89  }
    90  
    91  // Logs returns the IDs of all logs that have checkpoints that can
    92  // be read.
    93  func (p *SlotPersistence) Logs() ([]string, error) {
    94  	p.mu.RLock()
    95  	defer p.mu.RUnlock()
    96  
    97  	r := make([]string, 0, len(p.idToSlot))
    98  	for k := range p.idToSlot {
    99  		r = append(r, k)
   100  	}
   101  	return r, nil
   102  }
   103  
   104  // ReadOps returns read-only operations for the given log ID. This
   105  // method only makes sense for IDs returned by Logs().
   106  func (p *SlotPersistence) ReadOps(logID string) (omniwitness.LogStateReadOps, error) {
   107  	p.mu.RLock()
   108  	defer p.mu.RUnlock()
   109  	i, ok := p.idToSlot[logID]
   110  	if !ok {
   111  		// TODO(al): work around undocumented assumptions in the storage interface;
   112  		// we have to return a ReadOp even if we don't know about the specified logID.
   113  		// the ReadOps we return must then return an error of codes.NotFound when GetLatest
   114  		// is called so that the witness will eventually call WriteOps/Update with the
   115  		// same logID and create the record.
   116  		return &slotOps{}, nil
   117  	}
   118  	s, err := p.part.Open(i)
   119  	if err != nil {
   120  		return nil, fmt.Errorf("internal error opening slot %d associated with log ID %q: %v", i, logID, err)
   121  	}
   122  	return &slotOps{slot: s}, nil
   123  }
   124  
   125  // WriteOps shows intent to write data for the given logID. The
   126  // returned operations must have Close() called when the intent
   127  // is complete.
   128  // There is no requirement that the ID is present in Logs(); if
   129  // the ID is not there and this operation succeeds in committing
   130  // a checkpoint, then Logs() will return the new ID afterwards.
   131  func (p *SlotPersistence) WriteOps(logID string) (omniwitness.LogStateWriteOps, error) {
   132  	// Lock rather than RLock because we might add a new log mapping here.
   133  	p.mu.Lock()
   134  	defer p.mu.Unlock()
   135  	i, ok := p.idToSlot[logID]
   136  	if !ok {
   137  		var err error
   138  		i, err = p.addLog(logID)
   139  		if err != nil {
   140  			klog.Warningf("Failed to add mapping: %q", err)
   141  			return nil, fmt.Errorf("unable to assign slot for log ID %q: %v", logID, err)
   142  		}
   143  	}
   144  	klog.V(2).Infof("mapping %q -> %d", logID, i)
   145  	s, err := p.part.Open(i)
   146  	if err != nil {
   147  		klog.Warningf("failed to open %d: %v", i, err)
   148  		return nil, fmt.Errorf("internal error opening slot %d associated with log ID %q: %v", i, logID, err)
   149  	}
   150  	return &slotOps{slot: s}, nil
   151  }
   152  
// slotOps provides read/write operations over the state of a single log,
// backed by one storage slot. It implements both the read and write ops
// interfaces returned by ReadOps/WriteOps above.
type slotOps struct {
	// mu protects slot and writeToken.
	mu sync.RWMutex
	// slot is the underlying storage slot; it is nil when this instance was
	// created for an unknown logID (see ReadOps), or after Close() has been
	// called.
	slot *slots.Slot
	// writeToken is the token returned by the most recent slot.Read(); it is
	// passed to CheckAndWrite in Set() to detect intervening modifications.
	writeToken uint32
}
   158  
// logRecord is the YAML-serialised structure stored in a log's slot.
type logRecord struct {
	// Checkpoint is the raw checkpoint bytes for the log.
	Checkpoint []byte
	// Proof is the compact range (if applicable) associated with Checkpoint.
	Proof []byte
}
   163  
   164  // GetLatest returns the latest checkpoint and its compact range (if applicable).
   165  // If no checkpoint exists, it must return codes.NotFound.
   166  func (s *slotOps) GetLatest() ([]byte, []byte, error) {
   167  	s.mu.RLock()
   168  	defer s.mu.RUnlock()
   169  
   170  	// TODO(al): workaround for storage assumption - see comment in ReadOps above.
   171  	if s.slot == nil {
   172  		return nil, nil, status.Error(codes.NotFound, "no checkpoint for log")
   173  	}
   174  
   175  	b, t, err := s.slot.Read()
   176  	if err != nil {
   177  		klog.Warningf("Read failed: %v", err)
   178  		return nil, nil, fmt.Errorf("failed to read data: %v", err)
   179  	}
   180  	s.writeToken = t
   181  	if len(b) == 0 {
   182  		klog.Warningf("No checkpoint")
   183  		return nil, nil, status.Error(codes.NotFound, "no checkpoint for log")
   184  	}
   185  	lr := logRecord{}
   186  	if err := yaml.Unmarshal(b, &lr); err != nil {
   187  		klog.Warningf("Unmarshal failed: %v", err)
   188  		return nil, nil, fmt.Errorf("failed to unmarshal data: %v", err)
   189  	}
   190  	klog.V(2).Infof("read:\n%s", lr.Checkpoint)
   191  	return lr.Checkpoint, lr.Proof, nil
   192  }
   193  
   194  // Set sets a new checkpoint and (optional) compact range
   195  // for the log. This commits the state to persistence.
   196  // After this call, only Close() should be called on this object.
   197  func (s *slotOps) Set(checkpointRaw []byte, compactRange []byte) error {
   198  	s.mu.Lock()
   199  	defer s.mu.Unlock()
   200  
   201  	lr := logRecord{
   202  		Checkpoint: checkpointRaw,
   203  		Proof:      compactRange,
   204  	}
   205  
   206  	klog.V(2).Infof("writing with token %d:\n%s", s.writeToken, lr.Checkpoint)
   207  
   208  	lrRaw, err := yaml.Marshal(&lr)
   209  	if err != nil {
   210  		klog.Warningf("marshal failed: %v", err)
   211  		return fmt.Errorf("failed to marshal data: %v", err)
   212  	}
   213  
   214  	if err := s.slot.CheckAndWrite(s.writeToken, lrRaw); err != nil {
   215  		klog.Warningf("Write failed: %v", err)
   216  		return fmt.Errorf("failed to write data: %v", err)
   217  	}
   218  	return nil
   219  }
   220  
// Close terminates the write operation, freeing all resources.
// This method MUST be called.
// After Close, the slot reference is dropped so any further use of this
// instance will behave as if no log state exists.
func (s *slotOps) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.slot = nil
	return nil
}
   229  
   230  // populateMap reads the logID -> slot mapping from storage.
   231  // Must be called with p.mu write-locked.
   232  func (p *SlotPersistence) populateMap() error {
   233  	b, t, err := p.directorySlot.Read()
   234  	if err != nil {
   235  		return fmt.Errorf("failed to read persistence mapping: %v", err)
   236  	}
   237  	if err := yaml.Unmarshal(b, &p.idToSlot); err != nil {
   238  		return fmt.Errorf("failed to unmarshal persistence mapping: %v", err)
   239  	}
   240  	// We read the logID<->Slot config, so save the token for if/when we want to
   241  	// store an updated mapping.
   242  	p.directoryWriteToken = t
   243  
   244  	// Precalculate the list of available slots.
   245  	slotState := make([]bool, p.part.NumSlots())
   246  	for _, idx := range p.idToSlot {
   247  		if idx == mappingConfigSlot {
   248  			return errors.New("internal-error, reserved slot 0 has been used")
   249  		}
   250  		slotState[idx] = true
   251  	}
   252  
   253  	// Slot 0 is reserved for the mapping config, so mark it used here:
   254  	slotState[mappingConfigSlot] = true
   255  
   256  	p.freeSlots = make([]uint, 0, p.part.NumSlots())
   257  	for idx, used := range slotState {
   258  		if !used {
   259  			p.freeSlots = append(p.freeSlots, uint(idx))
   260  		}
   261  	}
   262  	return nil
   263  }
   264  
   265  // storeDirectory writes the current logID -> slot map to storage.
   266  // Must be called with p.mu at leaest read-locked.
   267  func (p *SlotPersistence) storeDirectory() error {
   268  	smRaw, err := yaml.Marshal(p.idToSlot)
   269  	if err != nil {
   270  		return fmt.Errorf("failed to marshal mapping: %v", err)
   271  	}
   272  	if err := p.directorySlot.CheckAndWrite(p.directoryWriteToken, smRaw); err != nil {
   273  		return fmt.Errorf("failed to store mapping: %v", err)
   274  	}
   275  	// TODO(al): CheckAndWrite should return the next token rather than us knowing
   276  	// how the token changes after a successful write.
   277  	p.directoryWriteToken++
   278  	return nil
   279  }
   280  
   281  // addLog assigns a slot to a new log ID.
   282  // Must be called with p.mu write-locked.
   283  func (p *SlotPersistence) addLog(id string) (uint, error) {
   284  	if idx, ok := p.idToSlot[id]; ok {
   285  		return idx, nil
   286  	}
   287  	if len(p.freeSlots) == 0 {
   288  		return 0, errors.New("no free slot available")
   289  	}
   290  	f := p.freeSlots[0]
   291  	p.freeSlots = p.freeSlots[1:]
   292  	p.idToSlot[id] = f
   293  	if err := p.storeDirectory(); err != nil {
   294  		return 0, fmt.Errorf("failed to storeMap: %v", err)
   295  	}
   296  	klog.V(1).Infof("Added new mapping %q -> %d", id, f)
   297  	return f, nil
   298  }