vitess.io/vitess@v0.16.2/go/vt/topo/memorytopo/memorytopo.go

/*
Copyright 2019 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package memorytopo contains an implementation of the topo.Factory /
// topo.Conn interfaces based on an in-memory tree of data.
// It is constructed with an immutable set of cells.
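//
// A typical test setup looks like this (a minimal sketch; NewServer is
// defined below, and ConnForCell is provided by topo.Server):
//
//	ts := memorytopo.NewServer("zone1", "zone2")
//	conn, err := ts.ConnForCell(ctx, "zone1")
//	if err != nil {
//		t.Fatal(err)
//	}
//	_ = conn // a per-cell topo.Conn backed by the in-memory tree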
package memorytopo

import (
	"context"
	"errors"
	"math/rand"
	"strings"
	"sync"
	"time"

	"vitess.io/vitess/go/vt/log"
	"vitess.io/vitess/go/vt/topo"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)

const (
	// Path components
	electionsPath = "elections"
)

// ErrConnectionClosed is returned by operations on a Conn that has already
// been closed.
var ErrConnectionClosed = errors.New("connection closed")

const (
	// UnreachableServerAddr is a sentinel value for CellInfo.ServerAddress.
	// If a memorytopo topo.Conn is created with this serverAddr, then every
	// method on that Conn which takes a context will simply block until the
	// context finishes, and return ctx.Err(), in order to simulate an
	// unreachable local cell for testing.
	UnreachableServerAddr = "unreachable"
)
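
// For example, a test can make a cell unreachable by pointing its CellInfo at
// the sentinel address (a sketch; UpdateCellInfoFields is provided by
// topo.Server, and connections opened after the update will block until their
// context expires):
//
//	ts, _ := memorytopo.NewServerAndFactory("zone1")
//	_ = ts.UpdateCellInfoFields(ctx, "zone1", func(ci *topodatapb.CellInfo) error {
//		ci.ServerAddress = memorytopo.UnreachableServerAddr
//		return nil
//	})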

var (
	nextWatchIndex = 0
)

// Factory is a memory-based implementation of topo.Factory. It
// takes a file-system-like approach, with directories at each level
// being an actual directory node. This is meant to be closer to
// file-system-like servers such as ZooKeeper or Chubby; etcd or Consul
// implementations would be closer to a node-based implementation.
//
// It contains a single tree of nodes. Each cell topo.Conn will use
// a sub-directory in that tree.
type Factory struct {
	// mu protects the following fields.
	mu sync.Mutex
	// cells is the toplevel map that has one entry per cell.
	cells map[string]*node
	// generation is used to generate unique incrementing version
	// numbers. We want a global counter so when creating a file,
	// then deleting it, then re-creating it, we don't restart the
	// version at 1. It is initialized with a random number,
	// so if we have two instances, the numbers won't match.
	generation uint64
	// err is used for testing purposes to force queries / watches
	// to return the given error.
	err error
}

// HasGlobalReadOnlyCell is part of the topo.Factory interface.
func (f *Factory) HasGlobalReadOnlyCell(serverAddr, root string) bool {
	return false
}

// Create is part of the topo.Factory interface.
func (f *Factory) Create(cell, serverAddr, root string) (topo.Conn, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if _, ok := f.cells[cell]; !ok {
		return nil, topo.NewError(topo.NoNode, cell)
	}
	return &Conn{
		factory:    f,
		cell:       cell,
		serverAddr: serverAddr,
	}, nil
}

// SetError forces the given error to be returned from all calls and propagates
// the error to all active watches.
func (f *Factory) SetError(err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	f.err = err
	if err != nil {
		for _, node := range f.cells {
			node.PropagateWatchError(err)
		}
	}
}
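
// For example, a test can force every topo call to fail with a chosen error
// (a sketch; topo.NewError and topo.Timeout come from the topo package):
//
//	ts, factory := memorytopo.NewServerAndFactory("zone1")
//	factory.SetError(topo.NewError(topo.Timeout, "injected for test"))
//	// Reads, writes and watches through ts now observe the injected error.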

// Lock blocks all requests to the topo and is exposed to allow tests to
// simulate an unresponsive topo server.
func (f *Factory) Lock() {
	f.mu.Lock()
}

// Unlock unblocks all requests to the topo and is exposed to allow tests to
// simulate an unresponsive topo server.
func (f *Factory) Unlock() {
	f.mu.Unlock()
}
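
// For example, a test can simulate a hung topo server for part of a test
// (a sketch using only functions defined in this file):
//
//	ts, factory := memorytopo.NewServerAndFactory("zone1")
//	factory.Lock()
//	// ... exercise code that must tolerate an unresponsive topo ...
//	factory.Unlock()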

// Conn implements the topo.Conn interface. It remembers the cell and serverAddr,
// and points at the Factory that has all the data.
type Conn struct {
	factory    *Factory
	cell       string
	serverAddr string
	closed     bool
}

// dial returns immediately, unless the Conn points to the sentinel
// UnreachableServerAddr, in which case it will block until the context expires
// and return the context's error. It returns ErrConnectionClosed if the Conn
// has already been closed.
func (c *Conn) dial(ctx context.Context) error {
	if c.closed {
		return ErrConnectionClosed
	}
	if c.serverAddr == UnreachableServerAddr {
		<-ctx.Done()
		return ctx.Err()
	}

	return nil
}

// Close is part of the topo.Conn interface.
func (c *Conn) Close() {
	c.closed = true
}

// watch groups the channels used to notify a single watcher: one for regular
// data watches, one for recursive watches, and one for lock notifications.
type watch struct {
	contents  chan *topo.WatchData
	recursive chan *topo.WatchDataRecursive
	lock      chan string
}

// node contains a directory or a file entry.
// Exactly one of contents or children is not nil.
type node struct {
	name     string
	version  uint64
	contents []byte
	children map[string]*node

	// parent is a pointer to the parent node.
	// It is nil for the toplevel (per-cell) nodes.
	parent *node

	// watches is a map of all watches for this node.
	watches map[int]watch

	// lock is nil when the node is not locked.
	// Otherwise it has a channel that is closed by unlock.
	lock chan struct{}

	// lockContents is the contents of the lock.
	// For regular locks, it is the contents that was passed in.
	// For a primary election, it is the id of the election leader.
	lockContents string
}
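
// For illustration, a file stored at "keyspaces/ks1/Keyspace" in cell "zone1"
// (a hypothetical path) becomes a chain of directory nodes ending in a file
// node:
//
//	f.cells["zone1"] -> "keyspaces" -> "ks1" -> "Keyspace" (contents != nil)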

// isDirectory returns true if the node is a directory (has a children map).
func (n *node) isDirectory() bool {
	return n.children != nil
}

// recurseContents calls the given callback on every file node under n,
// including n itself if it is a file.
func (n *node) recurseContents(callback func(n *node)) {
	if n.isDirectory() {
		for _, child := range n.children {
			child.recurseContents(callback)
		}
	} else {
		callback(n)
	}
}

// propagateRecursiveWatch sends the given event to the recursive watches of
// all ancestors of this node.
func (n *node) propagateRecursiveWatch(ev *topo.WatchDataRecursive) {
	for parent := n.parent; parent != nil; parent = parent.parent {
		for _, w := range parent.watches {
			if w.recursive != nil {
				w.recursive <- ev
			}
		}
	}
}

// PropagateWatchError propagates the given error to all watches on this node
// and recursively applies to all children.
func (n *node) PropagateWatchError(err error) {
	for _, ch := range n.watches {
		if ch.contents == nil {
			continue
		}
		ch.contents <- &topo.WatchData{
			Err: err,
		}
	}

	for _, c := range n.children {
		c.PropagateWatchError(err)
	}
}

// NewServerAndFactory returns a new MemoryTopo and the backing factory for all
// the cells. It creates one cell for each parameter passed in, and exits via
// log.Exitf in case of a problem.
func NewServerAndFactory(cells ...string) (*topo.Server, *Factory) {
	f := &Factory{
		cells:      make(map[string]*node),
		generation: uint64(rand.Int63n(1 << 60)),
	}
	f.cells[topo.GlobalCell] = f.newDirectory(topo.GlobalCell, nil)

	ctx := context.Background()
	ts, err := topo.NewWithFactory(f, "" /*serverAddress*/, "" /*root*/)
	if err != nil {
		log.Exitf("topo.NewWithFactory() failed: %v", err)
	}
	for _, cell := range cells {
		f.cells[cell] = f.newDirectory(cell, nil)
		if err := ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}); err != nil {
			log.Exitf("ts.CreateCellInfo(%v) failed: %v", cell, err)
		}
	}
	return ts, f
}

// NewServer returns a new in-memory topo.Server for the given cells.
func NewServer(cells ...string) *topo.Server {
	server, _ := NewServerAndFactory(cells...)
	return server
}

// getNextVersion returns the next value of the global version counter.
func (f *Factory) getNextVersion() uint64 {
	f.generation++
	return f.generation
}

// newFile creates a file node with the given contents under the given parent.
func (f *Factory) newFile(name string, contents []byte, parent *node) *node {
	return &node{
		name:     name,
		version:  f.getNextVersion(),
		contents: contents,
		parent:   parent,
		watches:  make(map[int]watch),
	}
}

// newDirectory creates an empty directory node under the given parent.
func (f *Factory) newDirectory(name string, parent *node) *node {
	return &node{
		name:     name,
		version:  f.getNextVersion(),
		children: make(map[string]*node),
		parent:   parent,
		watches:  make(map[int]watch),
	}
}

// nodeByPath returns the node at the given path in the given cell,
// or nil if the cell or the path does not exist.
func (f *Factory) nodeByPath(cell, filePath string) *node {
	n, ok := f.cells[cell]
	if !ok {
		return nil
	}

	parts := strings.Split(filePath, "/")
	for _, part := range parts {
		if part == "" {
			// Skip empty parts, usually happens at the end.
			continue
		}
		if n.children == nil {
			// This is a file.
			return nil
		}
		child, ok := n.children[part]
		if !ok {
			// Path doesn't exist.
			return nil
		}
		n = child
	}
	return n
}

// getOrCreatePath returns the node at the given path in the given cell,
// creating intermediate directories as needed. It returns nil if the cell
// does not exist or if a file is found along the path.
func (f *Factory) getOrCreatePath(cell, filePath string) *node {
	n, ok := f.cells[cell]
	if !ok {
		return nil
	}

	parts := strings.Split(filePath, "/")
	for _, part := range parts {
		if part == "" {
			// Skip empty parts, usually happens at the end.
			continue
		}
		if n.children == nil {
			// This is a file.
			return nil
		}
		child, ok := n.children[part]
		if !ok {
			// Path doesn't exist, create it.
			child = f.newDirectory(part, n)
			n.children[part] = child
		}
		n = child
	}
	return n
}

// recursiveDelete removes a node from its parent, then recursively removes
// any parent directories that become empty.
func (f *Factory) recursiveDelete(n *node) {
	parent := n.parent
	if parent == nil {
		return
	}
	delete(parent.children, n.name)
	if len(parent.children) == 0 {
		f.recursiveDelete(parent)
	}
}

func init() {
	rand.Seed(time.Now().UnixNano())
}