github.com/swiftstack/ProxyFS@v0.0.0-20210203235616-4017c267d62f/pfsagentd/file_inode.go

     1  // Copyright (c) 2015-2021, NVIDIA CORPORATION.
     2  // SPDX-License-Identifier: Apache-2.0
     3  
     4  package main
     5  
     6  import (
     7  	"container/list"
     8  	"fmt"
     9  	"io"
    10  	"net/http"
    11  	"strconv"
    12  	"strings"
    13  	"sync"
    14  	"sync/atomic"
    15  	"time"
    16  
    17  	"github.com/swiftstack/sortedmap"
    18  
    19  	"github.com/swiftstack/ProxyFS/inode"
    20  	"github.com/swiftstack/ProxyFS/jrpcfs"
    21  )
    22  
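        // doFlushIfNecessary blocks until all Chunked PUTs currently in flight for fileInode
        // have completed. If no explicit flush is already in progress, it marks one as in
        // progress and, if the most recent chunkedPutContext is still open, closes its
        // sendChan to trigger the flush.
        //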
    23  func (fileInode *fileInodeStruct) doFlushIfNecessary() {
    24  	var (
    25  		chunkedPutContext        *chunkedPutContextStruct
    26  		chunkedPutContextElement *list.Element
    27  		flushWG                  sync.WaitGroup
    28  	)
    29  
    30  	// grantedLock = fileInode.getExclusiveLock()
    31  
    32  	if 0 == fileInode.chunkedPutList.Len() {
    33  		// No Chunked PUTs in flight... so we can just exit
    34  		// grantedLock.release()
    35  		return
    36  	}
    37  
    38  	// At least one Chunked PUT is in flight... so we know we'll have to block later
    39  
    40  	flushWG.Add(1)
    41  	_ = fileInode.chunkedPutFlushWaiterList.PushBack(&flushWG)
    42  
    43  	// Now see if we need to initiate the flush
    44  
    45  	if fileInode.flushInProgress {
    46  		// We do not need to send a flush
    47  	} else {
    48  		// No explicit flush is in progress... so make it appear so
    49  
    50  		fileInode.flushInProgress = true
    51  
    52  		// See if the last Chunked PUT is already flushing anyway
    53  
    54  		chunkedPutContextElement = fileInode.chunkedPutList.Back()
    55  		chunkedPutContext = chunkedPutContextElement.Value.(*chunkedPutContextStruct)
    56  
    57  		if chunkedPutContextStateOpen == chunkedPutContext.state {
    58  			// We need to trigger a Flush
    59  			chunkedPutContext.state = chunkedPutContextStateClosing
    60  			close(chunkedPutContext.sendChan)
    61  		}
    62  	}
    63  
    64  	// grantedLock.release()
    65  
    66  	// Finally, wait for the flush to complete
    67  
    68  	flushWG.Wait()
    69  }
    70  
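        // pruneFileInodeDirtyListIfNecessary flushes the oldest entries of
        // globals.fileInodeDirtyList until its length drops below globals.config.DirtyFileLimit,
        // making room for a fileInode that is about to be appended to the list.
        //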
    71  func pruneFileInodeDirtyListIfNecessary() {
    72  	var (
    73  		fileInode        *fileInodeStruct
    74  		fileInodeElement *list.Element
    75  	)
    76  
    77  	for {
    78  		globals.Lock()
    79  		if uint64(globals.fileInodeDirtyList.Len()) < globals.config.DirtyFileLimit {
    80  			// There is room for the fileInode about to be added to the list
    81  			globals.Unlock()
    82  			return
    83  		}
    84  		fileInodeElement = globals.fileInodeDirtyList.Front()
    85  		globals.Unlock()
    86  		fileInode = fileInodeElement.Value.(*fileInodeStruct)
    87  		fileInode.doFlushIfNecessary()
    88  	}
    89  }
    90  
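        // emptyFileInodeDirtyListAndLogSegmentChan flushes every fileInode on
        // globals.fileInodeDirtyList and, once the list is empty, drains the
        // globals.config.DirtyLogSegmentLimit quota elements from
        // globals.fileInodeDirtyLogSegmentChan.
        //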
    91  func emptyFileInodeDirtyListAndLogSegmentChan() {
    92  	var (
    93  		fileInode                         *fileInodeStruct
    94  		fileInodeDirtyLogSegmentChanIndex uint64
    95  		fileInodeElement                  *list.Element
    96  	)
    97  
    98  	for {
    99  		globals.Lock()
   100  
   101  		if 0 == globals.fileInodeDirtyList.Len() {
   102  			// The list is now empty
   103  
   104  			globals.Unlock()
   105  
   106  			// And globals.fileInodeDirtyLogSegmentChan should be fully stocked with globals.config.DirtyLogSegmentLimit elements... so empty it also
   107  
   108  			for fileInodeDirtyLogSegmentChanIndex = 0; fileInodeDirtyLogSegmentChanIndex < globals.config.DirtyLogSegmentLimit; fileInodeDirtyLogSegmentChanIndex++ {
   109  				_ = <-globals.fileInodeDirtyLogSegmentChan
   110  			}
   111  
   112  			// And we are done...
   113  
   114  			return
   115  		}
   116  
   117  		// Kick off a flush of the first (then next) element of globals.fileInodeDirtyList
   118  
   119  		fileInodeElement = globals.fileInodeDirtyList.Front()
   120  
   121  		globals.Unlock()
   122  
   123  		fileInode = fileInodeElement.Value.(*fileInodeStruct)
   124  		fileInode.doFlushIfNecessary()
   125  	}
   126  }
   127  
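        // sendDaemon drives a single Chunked PUT. It launches performChunkedPut() and then
        // forwards wake-ups from sendChan to Read() until either sendChan is closed (an
        // explicit flush) or MaxFlushTime expires. Once the PUT completes, it marks eligible
        // chunkedPutContexts as complete and, when fileInode.chunkedPutList empties, releases
        // any flush waiters and removes fileInode from globals.fileInodeDirtyList.
        //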
   128  func (chunkedPutContext *chunkedPutContextStruct) sendDaemon() {
   129  	var (
   130  		expirationDelay           time.Duration
   131  		expirationTime            time.Time
   132  		fileInode                 *fileInodeStruct
   133  		flushWaiterListElement    *list.Element
   134  		nextChunkedPutContext     *chunkedPutContextStruct
   135  		nextChunkedPutListElement *list.Element
   136  		sendChanOpenOrNonEmpty    bool
   137  	)
   138  
   139  	fileInode = chunkedPutContext.fileInode
   140  
   141  	// Kick off Chunked PUT
   142  
   143  	chunkedPutContext.Add(1)
   144  	go chunkedPutContext.performChunkedPut()
   145  
   146  	// Start MaxFlushTime timer
   147  
   148  	expirationTime = time.Now().Add(globals.config.MaxFlushTime)
   149  
   150  	// Loop awaiting chunks (that could be explicit flushes) or expirationTime
   151  
   152  	for {
   153  		expirationDelay = expirationTime.Sub(time.Now())
   154  
   155  		select {
   156  		case <-time.After(expirationDelay):
   157  			// MaxFlushTime-triggered flush requested
   158  
   159  			// grantedLock = fileInode.getExclusiveLock()
   160  			chunkedPutContext.state = chunkedPutContextStateClosing
   161  			// grantedLock.release()
   162  
   163  			goto PerformFlush
   164  		case _, sendChanOpenOrNonEmpty = <-chunkedPutContext.sendChan:
   165  			// Send chunk to *chunkedPutContextStruct.Read()
   166  
   167  			select {
   168  			case chunkedPutContext.wakeChan <- struct{}{}:
   169  				// We just notified Read()
   170  			default:
   171  				// We didn't need to notify Read()
   172  			}
   173  
   174  			if !sendChanOpenOrNonEmpty {
   175  				goto PerformFlush
   176  			}
   177  		}
   178  	}
   179  
   180  PerformFlush:
   181  
   182  	// Tell *chunkedPutContextStruct.Read() to finish & wait for it to finish
   183  
   184  	close(chunkedPutContext.wakeChan)
   185  	chunkedPutContext.Wait()
   186  
   187  	// Chunked PUT is complete
   188  
   189  	// grantedLock = fileInode.getExclusiveLock()
   190  
   191  	chunkedPutContext.state = chunkedPutContextStateClosed
   192  
   193  	// But first, make sure sendChan is drained
   194  
   195  	for {
   196  		select {
   197  		case _, sendChanOpenOrNonEmpty = <-chunkedPutContext.sendChan:
   198  			if sendChanOpenOrNonEmpty {
   199  				continue
   200  			} else {
   201  				goto EscapeSendChanDrain
   202  			}
   203  		default:
   204  			goto EscapeSendChanDrain
   205  		}
   206  	}
   207  
   208  EscapeSendChanDrain:
   209  
   210  	// Can we tell ProxyFS about it and dispose of it?
   211  
   212  	if nil == chunkedPutContext.chunkedPutListElement.Prev() {
   213  		// We can record this chunkedPutContext as having completed
   214  
   215  		nextChunkedPutListElement = chunkedPutContext.chunkedPutListElement.Next()
   216  
   217  		chunkedPutContext.complete()
   218  
   219  		// Check to see if subsequent chunkedPutContexts are also closed and able to be completed
   220  
   221  		for nil != nextChunkedPutListElement {
   222  			nextChunkedPutContext = nextChunkedPutListElement.Value.(*chunkedPutContextStruct)
   223  
   224  			if chunkedPutContextStateClosed != nextChunkedPutContext.state {
   225  				// Ran into an un-closed chunkedPutContext...so simply exit the loop
   226  				break
   227  			}
   228  
   229  			// We can similarly record this chunkedPutContext as having completed
   230  
   231  			nextChunkedPutListElement = nextChunkedPutContext.chunkedPutListElement.Next()
   232  
   233  			nextChunkedPutContext.complete()
   234  		}
   235  	}
   236  
   237  	if 0 == fileInode.chunkedPutList.Len() {
   238  		// Indicate flush is complete
   239  
   240  		fileInode.flushInProgress = false
   241  
   242  		for fileInode.chunkedPutFlushWaiterList.Len() > 0 {
   243  			flushWaiterListElement = fileInode.chunkedPutFlushWaiterList.Front()
   244  			_ = fileInode.chunkedPutFlushWaiterList.Remove(flushWaiterListElement)
   245  			flushWaiterListElement.Value.(*sync.WaitGroup).Done()
   246  		}
   247  
   248  		// Remove fileInode from globals.fileInodeDirtyList
   249  
   250  		globals.Lock()
   251  		globals.fileInodeDirtyList.Remove(fileInode.dirtyListElement)
   252  		fileInode.dirtyListElement = nil
   253  		globals.Unlock()
   254  	}
   255  
   256  	// grantedLock.release()
   257  }
   258  
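        // performChunkedPut provisions a target LogSegment object via Server.RpcProvisionObject
        // and then streams chunkedPutContext.buf to it with an HTTP Chunked PUT, using
        // chunkedPutContext itself (via its Read() method) as the request body.
        //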
   259  func (chunkedPutContext *chunkedPutContextStruct) performChunkedPut() {
   260  	var (
   261  		chunkedPutRequest            *http.Request
   262  		containerAndObjectNames      string
   263  		containerAndObjectNamesSplit []string
   264  		err                          error
   265  		ok                           bool
   266  		physPathSplit                []string
   267  		provisionObjectReply         *jrpcfs.ProvisionObjectReply
   268  		provisionObjectRequest       *jrpcfs.ProvisionObjectRequest
   269  		statusCode                   int
   270  		swiftStorageURL              string
   271  	)
   272  
   273  	provisionObjectRequest = &jrpcfs.ProvisionObjectRequest{
   274  		MountID: globals.mountID,
   275  	}
   276  
   277  	provisionObjectReply = &jrpcfs.ProvisionObjectReply{}
   278  
   279  	err = globals.retryRPCClient.Send("RpcProvisionObject", provisionObjectRequest, provisionObjectReply)
   280  	if nil != err {
   281  		logFatalf("*chunkedPutContextStruct.performChunkedPut() call to Server.RpcProvisionObject failed: %v", err)
   282  	}
   283  
   284  	physPathSplit = strings.SplitAfterN(provisionObjectReply.PhysPath, "/", 4)
   285  	containerAndObjectNames = physPathSplit[len(physPathSplit)-1]
   286  	containerAndObjectNamesSplit = strings.Split(containerAndObjectNames, "/")
   287  	chunkedPutContext.containerName = containerAndObjectNamesSplit[0]
   288  	chunkedPutContext.objectName = containerAndObjectNamesSplit[1]
   289  
   290  	swiftStorageURL = fetchStorageURL()
   291  
   292  	chunkedPutRequest, err = http.NewRequest(http.MethodPut, swiftStorageURL+"/"+chunkedPutContext.containerName+"/"+chunkedPutContext.objectName, chunkedPutContext)
   293  	if nil != err {
   294  		logFatalf("*chunkedPutContextStruct.performChunkedPut() call to http.NewRequest() failed: %v", err)
   295  	}
   296  
   297  	chunkedPutRequest.Header.Add("Transfer-Encoding", "chunked")
   298  
   299  	chunkedPutContext.pos = 0
   300  
   301  	_, _, ok, statusCode = doHTTPRequest(chunkedPutRequest, http.StatusCreated)
   302  	if !ok {
   303  		logFatalf("*chunkedPutContextStruct.performChunkedPut() failed with unexpected statusCode: %v", statusCode)
   304  	}
   305  
   306  	globals.stats.LogSegmentPutBytes.Add(uint64(len(chunkedPutContext.buf)))
   307  
   308  	chunkedPutContext.Done()
   309  }
   310  
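        // complete records the now-durable LogSegment with ProxyFS via Server.RpcWrote, folds
        // each of this chunkedPutContext's extents into fileInode.extentMap, removes the
        // chunkedPutContext from fileInode.chunkedPutList, and returns its quota element to
        // globals.fileInodeDirtyLogSegmentChan.
        //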
   311  func (chunkedPutContext *chunkedPutContextStruct) complete() {
   312  	var (
   313  		curExtentAsMultiObjectExtent  *multiObjectExtentStruct
   314  		curExtentAsSingleObjectExtent *singleObjectExtentStruct
   315  		curExtentAsValue              sortedmap.Value
   316  		curExtentIndex                int
   317  		err                           error
   318  		extentMapLen                  int
   319  		fileInode                     *fileInodeStruct
   320  		ok                            bool
   321  		wroteReply                    *jrpcfs.WroteReply
   322  		wroteRequest                  *jrpcfs.WroteRequest
   323  	)
   324  
   325  	fileInode = chunkedPutContext.fileInode
   326  
   327  	extentMapLen, err = chunkedPutContext.extentMap.Len()
   328  	if nil != err {
   329  		logFatalf("*chunkedPutContextStruct.complete() failed chunkedPutContext.extentMap.Len(): %v", err)
   330  	}
   331  
   332  	// Now that LogSegment Chunked PUT has completed, update FileInode in ProxyFS and our fileInode.extentMap
   333  
   334  	wroteRequest = &jrpcfs.WroteRequest{
   335  		InodeHandle: jrpcfs.InodeHandle{
   336  			MountID:     globals.mountID,
   337  			InodeNumber: int64(fileInode.InodeNumber),
   338  		},
   339  		ContainerName: chunkedPutContext.containerName,
   340  		ObjectName:    chunkedPutContext.objectName,
   341  		FileOffset:    make([]uint64, extentMapLen),
   342  		ObjectOffset:  make([]uint64, extentMapLen),
   343  		Length:        make([]uint64, extentMapLen),
   344  		WroteTimeNs:   uint64(time.Now().UnixNano()),
   345  	}
   346  
   347  	for curExtentIndex = 0; curExtentIndex < extentMapLen; curExtentIndex++ {
   348  		_, curExtentAsValue, ok, err = chunkedPutContext.extentMap.GetByIndex(curExtentIndex)
   349  		if nil != err {
   350  			logFatalf("*chunkedPutContextStruct.complete() failed chunkedPutContext.extentMap.GetByIndex(): %v", err)
   351  		}
   352  		if !ok {
   353  			logFatalf("*chunkedPutContextStruct.complete() chunkedPutContext.extentMap.GetByIndex() returned !ok")
   354  		}
   355  		curExtentAsSingleObjectExtent = curExtentAsValue.(*singleObjectExtentStruct)
   356  
   357  		wroteRequest.FileOffset[curExtentIndex] = curExtentAsSingleObjectExtent.fileOffset
   358  		wroteRequest.ObjectOffset[curExtentIndex] = curExtentAsSingleObjectExtent.objectOffset
   359  		wroteRequest.Length[curExtentIndex] = curExtentAsSingleObjectExtent.length
   360  
   361  		curExtentAsMultiObjectExtent = &multiObjectExtentStruct{
   362  			fileOffset:    curExtentAsSingleObjectExtent.fileOffset,
   363  			containerName: chunkedPutContext.containerName,
   364  			objectName:    chunkedPutContext.objectName,
   365  			objectOffset:  curExtentAsSingleObjectExtent.objectOffset,
   366  			length:        curExtentAsSingleObjectExtent.length,
   367  		}
   368  
   369  		fileInode.updateExtentMap(curExtentAsMultiObjectExtent)
   370  	}
   371  
   372  	wroteReply = &jrpcfs.WroteReply{}
   373  
   374  	err = globals.retryRPCClient.Send("RpcWrote", wroteRequest, wroteReply)
   375  	if nil != err {
   376  		// logFatalf("*chunkedPutContextStruct.complete() failed Server.RpcWrote: %v", err)
   377  		logWarnf("TODO (i.e. convert to logFatalf) *chunkedPutContextStruct.complete() failed Server.RpcWrote: %v", err)
   378  	}
   379  
   380  	// Remove this chunkedPutContext from fileInode.chunkedPutList
   381  
   382  	_ = fileInode.chunkedPutList.Remove(chunkedPutContext.chunkedPutListElement)
   383  
   384  	// fileInode.dereference()
   385  
   386  	// Finally, yield our chunkedPutContext quota
   387  
   388  	globals.fileInodeDirtyLogSegmentChan <- struct{}{}
   389  }
   390  
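        // Read implements io.Reader for the Chunked PUT request body. It blocks on wakeChan
        // for newly appended data, returns any not-yet-sent portion of buf, and reports io.EOF
        // once wakeChan has been closed and buf is exhausted.
        //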
   391  func (chunkedPutContext *chunkedPutContextStruct) Read(p []byte) (n int, err error) {
   392  	var (
   393  		wakeChanOpenOrNonEmpty bool
   394  	)
   395  
   396  	_, wakeChanOpenOrNonEmpty = <-chunkedPutContext.wakeChan
   397  
   398  	// grantedLock = chunkedPutContext.fileInode.getExclusiveLock()
   399  
   400  	n = len(chunkedPutContext.buf) - chunkedPutContext.pos
   401  
   402  	if n < 0 {
   403  		logFatalf("*chunkedPutContextStruct.Read() called with pos beyond len(chunkedPutContext.buf)")
   404  	}
   405  
   406  	if n > 0 {
   407  		// Return any unsent data in buf that will fit in p immediately
   408  
   409  		if n > len(p) {
   410  			n = len(p)
   411  		}
   412  
   413  		copy(p, chunkedPutContext.buf[chunkedPutContext.pos:chunkedPutContext.pos+n])
   414  
   415  		chunkedPutContext.pos += n
   416  
   417  		// grantedLock.release()
   418  
   419  		err = nil
   420  		return
   421  	}
   422  
   423  	// grantedLock.release()
   424  
   425  	// At this point, n == 0... do we need to send EOF?
   426  
   427  	if wakeChanOpenOrNonEmpty {
   428  		// Nope... we were awoken to send bytes but we'd already sent them
   429  
   430  		err = nil
   431  		return
   432  	}
   433  
   434  	// Time to send EOF
   435  
   436  	err = io.EOF
   437  
   438  	return
   439  }
   440  
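        // Close implements io.Closer for the Chunked PUT request body. It nudges any in-flight
        // Read() so that it can exit cleanly and resets pos so that a retried PUT resends the
        // entire buffer.
        //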
   441  func (chunkedPutContext *chunkedPutContextStruct) Close() (err error) {
   442  	// Make sure Read() gets a chance to cleanly exit
   443  
   444  	if chunkedPutContext.inRead {
   445  		select {
   446  		case chunkedPutContext.wakeChan <- struct{}{}:
   447  			// We just notified Read()
   448  		default:
   449  			// We didn't need to notify Read()
   450  		}
   451  
   452  		for chunkedPutContext.inRead {
   453  			time.Sleep(chunkedPutContextExitReadPollingRate)
   454  		}
   455  	}
   456  
   457  	// To ensure retry resends all the data, reset pos
   458  
   459  	chunkedPutContext.pos = 0
   460  
   461  	err = nil
   462  	return
   463  }
   464  
   465  // populateExtentMap ensures that the range [fileOffset:fileOffset+length) is covered by
   466  // fileInode.extentMap whether or not it is needed. Extents won't be needed if there is
   467  // some fileInode.chunkedPutListElement that logically supersedes it.
   468  //
   469  func (fileInode *fileInodeStruct) populateExtentMap(fileOffset uint64, length uint64) (err error) {
   470  	var (
   471  		curExtent        *multiObjectExtentStruct
   472  		curExtentAsValue sortedmap.Value
   473  		curExtentIndex   int
   474  		curFileOffset    uint64
   475  		exhausted        bool
   476  		ok               bool
   477  	)
   478  
   479  	if nil == fileInode.extentMap {
   480  		// Create an empty ExtentMap... and perform initial population
   481  
   482  		fileInode.extentMap = sortedmap.NewLLRBTree(sortedmap.CompareUint64, fileInode)
   483  
   484  		exhausted, err = fileInode.populateExtentMapHelper(fileOffset)
   485  		if nil != err {
   486  			fileInode.extentMap = nil
   487  			return
   488  		}
   489  		if exhausted {
   490  			return
   491  		}
   492  	}
   493  
   494  	// Handle cases where [fileOffset:fileOffset+length) references beyond FileSize
   495  
   496  	if fileOffset >= fileInode.cachedStat.Size {
   497  		// The entire [fileOffset:fileOffset+length) lies beyond FileSize... so just return
   498  
   499  		err = nil
   500  		return
   501  	}
   502  	if (fileOffset + length) > fileInode.cachedStat.Size {
   503  		// Truncate length since ExtentMap doesn't contain any extent beyond FileSize
   504  
   505  		length = fileInode.cachedStat.Size - fileOffset
   506  	}
   507  
   508  Restart:
   509  
   510  	curFileOffset = fileOffset
   511  
   512  	curExtentIndex, _, err = fileInode.extentMap.BisectLeft(curFileOffset)
   513  	if nil != err {
   514  		logFatalf("populateExtentMap() couldn't fetch extent [Case 1]: %v", err)
   515  	}
   516  
   517  	// Note it is possible for curExtentIndex == -1 if no extents are at or precede fileOffset
   518  
   519  	for curFileOffset < (fileOffset + length) {
   520  		_, curExtentAsValue, ok, err = fileInode.extentMap.GetByIndex(curExtentIndex)
   521  		if nil != err {
   522  			logFatalf("populateExtentMap() couldn't fetch extent [Case 2]: %v", err)
   523  		}
   524  
   525  		if !ok {
   526  			// No extent at curExtentIndex - so populate from here if possible and restart
   527  
   528  			exhausted, err = fileInode.populateExtentMapHelper(curFileOffset)
   529  			if nil != err {
   530  				fileInode.extentMap = nil
   531  				return
   532  			}
   533  			if exhausted {
   534  				return
   535  			}
   536  			goto Restart
   537  		}
   538  
   539  		curExtent, ok = curExtentAsValue.(*multiObjectExtentStruct)
   540  		if !ok {
   541  			logFatalf("populateExtentMap() couldn't fetch extent [Case 3]: %v", err)
   542  		}
   543  
   544  		if curFileOffset < curExtent.fileOffset {
   545  			// Next extent starts after curFileOffset - so populate the hole if possible and restart
   546  
   547  			exhausted, err = fileInode.populateExtentMapHelper(curFileOffset)
   548  			if nil != err {
   549  				fileInode.extentMap = nil
   550  				return
   551  			}
   552  			if exhausted {
   553  				return
   554  			}
   555  			goto Restart
   556  		}
   557  
   558  		if curFileOffset >= (curExtent.fileOffset + curExtent.length) {
   559  			// Handle case where BisectLeft pointed at an extent before fileOffset
   560  			// and this extent ends before fileOffset - so populate from there if possible and restart
   561  
   562  			exhausted, err = fileInode.populateExtentMapHelper(curExtent.fileOffset + curExtent.length)
   563  			if nil != err {
   564  				fileInode.extentMap = nil
   565  				return
   566  			}
   567  			if exhausted {
   568  				return
   569  			}
   570  			goto Restart
   571  		}
   572  
   573  		// Advance to next extent to check for contiguity
   574  
   575  		curFileOffset = curExtent.fileOffset + curExtent.length
   576  		curExtentIndex++
   577  	}
   578  
   579  	return
   580  }
   581  
   582  // populateExtentMapHelper fetches an ExtentMapChunk anchored by fileOffset and inserts
   583  // it into fileInode.extentMap using updateExtentMap().
   584  //
   585  // If returned exhausted is true, there are no more extent map entries to populate
   586  //
   587  func (fileInode *fileInodeStruct) populateExtentMapHelper(fileOffset uint64) (exhausted bool, err error) {
   588  	var (
   589  		curExtent                  *multiObjectExtentStruct
   590  		curExtentAsValue           sortedmap.Value
   591  		curExtentMapChunkIndex     int
   592  		curFileOffset              uint64
   593  		extentMapEntry             *inode.ExtentMapEntryStruct
   594  		extentMapLength            int
   595  		fetchExtentMapChunkReply   *jrpcfs.FetchExtentMapChunkReply
   596  		fetchExtentMapChunkRequest *jrpcfs.FetchExtentMapChunkRequest
   597  		ok                         bool
   598  	)
   599  
   600  	fetchExtentMapChunkRequest = &jrpcfs.FetchExtentMapChunkRequest{
   601  		InodeHandle: jrpcfs.InodeHandle{
   602  			MountID:     globals.mountID,
   603  			InodeNumber: int64(fileInode.InodeNumber),
   604  		},
   605  		FileOffset:                 fileOffset,
   606  		MaxEntriesFromFileOffset:   int64(globals.config.FetchExtentsFromFileOffset),
   607  		MaxEntriesBeforeFileOffset: int64(globals.config.FetchExtentsBeforeFileOffset),
   608  	}
   609  
   610  	fetchExtentMapChunkReply = &jrpcfs.FetchExtentMapChunkReply{}
   611  
   612  	err = globals.retryRPCClient.Send("RpcFetchExtentMapChunk", fetchExtentMapChunkRequest, fetchExtentMapChunkReply)
   613  	if nil != err {
   614  		return
   615  	}
   616  
   617  	if 0 == len(fetchExtentMapChunkReply.ExtentMapEntry) {
   618  		exhausted = true
   619  		return
   620  	}
   621  
   622  	exhausted = false
   623  
   624  	curFileOffset = fetchExtentMapChunkReply.FileOffsetRangeStart
   625  
   626  	for curExtentMapChunkIndex = range fetchExtentMapChunkReply.ExtentMapEntry {
   627  		extentMapEntry = &fetchExtentMapChunkReply.ExtentMapEntry[curExtentMapChunkIndex]
   628  
   629  		if curFileOffset < extentMapEntry.FileOffset {
   630  			// We need to insert a preceding zero-fill extent first
   631  
   632  			curExtent = &multiObjectExtentStruct{
   633  				fileOffset:    curFileOffset,
   634  				containerName: "",
   635  				objectName:    "",
   636  				objectOffset:  0,
   637  				length:        extentMapEntry.FileOffset - curFileOffset,
   638  			}
   639  
   640  			fileInode.updateExtentMap(curExtent)
   641  		}
   642  
   643  		// Insert the actual extent
   644  
   645  		curExtent = &multiObjectExtentStruct{
   646  			fileOffset:    extentMapEntry.FileOffset,
   647  			containerName: extentMapEntry.ContainerName,
   648  			objectName:    extentMapEntry.ObjectName,
   649  			objectOffset:  extentMapEntry.LogSegmentOffset,
   650  			length:        extentMapEntry.Length,
   651  		}
   652  
   653  		fileInode.updateExtentMap(curExtent)
   654  
   655  		curFileOffset = extentMapEntry.FileOffset + extentMapEntry.Length
   656  	}
   657  
   658  	if curFileOffset < fetchExtentMapChunkReply.FileOffsetRangeEnd {
   659  		// We need to insert a trailing zero-fill extent
   660  
   661  		curExtent = &multiObjectExtentStruct{
   662  			fileOffset:    curFileOffset,
   663  			containerName: "",
   664  			objectName:    "",
   665  			objectOffset:  0,
   666  			length:        fetchExtentMapChunkReply.FileOffsetRangeEnd - curFileOffset,
   667  		}
   668  
   669  		_, err = fileInode.extentMap.Put(curExtent.fileOffset, curExtent)
   670  		if nil != err {
   671  			logFatalf("populateExtentMapHelper() couldn't insert zero-fill extent: %v", err)
   672  		}
   673  	}
   674  
   675  	// Finally, we need to trim, as necessary, excess extents
   676  
   677  	extentMapLength, err = fileInode.extentMap.Len()
   678  	if nil != err {
   679  		logFatalf("populateExtentMapHelper() couldn't get number of extents: %v", err)
   680  	}
   681  
   682  	for {
   683  		if 0 == extentMapLength {
   684  			// Handle case where we have no extents left at all
   685  
   686  			return
   687  		}
   688  
   689  		_, curExtentAsValue, ok, err = fileInode.extentMap.GetByIndex(extentMapLength - 1)
   690  		if nil != err {
   691  			logFatalf("populateExtentMapHelper() couldn't get last extent [Case 1]: %v", err)
   692  		}
   693  		if !ok {
   694  			logFatalf("populateExtentMapHelper() couldn't get last extent [Case 2]")
   695  		}
   696  
   697  		curExtent, ok = curExtentAsValue.(*multiObjectExtentStruct)
   698  		if !ok {
   699  			logFatalf("populateExtentMapHelper() couldn't get last extent [Case 3]")
   700  		}
   701  
   702  		if (curExtent.fileOffset + curExtent.length) <= fetchExtentMapChunkReply.FileSize {
   703  			// Last extent does not extend beyond FileSize... so we are done
   704  
   705  			return
   706  		}
   707  
   708  		if curExtent.fileOffset < fetchExtentMapChunkReply.FileSize {
   709  			// Last extent crossed FileSize boundary... truncate it and we are done
   710  
   711  			curExtent.length = fileInode.cachedStat.Size - curExtent.fileOffset
   712  
   713  			return
   714  		}
   715  
   716  		// Last extent completely beyond FileSize... just delete it and loop
   717  
   718  		fileInode.extentMap.DeleteByIndex(extentMapLength - 1)
   719  
   720  		extentMapLength--
   721  	}
   722  }
   723  
   724  // updateExtentMap is called to update the ExtentMap and, as necessary, the FileSize with
   725  // the supplied multiObjectExtent. This func is used during fetching of ExtentMap chunks
   726  // by *fileInodeStruct.populateExtentMapHelper() and at completion of a Chunked PUT
   727  // by *chunkedPutContextStruct.complete().
   728  //
   729  func (fileInode *fileInodeStruct) updateExtentMap(newExtent *multiObjectExtentStruct) {
   730  	var (
   731  		curExtent           *multiObjectExtentStruct
   732  		curExtentAsValue    sortedmap.Value
   733  		curExtentIndex      int
   734  		curExtentLostLength uint64
   735  		err                 error
   736  		found               bool
   737  		ok                  bool
   738  		prevExtent          *multiObjectExtentStruct
   739  		prevExtentAsValue   sortedmap.Value
   740  		prevExtentIndex     int
   741  		prevExtentNewLength uint64
   742  		splitExtent         *multiObjectExtentStruct
   743  	)
   744  
   745  	if nil == fileInode.extentMap {
   746  		// Create an empty ExtentMap
   747  
   748  		fileInode.extentMap = sortedmap.NewLLRBTree(sortedmap.CompareUint64, fileInode)
   749  	}
   750  
   751  	// Locate prevExtent (if any)
   752  
   753  	prevExtentIndex, found, err = fileInode.extentMap.BisectLeft(newExtent.fileOffset)
   754  	if nil != err {
   755  		logFatalf("updateExtentMap() couldn't find prevExtent [Case 1]: %v", err)
   756  	}
   757  
   758  	if found {
   759  		// Make prevExtentIndex truly point to the previous (and non-overlapping) extent (if any)
   760  
   761  		prevExtentIndex--
   762  	} else {
   763  		if prevExtentIndex < 0 {
   764  			// No prevExtent exists... so there is nothing to split
   765  		} else {
   766  			// A prevExtent exists... but does it need to be split?
   767  
   768  			_, prevExtentAsValue, ok, err = fileInode.extentMap.GetByIndex(prevExtentIndex)
   769  			if nil != err {
   770  				logFatalf("updateExtentMap() couldn't find prevExtent [Case 2]: %v", err)
   771  			}
   772  			if !ok {
   773  				logFatalf("updateExtentMap() couldn't find prevExtent [Case 3]")
   774  			}
   775  			prevExtent, ok = prevExtentAsValue.(*multiObjectExtentStruct)
   776  			if !ok {
   777  				logFatalf("updateExtentMap() couldn't find prevExtent [Case 4]")
   778  			}
   779  
   780  			if (prevExtent.fileOffset + prevExtent.length) > newExtent.fileOffset {
   781  				// Existing prevExtent does overlap... so we need to split it now
   782  
   783  				prevExtentNewLength = newExtent.fileOffset - prevExtent.fileOffset
   784  
   785  				splitExtent = &multiObjectExtentStruct{
   786  					fileOffset:    newExtent.fileOffset,
   787  					containerName: prevExtent.containerName,
   788  					objectName:    prevExtent.objectName,
   789  					objectOffset:  prevExtent.objectOffset + prevExtentNewLength,
   790  					length:        prevExtent.length - prevExtentNewLength,
   791  				}
   792  
   793  				prevExtent.length = prevExtentNewLength
   794  
   795  				ok, err = fileInode.extentMap.Put(splitExtent.fileOffset, splitExtent)
   796  				if nil != err {
   797  					logFatalf("updateExtentMap() couldn't split prevExtent [Case 1]: %v", err)
   798  				}
   799  				if !ok {
   800  					logFatalf("updateExtentMap() couldn't split prevExtent [Case 2]")
   801  				}
   802  			} else {
   803  				// Existing prevExtent does not overlap
   804  			}
   805  		}
   806  	}
   807  
   808  	// Now loop through the extents after prevExtent that are (partially or fully) replaced by newExtent
   809  
   810  	curExtentIndex = prevExtentIndex + 1
   811  
   812  	for {
   813  		_, curExtentAsValue, ok, err = fileInode.extentMap.GetByIndex(curExtentIndex)
   814  		if nil != err {
   815  			logFatalf("updateExtentMap() couldn't find curExtent [Case 1]: %v", err)
   816  		}
   817  
   818  		if !ok {
   819  			// Simple case where we walked off the end of the ExtentMap
   820  
   821  			break
   822  		}
   823  
   824  		curExtent, ok = curExtentAsValue.(*multiObjectExtentStruct)
   825  		if !ok {
   826  			logFatalf("updateExtentMap() couldn't find curExtent [Case 2]: %v", err)
   827  		}
   828  
   829  		if (newExtent.fileOffset + newExtent.length) <= curExtent.fileOffset {
   830  			// Simple case where we walked to an extent that starts after newExtent
   831  
   832  			break
   833  		}
   834  
   835  		// At this point, we know we are either going to split curExtent
   836  		// or replace it... so we must remove it from the ExtentMap regardless
   837  		// (since splitting curExtent would change its fileOffset Key)
   838  
   839  		ok, err = fileInode.extentMap.DeleteByIndex(curExtentIndex)
   840  		if nil != err {
   841  			logFatalf("updateExtentMap() couldn't delete curExtent [Case 1]: %v", err)
   842  		}
   843  		if !ok {
   844  			logFatalf("updateExtentMap() couldn't delete curExtent [Case 2]: %v", err)
   845  		}
   846  
   847  		if (curExtent.fileOffset + curExtent.length) > (newExtent.fileOffset + newExtent.length) {
   848  			// We need to split curExtent because newExtent only partially replaces it
   849  
   850  			curExtentLostLength = (newExtent.fileOffset + newExtent.length) - curExtent.fileOffset
   851  
   852  			splitExtent = &multiObjectExtentStruct{
   853  				fileOffset:    curExtent.fileOffset + curExtentLostLength,
   854  				containerName: curExtent.containerName,
   855  				objectName:    curExtent.objectName,
   856  				objectOffset:  curExtent.objectOffset + curExtentLostLength,
   857  				length:        curExtent.length - curExtentLostLength,
   858  			}
   859  
   860  			ok, err = fileInode.extentMap.Put(splitExtent.fileOffset, splitExtent)
   861  			if nil != err {
   862  				logFatalf("updateExtentMap() couldn't split curExtent [Case 1]: %v", err)
   863  			}
   864  			if !ok {
   865  				logFatalf("updateExtentMap() couldn't split curExtent [Case 2]")
   866  			}
   867  
   868  			// We also know that we are done scanning
   869  
   870  			break
   871  		}
   872  	}
   873  
   874  	// We can finally insert newExtent without fear of colliding with existing extents
   875  
   876  	ok, err = fileInode.extentMap.Put(newExtent.fileOffset, newExtent)
   877  	if nil != err {
   878  		logFatalf("updateExtentMap() couldn't insert newExtent [Case 1]: %v", err)
   879  	}
   880  	if !ok {
   881  		logFatalf("updateExtentMap() couldn't insert newExtent [Case 2]")
   882  	}
   883  
   884  	if (newExtent.fileOffset + newExtent.length) > fileInode.cachedStat.Size {
   885  		fileInode.cachedStat.Size = newExtent.fileOffset + newExtent.length
   886  	}
   887  }
   888  
   889  // getReadPlan returns a slice of extents and their span (to aid in the make([]byte,) call needed
   890  // by the caller to provision the slice into which they will copy the extents). Each extent will
   891  // be one of three types:
   892  //
   893  //   singleObjectExtentWithLinkStruct - a reference to a portion of a LogSegment being written by a chunkedPutContextStruct
   894  //   multiObjectExtentStruct          - a reference to a portion of a LogSegment described by a fileInodeStruct.extentMap
   895  //   multiObjectExtentStruct          - a description of a zero-filled extent (.objectName == "")
   896  //
   897  func (fileInode *fileInodeStruct) getReadPlan(fileOffset uint64, length uint64) (readPlan []interface{}, readPlanSpan uint64) {
   898  	var (
   899  		chunkedPutContext          *chunkedPutContextStruct
   900  		chunkedPutContextAsElement *list.Element
   901  		curExtentAsValue           sortedmap.Value
   902  		curExtentIndex             int
   903  		curFileOffset              uint64
   904  		curMultiObjectExtent       *multiObjectExtentStruct
   905  		err                        error
   906  		multiObjectReadPlanStep    *multiObjectExtentStruct
   907  		ok                         bool
   908  		remainingLength            uint64
   909  	)
   910  
   911  	// First assemble readPlan based upon fileInode.extentMap
   912  
   913  	readPlan = make([]interface{}, 0, 1)
   914  
   915  	curFileOffset = fileOffset
   916  	remainingLength = length
   917  
   918  	curExtentIndex, _, err = fileInode.extentMap.BisectLeft(fileOffset)
   919  	if nil != err {
   920  		logFatalf("getReadPlan() couldn't find curExtent: %v", err)
   921  	}
   922  
   923  	for remainingLength > 0 {
   924  		_, curExtentAsValue, ok, err = fileInode.extentMap.GetByIndex(curExtentIndex)
   925  		if nil != err {
   926  			logFatalf("getReadPlan() couldn't find curExtent [Case 1]: %v", err)
   927  		}
   928  
   929  		if !ok {
   930  			// Crossed EOF - stop here
   931  
   932  			break
   933  		}
   934  
   935  		curMultiObjectExtent, ok = curExtentAsValue.(*multiObjectExtentStruct)
   936  		if !ok {
   937  			logFatalf("getReadPlan() couldn't find curExtent [Case 2]: %v", err)
   938  		}
   939  
   940  		if (curMultiObjectExtent.fileOffset + curMultiObjectExtent.length) <= curFileOffset {
   941  			// curExtent ends at or before curFileOffset - stop here
   942  
   943  			break
   944  		}
   945  
   946  		multiObjectReadPlanStep = &multiObjectExtentStruct{
   947  			fileOffset:    curFileOffset,
   948  			containerName: curMultiObjectExtent.containerName,
   949  			objectName:    curMultiObjectExtent.objectName, // May be == ""
   950  			objectOffset:  curMultiObjectExtent.objectOffset + (curFileOffset - curMultiObjectExtent.fileOffset),
   951  			length:        curMultiObjectExtent.length - (curFileOffset - curMultiObjectExtent.fileOffset),
   952  		}
   953  
   954  		if remainingLength < multiObjectReadPlanStep.length {
   955  			// This is the last readPlanStep and needs to be truncated
   956  
   957  			multiObjectReadPlanStep.length = remainingLength
   958  		}
   959  
   960  		if 0 == multiObjectReadPlanStep.length {
   961  			// Reached EOF - stop here
   962  
   963  			break
   964  		}
   965  
   966  		readPlan = append(readPlan, multiObjectReadPlanStep)
   967  
   968  		curFileOffset += multiObjectReadPlanStep.length
   969  		remainingLength -= multiObjectReadPlanStep.length
   970  
   971  		curExtentIndex++
   972  	}
   973  
   974  	// Compute tentative readPlanSpan
   975  
   976  	if 0 == len(readPlan) {
   977  		readPlanSpan = 0
   978  	} else {
   979  		multiObjectReadPlanStep = readPlan[len(readPlan)-1].(*multiObjectExtentStruct)
   980  		readPlanSpan = (multiObjectReadPlanStep.fileOffset + multiObjectReadPlanStep.length) - fileOffset
   981  	}
   982  
   983  	// But we must apply, in order, any changes due to the chunkedPutContextStructs
   984  
   985  	chunkedPutContextAsElement = fileInode.chunkedPutList.Front()
   986  	for nil != chunkedPutContextAsElement {
   987  		chunkedPutContext = chunkedPutContextAsElement.Value.(*chunkedPutContextStruct)
   988  		readPlan, readPlanSpan = chunkedPutContext.getReadPlanHelper(fileOffset, length, readPlan)
   989  		chunkedPutContextAsElement = chunkedPutContextAsElement.Next()
   990  	}
   991  
   992  	// And we are done...
   993  
   994  	return
   995  }
   996  
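        // getReadPlanHelper overlays the extents buffered in this chunkedPutContext onto
        // inReadPlan, splitting or replacing any steps that the buffered data supersedes, and
        // extends the plan beyond the previously known FileSize where buffered data allows.
        //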
   997  func (chunkedPutContext *chunkedPutContextStruct) getReadPlanHelper(fileOffset uint64, length uint64, inReadPlan []interface{}) (outReadPlan []interface{}, outReadPlanSpan uint64) {
   998  	var (
   999  		curFileOffset                       uint64
  1000  		err                                 error
  1001  		found                               bool
  1002  		inReadPlanStepAsInterface           interface{}
  1003  		inReadPlanStepAsMultiObjectExtent   *multiObjectExtentStruct
  1004  		inReadPlanStepAsSingleObjectExtent  *singleObjectExtentWithLinkStruct
  1005  		inReadPlanStepFileOffset            uint64
  1006  		inReadPlanStepLength                uint64
  1007  		ok                                  bool
  1008  		outReadPlanStepAsInterface          interface{}
  1009  		outReadPlanStepAsMultiObjectExtent  *multiObjectExtentStruct
  1010  		outReadPlanStepAsSingleObjectExtent *singleObjectExtentWithLinkStruct
  1011  		overlapExtent                       *singleObjectExtentStruct
  1012  		overlapExtentAsValue                sortedmap.Value
  1013  		overlapExtentWithLink               *singleObjectExtentWithLinkStruct
  1014  		overlapExtentIndex                  int
  1015  		postExtent                          *singleObjectExtentStruct
  1016  		postExtentAsValue                   sortedmap.Value
  1017  		postExtentIndex                     int
  1018  		postOverlapLength                   uint64
  1019  		preExtent                           *singleObjectExtentStruct
  1020  		preExtentAsValue                    sortedmap.Value
  1021  		preExtentIndex                      int
  1022  		preOverlapLength                    uint64
  1023  		remainingLength                     uint64
  1024  		wasMultiObjectReadPlanStep          bool
  1025  	)
  1026  
  1027  	outReadPlan = make([]interface{}, 0, len(inReadPlan))
  1028  
  1029  	for _, inReadPlanStepAsInterface = range inReadPlan {
  1030  		// Compute overlap with chunkedPutContext.extentMap
  1031  
  1032  		inReadPlanStepAsMultiObjectExtent, wasMultiObjectReadPlanStep = inReadPlanStepAsInterface.(*multiObjectExtentStruct)
  1033  		if wasMultiObjectReadPlanStep {
  1034  			inReadPlanStepAsSingleObjectExtent = nil
  1035  			inReadPlanStepFileOffset = inReadPlanStepAsMultiObjectExtent.fileOffset
  1036  			inReadPlanStepLength = inReadPlanStepAsMultiObjectExtent.length
  1037  		} else {
  1038  			inReadPlanStepAsMultiObjectExtent = nil
  1039  			inReadPlanStepAsSingleObjectExtent = inReadPlanStepAsInterface.(*singleObjectExtentWithLinkStruct)
  1040  			inReadPlanStepFileOffset = inReadPlanStepAsSingleObjectExtent.fileOffset
  1041  			inReadPlanStepLength = inReadPlanStepAsSingleObjectExtent.length
  1042  		}
  1043  
  1044  		preExtentIndex, found, err = chunkedPutContext.extentMap.BisectLeft(inReadPlanStepFileOffset)
  1045  		if nil != err {
  1046  			logFatalf("getReadPlanHelper() couldn't find preExtentIndex: %v", err)
  1047  		}
  1048  		if found {
  1049  			// Back up preExtentIndex... we know previous extent (if any) doesn't overlap
  1050  			preExtentIndex--
  1051  		} else {
  1052  			// But preExtentIndex might point to an overlapping extent
  1053  			if 0 <= preExtentIndex {
  1054  				_, preExtentAsValue, _, err = chunkedPutContext.extentMap.GetByIndex(preExtentIndex)
  1055  				if nil != err {
  1056  					logFatalf("getReadPlanHelper() couldn't fetch preExtent: %v", err)
  1057  				}
  1058  				preExtent = preExtentAsValue.(*singleObjectExtentStruct)
  1059  				if (preExtent.fileOffset + preExtent.length) > inReadPlanStepFileOffset {
  1060  					preExtentIndex--
  1061  				}
  1062  			}
  1063  		}
  1064  		postExtentIndex, _, err = chunkedPutContext.extentMap.BisectRight(inReadPlanStepFileOffset + inReadPlanStepLength)
  1065  		if nil != err {
  1066  			logFatalf("getReadPlanHelper() couldn't find postExtentIndex [Case 1]: %v", err)
  1067  		}
  1068  
  1069  		if 1 == (postExtentIndex - preExtentIndex) {
  1070  			// No overlap... replicate inReadPlanStep as is
  1071  
  1072  			outReadPlan = append(outReadPlan, inReadPlanStepAsInterface)
  1073  
  1074  			continue
  1075  		}
  1076  
  1077  		// Apply overlapping extents from chunkedPutContext.extentMap with inReadPlanStep
  1078  
  1079  		curFileOffset = inReadPlanStepFileOffset
  1080  		remainingLength = inReadPlanStepLength
  1081  
  1082  		for overlapExtentIndex = preExtentIndex + 1; overlapExtentIndex < postExtentIndex; overlapExtentIndex++ {
  1083  			_, overlapExtentAsValue, _, err = chunkedPutContext.extentMap.GetByIndex(overlapExtentIndex)
  1084  			if nil != err {
  1085  				logFatalf("getReadPlanHelper() couldn't find overlapExtentIndex: %v", err)
  1086  			}
  1087  			overlapExtent = overlapExtentAsValue.(*singleObjectExtentStruct)
  1088  
  1089  			if overlapExtent.fileOffset < curFileOffset {
  1090  				preOverlapLength = curFileOffset - overlapExtent.fileOffset
  1091  			} else {
  1092  				preOverlapLength = 0
  1093  			}
  1094  			if (overlapExtent.fileOffset + overlapExtent.length) > (curFileOffset + remainingLength) {
  1095  				postOverlapLength = (overlapExtent.fileOffset + overlapExtent.length) - (curFileOffset + remainingLength)
  1096  			} else {
  1097  				postOverlapLength = 0
  1098  			}
  1099  
  1100  			overlapExtentWithLink = &singleObjectExtentWithLinkStruct{
  1101  				fileOffset:        overlapExtent.fileOffset + preOverlapLength,
  1102  				objectOffset:      overlapExtent.objectOffset + preOverlapLength,
  1103  				length:            overlapExtent.length - (preOverlapLength + postOverlapLength),
  1104  				chunkedPutContext: chunkedPutContext,
  1105  			}
  1106  
  1107  			if curFileOffset < overlapExtentWithLink.fileOffset {
  1108  				// Append non-overlapped portion of inReadPlanStep preceding overlapExtentWithLink
  1109  
  1110  				if wasMultiObjectReadPlanStep {
  1111  					outReadPlanStepAsMultiObjectExtent = &multiObjectExtentStruct{
  1112  						fileOffset:    curFileOffset,
  1113  						containerName: inReadPlanStepAsMultiObjectExtent.containerName,
  1114  						objectName:    inReadPlanStepAsMultiObjectExtent.objectName,
  1115  						objectOffset:  inReadPlanStepAsMultiObjectExtent.objectOffset + (curFileOffset - inReadPlanStepAsMultiObjectExtent.fileOffset),
  1116  						length:        overlapExtentWithLink.fileOffset - curFileOffset,
  1117  					}
  1118  
  1119  					outReadPlan = append(outReadPlan, outReadPlanStepAsMultiObjectExtent)
  1120  
  1121  					curFileOffset += outReadPlanStepAsMultiObjectExtent.length
  1122  					remainingLength -= outReadPlanStepAsMultiObjectExtent.length
  1123  				} else {
  1124  					outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1125  						fileOffset:        curFileOffset,
  1126  						objectOffset:      inReadPlanStepAsSingleObjectExtent.objectOffset + (curFileOffset - inReadPlanStepAsSingleObjectExtent.fileOffset),
  1127  						length:            overlapExtentWithLink.fileOffset - curFileOffset,
  1128  						chunkedPutContext: inReadPlanStepAsSingleObjectExtent.chunkedPutContext,
  1129  					}
  1130  
  1131  					outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1132  
  1133  					curFileOffset += outReadPlanStepAsSingleObjectExtent.length
  1134  					remainingLength -= outReadPlanStepAsSingleObjectExtent.length
  1135  				}
  1136  			}
  1137  
  1138  			// Append overlapExtentWithLink
  1139  
  1140  			outReadPlan = append(outReadPlan, overlapExtentWithLink)
  1141  
  1142  			curFileOffset += overlapExtentWithLink.length
  1143  			remainingLength -= overlapExtentWithLink.length
  1144  		}
  1145  
  1146  		if 0 < remainingLength {
  1147  			// Append non-overlapped trailing portion of inReadPlanStep in outReadPlan
  1148  
  1149  			if wasMultiObjectReadPlanStep {
  1150  				outReadPlanStepAsMultiObjectExtent = &multiObjectExtentStruct{
  1151  					fileOffset:    curFileOffset,
  1152  					containerName: inReadPlanStepAsMultiObjectExtent.containerName,
  1153  					objectName:    inReadPlanStepAsMultiObjectExtent.objectName,
  1154  					objectOffset:  inReadPlanStepAsMultiObjectExtent.objectOffset + (curFileOffset - inReadPlanStepAsMultiObjectExtent.fileOffset),
  1155  					length:        remainingLength,
  1156  				}
  1157  
  1158  				outReadPlan = append(outReadPlan, outReadPlanStepAsMultiObjectExtent)
  1159  			} else {
  1160  				outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1161  					fileOffset:        curFileOffset,
  1162  					objectOffset:      inReadPlanStepAsSingleObjectExtent.objectOffset + (curFileOffset - inReadPlanStepAsSingleObjectExtent.fileOffset),
  1163  					length:            remainingLength,
  1164  					chunkedPutContext: inReadPlanStepAsSingleObjectExtent.chunkedPutContext,
  1165  				}
  1166  
  1167  				outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1168  			}
  1169  		}
  1170  	}
  1171  
  1172  	// Compute tentative outReadPlanSpan
  1173  
  1174  	if 0 == len(outReadPlan) {
  1175  		outReadPlanSpan = 0
  1176  	} else {
  1177  		outReadPlanStepAsInterface = outReadPlan[len(outReadPlan)-1]
  1178  		outReadPlanStepAsMultiObjectExtent, wasMultiObjectReadPlanStep = outReadPlanStepAsInterface.(*multiObjectExtentStruct)
  1179  		if wasMultiObjectReadPlanStep {
  1180  			outReadPlanSpan = (outReadPlanStepAsMultiObjectExtent.fileOffset + outReadPlanStepAsMultiObjectExtent.length) - fileOffset
  1181  		} else {
  1182  			outReadPlanStepAsSingleObjectExtent = outReadPlanStepAsInterface.(*singleObjectExtentWithLinkStruct)
  1183  			outReadPlanSpan = (outReadPlanStepAsSingleObjectExtent.fileOffset + outReadPlanStepAsSingleObjectExtent.length) - fileOffset
  1184  		}
  1185  	}
  1186  
  1187  	if outReadPlanSpan == length {
  1188  		return
  1189  	}
  1190  
  1191  	// inReadPlan was limited by incoming FileSize... can we extend it?
  1192  
  1193  	curFileOffset = fileOffset + outReadPlanSpan
  1194  
  1195  	postExtentIndex, found, err = chunkedPutContext.extentMap.BisectLeft(curFileOffset)
  1196  	if nil != err {
  1197  		logFatalf("getReadPlanHelper() couldn't find postExtentIndex [Case 2]: %v", err)
  1198  	}
  1199  	if found {
  1200  		// We know this extent, if it exists, does not overlap
  1201  
  1202  		_, postExtentAsValue, ok, err = chunkedPutContext.extentMap.GetByIndex(postExtentIndex)
  1203  		if nil != err {
  1204  			logFatalf("getReadPlanHelper() couldn't find postExtent [Case 1]: %v", err)
  1205  		}
  1206  		if !ok {
  1207  			return
  1208  		}
  1209  
  1210  		postExtent = postExtentAsValue.(*singleObjectExtentStruct)
  1211  	} else {
  1212  		// So this extent, if it exists, must overlap... and possibly extend beyond
  1213  
  1214  		_, postExtentAsValue, ok, err = chunkedPutContext.extentMap.GetByIndex(postExtentIndex)
  1215  		if nil != err {
  1216  			logFatalf("getReadPlanHelper() couldn't find postExtent [Case 2]: %v", err)
  1217  		}
  1218  
  1219  		if ok {
  1220  			overlapExtent = postExtentAsValue.(*singleObjectExtentStruct)
  1221  
  1222  			if (overlapExtent.fileOffset + overlapExtent.length) > curFileOffset {
  1223  				// Create a postExtent equivalent to the non-overlapping tail of overlapExtent
  1224  
  1225  				postExtent = &singleObjectExtentStruct{
  1226  					fileOffset:   curFileOffset,
  1227  					objectOffset: overlapExtent.objectOffset + (curFileOffset - overlapExtent.fileOffset),
  1228  					length:       overlapExtent.length - (curFileOffset - overlapExtent.fileOffset),
  1229  				}
  1230  			} else {
  1231  				// Create a zero-length postExtent instead
  1232  
  1233  				postExtent = &singleObjectExtentStruct{
  1234  					fileOffset:   curFileOffset,
  1235  					objectOffset: 0,
  1236  					length:       0,
  1237  				}
  1238  			}
  1239  		} else {
  1240  			// Create a zero-length postExtent instead
  1241  
  1242  			postExtent = &singleObjectExtentStruct{
  1243  				fileOffset:   curFileOffset,
  1244  				objectOffset: 0,
  1245  				length:       0,
  1246  			}
  1247  		}
  1248  	}
  1249  
  1250  	// Now enter a loop until either outReadPlanSpan reaches length or we reach FileSize
  1251  	// On each iteration, postExtent either starts at curFileOffset or after it (the latter requiring zero-fill)
  1252  
  1253  	for {
  1254  		if 0 < postExtent.length {
  1255  			if postExtent.fileOffset > curFileOffset {
  1256  				// We must "zero-fill" to MIN(postExtent.fileOffset, fileInode.cachedStat.Size)
  1257  
  1258  				if postExtent.fileOffset >= (fileOffset + length) {
  1259  					// postExtent starts beyond fileOffset+length, so just append zero-fill step & return
  1260  					outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1261  						fileOffset:        curFileOffset,
  1262  						objectOffset:      0,
  1263  						length:            length - outReadPlanSpan,
  1264  						chunkedPutContext: nil,
  1265  					}
  1266  
  1267  					outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1268  					outReadPlanSpan = length
  1269  
  1270  					return
  1271  				}
  1272  
  1273  				// postExtent starts after curFileOffset but before fileOffset+length, so insert zero-fill step first
  1274  
  1275  				outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1276  					fileOffset:        curFileOffset,
  1277  					objectOffset:      0,
  1278  					length:            postExtent.fileOffset - curFileOffset,
  1279  					chunkedPutContext: nil,
  1280  				}
  1281  
  1282  				outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1283  
  1284  				curFileOffset += outReadPlanStepAsSingleObjectExtent.length
  1285  				outReadPlanSpan += outReadPlanStepAsSingleObjectExtent.length
  1286  			}
  1287  
  1288  			// Now append a step for some or all of postExtent
  1289  
  1290  			if (postExtent.fileOffset + postExtent.length) >= (fileOffset + length) {
  1291  				// postExtent will take us to (and beyond) fileOffset+length, so insert proper portion & return
  1292  
  1293  				outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1294  					fileOffset:        postExtent.fileOffset,
  1295  					objectOffset:      postExtent.objectOffset,
  1296  					length:            (fileOffset + length) - postExtent.fileOffset,
  1297  					chunkedPutContext: chunkedPutContext,
  1298  				}
  1299  
  1300  				outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1301  				outReadPlanSpan = length
  1302  
  1303  				return
  1304  			}
  1305  
  1306  			// The entire postExtent will "fit"... and not exhaust fileOffset+length
  1307  
  1308  			outReadPlanStepAsSingleObjectExtent = &singleObjectExtentWithLinkStruct{
  1309  				fileOffset:        postExtent.fileOffset,
  1310  				objectOffset:      postExtent.objectOffset,
  1311  				length:            postExtent.length,
  1312  				chunkedPutContext: chunkedPutContext,
  1313  			}
  1314  
  1315  			outReadPlan = append(outReadPlan, outReadPlanStepAsSingleObjectExtent)
  1316  
  1317  			curFileOffset += outReadPlanStepAsSingleObjectExtent.length
  1318  			outReadPlanSpan += outReadPlanStepAsSingleObjectExtent.length
  1319  		}
  1320  
  1321  		// Index to next postExtent
  1322  
  1323  		postExtentIndex++
  1324  
  1325  		_, postExtentAsValue, ok, err = chunkedPutContext.extentMap.GetByIndex(postExtentIndex)
  1326  		if nil != err {
  1327  			logFatalf("getReadPlanHelper() couldn't find postExtent [Case 3]: %v", err)
  1328  		}
  1329  		if !ok {
  1330  			return
  1331  		}
  1332  
  1333  		postExtent = postExtentAsValue.(*singleObjectExtentStruct)
  1334  	}
  1335  }
  1336  
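        // fetchLogSegmentCacheLine returns the LogSegment Cache line covering offset within
        // containerName/objectName, either from globals.logSegmentCacheMap or, on a miss, by
        // issuing a ranged GET (evicting the LRU cache line first if the cache is full). A nil
        // return indicates the cache line could not be fetched.
        //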
  1337  func fetchLogSegmentCacheLine(containerName string, objectName string, offset uint64) (logSegmentCacheElement *logSegmentCacheElementStruct) {
  1338  	var (
  1339  		err                                     error
  1340  		getRequest                              *http.Request
  1341  		logSegmentCacheElementGetEndTime        time.Time
  1342  		logSegmentCacheElementGetStartTime      time.Time
  1343  		logSegmentCacheElementKey               logSegmentCacheElementKeyStruct
  1344  		logSegmentCacheElementToEvict           *logSegmentCacheElementStruct
  1345  		logSegmentCacheElementToEvictKey        logSegmentCacheElementKeyStruct
  1346  		logSegmentCacheElementToEvictLRUElement *list.Element
  1347  		logSegmentEnd                           uint64
  1348  		logSegmentStart                         uint64
  1349  		ok                                      bool
  1350  		statusCode                              int
  1351  		swiftStorageURL                         string
  1352  		url                                     string
  1353  	)
  1354  
  1355  	// Compute Key for LogSegment Cache lookup
  1356  
  1357  	logSegmentCacheElementKey.logSegmentNumber, err = strconv.ParseUint(objectName, 16, 64)
  1358  	if nil != err {
  1359  		logWarnf("fetchLogSegmentCacheLine() passed un-parseable objectName: \"%s\" (err: %v)", objectName, err)
  1360  		logSegmentCacheElement = nil
  1361  		return
  1362  	}
  1363  
  1364  	logSegmentCacheElementKey.cacheLineTag = offset / globals.config.ReadCacheLineSize
  1365  
  1366  	// Perform lookup
  1367  
  1368  	globals.Lock()
  1369  
  1370  	logSegmentCacheElement, ok = globals.logSegmentCacheMap[logSegmentCacheElementKey]
  1371  
  1372  	if ok {
  1373  		// Found it... so indicate cache hit & move it to MRU end of LRU
  1374  
  1375  		_ = atomic.AddUint64(&globals.metrics.ReadCacheHits, 1)
  1376  
  1377  		globals.logSegmentCacheLRU.MoveToBack(logSegmentCacheElement.cacheLRUElement)
  1378  
  1379  		globals.Unlock()
  1380  
  1381  		// Its contents may not have been fetched yet... so wait for the GET to complete
  1382  
  1383  		logSegmentCacheElement.Wait()
  1384  
  1385  		// GET may have failed... LogSegment disappeared
  1386  
  1387  		if logSegmentCacheElementStateGetFailed == logSegmentCacheElement.state {
  1388  			logInfof("LogSegment %s/%s at Offset 0x%016X not available", containerName, objectName, logSegmentCacheElementKey.cacheLineTag*globals.config.ReadCacheLineSize)
  1389  			logSegmentCacheElement = nil
  1390  		}
  1391  
  1392  		// In any case, we can now return
  1393  
  1394  		return
  1395  	}
  1396  
  1397  	// Cache miss
  1398  
  1399  	_ = atomic.AddUint64(&globals.metrics.ReadCacheMisses, 1)
  1400  
  1401  	// Make room for new LogSegment Cache Line if necessary
  1402  
  1403  	for uint64(globals.logSegmentCacheLRU.Len()) >= globals.config.ReadCacheLineCount {
  1404  		logSegmentCacheElementToEvictLRUElement = globals.logSegmentCacheLRU.Front()
  1405  		logSegmentCacheElementToEvict = logSegmentCacheElementToEvictLRUElement.Value.(*logSegmentCacheElementStruct)
  1406  		logSegmentCacheElementToEvictKey.logSegmentNumber, err = strconv.ParseUint(logSegmentCacheElementToEvict.objectName, 16, 64)
  1407  		logSegmentCacheElementToEvictKey.cacheLineTag = logSegmentCacheElementToEvict.startingOffset / globals.config.ReadCacheLineSize
  1408  		if nil != err {
  1409  			logFatalf("fetchLogSegmentCacheLine() evicting hit un-parseable objectName: \"%s\" (err: %v)", logSegmentCacheElementToEvict.objectName, err)
  1410  		}
  1411  		delete(globals.logSegmentCacheMap, logSegmentCacheElementToEvictKey)
  1412  		globals.logSegmentCacheLRU.Remove(logSegmentCacheElementToEvictLRUElement)
  1413  	}
  1414  
  1415  	// Create new LogSegment Cache Line... as yet unfilled
  1416  
  1417  	logSegmentCacheElement = &logSegmentCacheElementStruct{
  1418  		state:          logSegmentCacheElementStateGetIssued,
  1419  		containerName:  containerName,
  1420  		objectName:     objectName,
  1421  		startingOffset: logSegmentCacheElementKey.cacheLineTag * globals.config.ReadCacheLineSize,
  1422  	}
  1423  
  1424  	logSegmentCacheElement.Add(1)
  1425  
  1426  	// Make it findable while we fill it (to avoid multiple copies)
  1427  
  1428  	globals.logSegmentCacheMap[logSegmentCacheElementKey] = logSegmentCacheElement
  1429  	logSegmentCacheElement.cacheLRUElement = globals.logSegmentCacheLRU.PushBack(logSegmentCacheElement)
  1430  
  1431  	globals.Unlock()
  1432  
  1433  	// Issue GET for it
  1434  
  1435  	swiftStorageURL = fetchStorageURL()
  1436  
  1437  	url = swiftStorageURL + "/" + containerName + "/" + objectName
  1438  
  1439  	logSegmentStart = logSegmentCacheElementKey.cacheLineTag * globals.config.ReadCacheLineSize
  1440  	logSegmentEnd = logSegmentStart + globals.config.ReadCacheLineSize - 1
  1441  
  1442  	getRequest, err = http.NewRequest(http.MethodGet, url, nil)
  1443  	if nil != err {
  1444  		logFatalf("unable to create GET http.Request (%s): %v", url, err)
  1445  	}
  1446  
  1447  	getRequest.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", logSegmentStart, logSegmentEnd))
  1448  
  1449  	logSegmentCacheElementGetStartTime = time.Now()
  1450  
  1451  	_, logSegmentCacheElement.buf, ok, statusCode = doHTTPRequest(getRequest, http.StatusOK, http.StatusPartialContent)
  1452  	if !ok {
  1453  		logWarnf("fetchLogSegmentCacheLine() GET of %s returned unexpected statusCode: %v (LogSegment treated as unavailable)", url, statusCode)
  1454  	}
  1455  
  1456  	logSegmentCacheElementGetEndTime = time.Now()
  1457  
  1458  	globals.Lock()
  1459  
  1460  	if ok {
  1461  		logSegmentCacheElement.state = logSegmentCacheElementStateGetSuccessful
  1462  
  1463  		globals.stats.LogSegmentGetUsec.Add(uint64(logSegmentCacheElementGetEndTime.Sub(logSegmentCacheElementGetStartTime) / time.Microsecond))
  1464  	} else {
  1465  		logSegmentCacheElement.state = logSegmentCacheElementStateGetFailed
  1466  
  1467  		// Remove it from the LogSegment Cache as well
  1468  
  1469  		delete(globals.logSegmentCacheMap, logSegmentCacheElementKey)
  1470  		globals.logSegmentCacheLRU.Remove(logSegmentCacheElement.cacheLRUElement)
  1471  	}
  1472  
  1473  	globals.Unlock()
  1474  
  1475  	// Signal any (other) waiters that the GET completed (successfully or not) before returning
  1476  
  1477  	logSegmentCacheElement.Done()
  1478  
  1479  	return
  1480  }
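
// Illustrative sketch (not from the original source): how a reader of the cache
// line returned by fetchLogSegmentCacheLine() above would locate the requested
// byte inside logSegmentCacheElement.buf. The readCacheLineSize and offset
// values below are hypothetical stand-ins for globals.config.ReadCacheLineSize
// and a caller's read offset.
func exampleLocateWithinCacheLine() {
	var (
		offset            uint64 = 0x00180000 // byte requested within the LogSegment (hypothetical)
		readCacheLineSize uint64 = 0x00100000 // assume a 1 MiB cache line
	)

	cacheLineTag := offset / readCacheLineSize         // 1
	startingOffset := cacheLineTag * readCacheLineSize // 0x00100000 (matches logSegmentCacheElement.startingOffset)
	offsetWithinBuf := offset - startingOffset         // 0x00080000

	// The GET above asked for bytes [startingOffset, startingOffset+readCacheLineSize-1],
	// so buf[offsetWithinBuf:] holds the data at `offset` (bounded by len(buf)).
	_ = offsetWithinBuf
}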
  1481  
  1482  func (chunkedPutContext *chunkedPutContextStruct) mergeSingleObjectExtent(newExtent *singleObjectExtentStruct) {
  1483  	var (
  1484  		curExtent        *singleObjectExtentStruct
  1485  		curExtentAsValue sortedmap.Value
  1486  		curExtentIndex   int
  1487  		err              error
  1488  		extentMapLen     int
  1489  		found            bool
  1490  		ok               bool
  1491  		postLength       uint64
  1492  		preLength        uint64
  1493  		splitExtent      *singleObjectExtentStruct
  1494  	)
  1495  
  1496  	// See if we can simply extend last element
  1497  
  1498  	extentMapLen, err = chunkedPutContext.extentMap.Len()
  1499  	if nil != err {
  1500  		logFatalf("mergeSingleObjectExtent() failed to Len(): %v", err)
  1501  	}
  1502  
  1503  	if 0 < extentMapLen {
  1504  		_, curExtentAsValue, _, err = chunkedPutContext.extentMap.GetByIndex(extentMapLen - 1)
  1505  		if nil != err {
  1506  			logFatalf("mergeSingleObjectExtent() failed to GetByIndex() [Case 1]: %v", err)
  1507  		}
  1508  		curExtent = curExtentAsValue.(*singleObjectExtentStruct)
  1509  
  1510  		if (curExtent.fileOffset + curExtent.length) == newExtent.fileOffset {
  1511  			if (curExtent.objectOffset + curExtent.length) == newExtent.objectOffset {
  1512  				// Simply extend curExtent (coalescing newExtent into it)
  1513  
  1514  				curExtent.length += newExtent.length
  1515  
  1516  				return
  1517  			}
  1518  		}
  1519  	}
  1520  
  1521  	// See if newExtent collides with the extent at or immediately preceding newExtent.fileOffset
  1522  
  1523  	curExtentIndex, found, err = chunkedPutContext.extentMap.BisectLeft(newExtent.fileOffset)
  1524  	if nil != err {
  1525  		logFatalf("mergeSingleObjectExtent() failed to BisectLeft(): %v", err)
  1526  	}
  1527  
  1528  	if found {
  1529  		// curExtent exists and starts precisely at newExtent.fileOffset... fully overlapped by newExtent?
  1530  
  1531  		_, curExtentAsValue, _, err = chunkedPutContext.extentMap.GetByIndex(curExtentIndex)
  1532  		if nil != err {
  1533  			logFatalf("mergeSingleObjectExtent() failed to GetByIndex() [Case 2]: %v", err)
  1534  		}
  1535  		curExtent = curExtentAsValue.(*singleObjectExtentStruct)
  1536  
  1537  		if (curExtent.fileOffset + curExtent.length) <= (newExtent.fileOffset + newExtent.length) {
  1538  			// curExtent fully overlapped by newExtent... just drop it
  1539  
  1540  			_, err = chunkedPutContext.extentMap.DeleteByIndex(curExtentIndex)
  1541  			if nil != err {
  1542  				logFatalf("mergeSingleObjectExtent() failed to DeleteByIndex() [Case 1]: %v", err)
  1543  			}
  1544  
  1545  			// curExtentIndex left pointing to subsequent extent (if any) for loop below
  1546  		} else {
  1547  			// curExtent is overlapped "on the left" by newExtent... so truncate and move curExtent
  1548  
  1549  			postLength = (curExtent.fileOffset + curExtent.length) - (newExtent.fileOffset + newExtent.length)
  1550  
  1551  			_, err = chunkedPutContext.extentMap.DeleteByIndex(curExtentIndex)
  1552  			if nil != err {
  1553  				logFatalf("mergeSingleObjectExtent() failed to DeleteByIndex() curExtent [Case 2]: %v", err)
  1554  			}
  1555  
  1556  			splitExtent = &singleObjectExtentStruct{
  1557  				fileOffset:   newExtent.fileOffset + newExtent.length,
  1558  				objectOffset: curExtent.objectOffset + preLength + newExtent.length, // preLength is zero in this case since curExtent.fileOffset == newExtent.fileOffset
  1559  				length:       postLength,
  1560  			}
  1561  
  1562  			_, err = chunkedPutContext.extentMap.Put(splitExtent.fileOffset, splitExtent)
  1563  			if nil != err {
  1564  				logFatalf("mergeSingleObjectExtent() failed to Put() splitExtent [Case 1]: %v", err)
  1565  			}
  1566  
  1567  			// From here, we know we can just insert newExtent and we are done
  1568  
  1569  			_, err = chunkedPutContext.extentMap.Put(newExtent.fileOffset, newExtent)
  1570  			if nil != err {
  1571  				logFatalf("mergeSingleObjectExtent() failed to Put() newExtent [Case 1]: %v", err)
  1572  			}
  1573  
  1574  			return
  1575  		}
  1576  	} else { // !found
  1577  		if 0 > curExtentIndex {
  1578  			// curExtent does not exist (so cannot overlap)... so set curExtentIndex to point to first extent (if any) for loop below
  1579  
  1580  			curExtentIndex = 0
  1581  		} else { // 0 <= curExtentIndex
  1582  			// curExtent exists and starts strictly before newExtent.fileOffset... any overlap with newExtent?
  1583  
  1584  			_, curExtentAsValue, _, err = chunkedPutContext.extentMap.GetByIndex(curExtentIndex)
  1585  			if nil != err {
  1586  				logFatalf("mergeSingleObjectExtent() failed to GetByIndex() [Case 3]: %v", err)
  1587  			}
  1588  			curExtent = curExtentAsValue.(*singleObjectExtentStruct)
  1589  
  1590  			if (curExtent.fileOffset + curExtent.length) > newExtent.fileOffset {
  1591  				// curExtent definitely collides with newExtent... can we just truncate or do we need to split
  1592  
  1593  				preLength = newExtent.fileOffset - curExtent.fileOffset
  1594  
  1595  				if (curExtent.fileOffset + curExtent.length) <= (newExtent.fileOffset + newExtent.length) {
  1596  					// curExtent ends at or before newExtent ends... so simply truncate curExtent
  1597  
  1598  					curExtent.length = preLength
  1599  
  1600  					// Set curExtentIndex to point to following extent (if any) for loop below
  1601  
  1602  					curExtentIndex++
  1603  				} else {
  1604  					// curExtent is overlapped "in the middle" by newExtent... so split curExtent "around" newExtent
  1605  
  1606  					postLength = (curExtent.fileOffset + curExtent.length) - (newExtent.fileOffset + newExtent.length)
  1607  
  1608  					curExtent.length = preLength
  1609  
  1610  					splitExtent = &singleObjectExtentStruct{
  1611  						fileOffset:   newExtent.fileOffset + newExtent.length,
  1612  						objectOffset: curExtent.objectOffset + preLength + newExtent.length,
  1613  						length:       postLength,
  1614  					}
  1615  
  1616  					_, err = chunkedPutContext.extentMap.Put(splitExtent.fileOffset, splitExtent)
  1617  					if nil != err {
  1618  						logFatalf("mergeSingleObjectExtent() failed to Put() splitExtent [Case 2]: %v", err)
  1619  					}
  1620  
  1621  					// From here, we know we can just insert newExtent and we are done
  1622  
  1623  					_, err = chunkedPutContext.extentMap.Put(newExtent.fileOffset, newExtent)
  1624  					if nil != err {
  1625  						logFatalf("mergeSingleObjectExtent() failed to Put() newExtent [Case 2]: %v", err)
  1626  					}
  1627  
  1628  					return
  1629  				}
  1630  			} else {
  1631  				// curExtent does not overlap newExtent... set curExtentIndex to point to following extent (if any) for loop below
  1632  
  1633  				curExtentIndex++
  1634  			}
  1635  		}
  1636  	}
  1637  
  1638  	// At this point, the special case of the first extent starting at or before newExtent has been
  1639  	// cleared from overlapping with newExtent... so now we have to loop from curExtentIndex looking
  1640  	// for additional extents to either delete entirely or truncate "on the left" in order to ensure
  1641  	// that no other extents overlap with newExtent
  1642  
  1643  	for {
  1644  		_, curExtentAsValue, ok, err = chunkedPutContext.extentMap.GetByIndex(curExtentIndex)
  1645  		if nil != err {
  1646  			logFatalf("mergeSingleObjectExtent() failed to GetByIndex() [Case 4]: %v", err)
  1647  		}
  1648  
  1649  		if !ok {
  1650  			// No more extents, so we know we are done removing overlapped extents
  1651  
  1652  			break
  1653  		}
  1654  
  1655  		curExtent = curExtentAsValue.(*singleObjectExtentStruct)
  1656  
  1657  		if curExtent.fileOffset >= (newExtent.fileOffset + newExtent.length) {
  1658  			// This and all subsequent extents are "beyond" newExtent so cannot overlap
  1659  
  1660  			break
  1661  		}
  1662  
  1663  		if (curExtent.fileOffset + curExtent.length) <= (newExtent.fileOffset + newExtent.length) {
  1664  			// curExtent completely overlapped by newExtent... so simply delete it
  1665  
  1666  			_, err = chunkedPutContext.extentMap.DeleteByIndex(curExtentIndex)
  1667  			if nil != err {
  1668  				logFatalf("mergeSingleObjectExtent() failed to DeleteByIndex() [Case 3]: %v", err)
  1669  			}
  1670  
  1671  			// curExtentIndex left pointing to subsequent extent (if any) for next loop iteration
  1672  		} else {
  1673  			// curExtent is overlapped "on the left" by newExtent... so truncate and move curExtent
  1674  
  1675  			postLength = (curExtent.fileOffset + curExtent.length) - (newExtent.fileOffset + newExtent.length)
  1676  
  1677  			_, err = chunkedPutContext.extentMap.DeleteByIndex(curExtentIndex)
  1678  			if nil != err {
  1679  				logFatalf("mergeSingleObjectExtent() failed to DeleteByIndex() curExtent [Case 4]: %v", err)
  1680  			}
  1681  
  1682  			splitExtent = &singleObjectExtentStruct{
  1683  				fileOffset:   newExtent.fileOffset + newExtent.length,
  1684  				objectOffset: curExtent.objectOffset + (curExtent.length - postLength),
  1685  				length:       postLength,
  1686  			}
  1687  
  1688  			_, err = chunkedPutContext.extentMap.Put(splitExtent.fileOffset, splitExtent)
  1689  			if nil != err {
  1690  				logFatalf("mergeSingleObjectExtent() failed to Put() splitExtent [Case 3]: %v", err)
  1691  			}
  1692  
  1693  			// From here, we know we can just insert newExtent and we are done
  1694  
  1695  			_, err = chunkedPutContext.extentMap.Put(newExtent.fileOffset, newExtent)
  1696  			if nil != err {
  1697  				logFatalf("mergeSingleObjectExtent() failed to Put() newExtent [Case 3]: %v", err)
  1698  			}
  1699  
  1700  			return
  1701  		}
  1702  	}
  1703  
  1704  	// Having ensured there are no overlapping extents, it is safe to insert newExtent
  1705  
  1706  	_, err = chunkedPutContext.extentMap.Put(newExtent.fileOffset, newExtent)
  1707  	if nil != err {
  1708  		logFatalf("mergeSingleObjectExtent() failed to Put() newExtent [Case 4]: %v", err)
  1709  	}
  1710  }
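
// Illustrative sketch (not from the original source): the arithmetic of the
// "split curExtent around newExtent" case in mergeSingleObjectExtent() above,
// using a local mirror of singleObjectExtentStruct's fields. All values are
// hypothetical.
func exampleSplitAroundNewExtent() {
	type extent struct{ fileOffset, objectOffset, length uint64 }

	curExtent := extent{fileOffset: 0, objectOffset: 0, length: 100}   // covers file bytes [0,100)
	newExtent := extent{fileOffset: 40, objectOffset: 500, length: 20} // covers file bytes [40,60)

	preLength := newExtent.fileOffset - curExtent.fileOffset                                            // 40
	postLength := (curExtent.fileOffset + curExtent.length) - (newExtent.fileOffset + newExtent.length) // 40

	curExtent.length = preLength // curExtent now covers only file bytes [0,40)

	splitExtent := extent{
		fileOffset:   newExtent.fileOffset + newExtent.length,               // 60
		objectOffset: curExtent.objectOffset + preLength + newExtent.length, // 60 (skip past the bytes newExtent now supplies)
		length:       postLength,                                            // 40
	}

	// The extentMap would now hold curExtent [0,40), newExtent [40,60), and splitExtent [60,100).
	_ = splitExtent
}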
  1711  
  1712  func pruneExtentMap(extentMap sortedmap.LLRBTree, newSize uint64) {
  1713  	var (
  1714  		err                                error
  1715  		extentAsMultiObjectExtent          *multiObjectExtentStruct
  1716  		extentAsSingleObjectExtent         *singleObjectExtentStruct
  1717  		extentAsSingleObjectExtentWithLink *singleObjectExtentWithLinkStruct
  1718  		extentAsValue                      sortedmap.Value
  1719  		index                              int
  1720  		ok                                 bool
  1721  	)
  1722  
  1723  	// Nothing to do if extentMap hasn't been populated yet
  1724  
  1725  	if nil == extentMap {
  1726  		return
  1727  	}
  1728  
  1729  	// First, destroy any extents starting at or beyond newSize
  1730  
  1731  	index, _, err = extentMap.BisectRight(newSize)
  1732  	if nil != err {
  1733  		logFatalf("extentMap.BisectRight() failed: %v", err)
  1734  	}
  1735  
  1736  	ok = true
  1737  	for ok {
  1738  		ok, err = extentMap.DeleteByIndex(index)
  1739  		if nil != err {
  1740  			logFatalf("extentMap.DeleteByIndex(index) failed: %v", err)
  1741  		}
  1742  	}
  1743  
  1744  	// Next, back up and look at the (new) trailing extent
  1745  
  1746  	index--
  1747  
  1748  	if 0 > index {
  1749  		// No extents remain in extentMap... so just return (it is now empty)
  1750  
  1751  		return
  1752  	}
  1753  
  1754  	// See if trailing extent map entries need to be truncated
  1755  	// See if the (new) trailing extent needs to be truncated
  1756  	_, extentAsValue, _, err = extentMap.GetByIndex(index)
  1757  	if nil != err {
  1758  		logFatalf("extentMap.GetByIndex(index) failed: %v", err)
  1759  	}
  1760  
  1761  	extentAsMultiObjectExtent, ok = extentAsValue.(*multiObjectExtentStruct)
  1762  	if ok {
  1763  		if (extentAsMultiObjectExtent.fileOffset + extentAsMultiObjectExtent.length) > newSize {
  1764  			extentAsMultiObjectExtent.length = newSize - extentAsMultiObjectExtent.fileOffset
  1765  		}
  1766  	} else {
  1767  		extentAsSingleObjectExtent, ok = extentAsValue.(*singleObjectExtentStruct)
  1768  		if ok {
  1769  			if (extentAsSingleObjectExtent.fileOffset + extentAsSingleObjectExtent.length) > newSize {
  1770  				extentAsSingleObjectExtent.length = newSize - extentAsSingleObjectExtent.fileOffset
  1771  			}
  1772  		} else {
  1773  			extentAsSingleObjectExtentWithLink, ok = extentAsValue.(*singleObjectExtentWithLinkStruct)
  1774  			if ok {
  1775  				if (extentAsSingleObjectExtentWithLink.fileOffset + extentAsSingleObjectExtentWithLink.length) > newSize {
  1776  					extentAsSingleObjectExtentWithLink.length = newSize - extentAsSingleObjectExtentWithLink.fileOffset
  1777  				}
  1778  			} else {
  1779  				logFatalf("extentAsValue.(*{multi|single|single}ObjectExtent{||WithLink}Struct) returned !ok")
  1780  			}
  1781  		}
  1782  	}
  1783  
  1784  	return
  1785  }
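
// Illustrative sketch (not from the original source): the truncation that
// pruneExtentMap() above applies to the (new) trailing extent once all extents
// starting at or beyond newSize have been deleted. The values are hypothetical.
func exampleTrailingExtentTruncation() {
	var (
		fileOffset uint64 = 100 // trailing extent starts here...
		length     uint64 = 50  // ...and currently ends at 150
		newSize    uint64 = 120 // the file is being truncated to this size
	)

	if (fileOffset + length) > newSize {
		length = newSize - fileOffset // 20... the extent now ends exactly at newSize
	}

	_ = length
}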
  1786  
  1787  // DumpKey formats the Key (multiObjectExtentStruct.fileOffset) for fileInodeStruct.ExtentMap
  1788  func (fileInode *fileInodeStruct) DumpKey(key sortedmap.Key) (keyAsString string, err error) {
  1789  	var (
  1790  		keyAsU64 uint64
  1791  		ok       bool
  1792  	)
  1793  
  1794  	keyAsU64, ok = key.(uint64)
  1795  	if ok {
  1796  		keyAsString = fmt.Sprintf("0x%016X", keyAsU64)
  1797  	} else {
  1798  		err = fmt.Errorf("Failure of *fileInodeStruct.DumpKey(%v)", key)
  1799  	}
  1800  
  1801  	return
  1802  }
  1803  
  1804  // DumpValue formats the Value (multiObjectExtentStruct) for fileInodeStruct.ExtentMap
  1805  func (fileInode *fileInodeStruct) DumpValue(value sortedmap.Value) (valueAsString string, err error) {
  1806  	var (
  1807  		ok                             bool
  1808  		valueAsMultiObjectExtentStruct *multiObjectExtentStruct
  1809  	)
  1810  
  1811  	valueAsMultiObjectExtentStruct, ok = value.(*multiObjectExtentStruct)
  1812  	if ok {
  1813  		valueAsString = fmt.Sprintf(
  1814  			"{fileOffset:0x%016X,containerName:%s,objectName:%s,objectOffset:0x%016X,length:0x%016X}",
  1815  			valueAsMultiObjectExtentStruct.fileOffset,
  1816  			valueAsMultiObjectExtentStruct.containerName,
  1817  			valueAsMultiObjectExtentStruct.objectName,
  1818  			valueAsMultiObjectExtentStruct.objectOffset,
  1819  			valueAsMultiObjectExtentStruct.length)
  1820  	} else {
  1821  		err = fmt.Errorf("Failure of *fileInodeStruct.DumpValue(%v)", value)
  1822  	}
  1823  
  1824  	return
  1825  }
  1826  
  1827  // DumpKey formats the Key (singleObjectExtentStruct.fileOffset) for chunkedPutContextStruct.ExtentMap
  1828  func (chunkedPutContext *chunkedPutContextStruct) DumpKey(key sortedmap.Key) (keyAsString string, err error) {
  1829  	var (
  1830  		keyAsU64 uint64
  1831  		ok       bool
  1832  	)
  1833  
  1834  	keyAsU64, ok = key.(uint64)
  1835  	if ok {
  1836  		keyAsString = fmt.Sprintf("0x%016X", keyAsU64)
  1837  	} else {
  1838  		err = fmt.Errorf("Failure of *chunkedPutContextStruct.DumpKey(%v)", key)
  1839  	}
  1840  
  1841  	return
  1842  }
  1843  
  1844  // DumpValue formats the Value (singleObjectExtentStruct) for chunkedPutContextStruct.ExtentMap
  1845  func (chunkedPutContext *chunkedPutContextStruct) DumpValue(value sortedmap.Value) (valueAsString string, err error) {
  1846  	var (
  1847  		ok                              bool
  1848  		valueAsSingleObjectExtentStruct *singleObjectExtentStruct
  1849  	)
  1850  
  1851  	valueAsSingleObjectExtentStruct, ok = value.(*singleObjectExtentStruct)
  1852  	if ok {
  1853  		valueAsString = fmt.Sprintf(
  1854  			"{fileOffset:0x%016X,objectOffset:0x%016X,length:0x%016X}",
  1855  			valueAsSingleObjectExtentStruct.fileOffset,
  1856  			valueAsSingleObjectExtentStruct.objectOffset,
  1857  			valueAsSingleObjectExtentStruct.length)
  1858  	} else {
  1859  		err = fmt.Errorf("Failure of *chunkedPutContextStruct.DumpValue(%v)", value)
  1860  	}
  1861  
  1862  	return
  1863  }
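
// Illustrative sketch (not from the original source): the strings the DumpKey()
// and DumpValue() helpers above produce when sortedmap dumps an ExtentMap. The
// chunkedPutContextStruct and singleObjectExtentStruct values are hypothetical.
func exampleDumpFormatting() {
	chunkedPutContext := &chunkedPutContextStruct{}

	keyAsString, _ := chunkedPutContext.DumpKey(uint64(0x1000))
	valueAsString, _ := chunkedPutContext.DumpValue(&singleObjectExtentStruct{
		fileOffset:   0x1000,
		objectOffset: 0x200,
		length:       0x400,
	})

	_ = keyAsString   // "0x0000000000001000"
	_ = valueAsString // "{fileOffset:0x0000000000001000,objectOffset:0x0000000000000200,length:0x0000000000000400}"
}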
  1864  
  1865  func dumpExtentMap(extentMap sortedmap.LLRBTree) {
  1866  	var (
  1867  		err                                error
  1868  		extentAsMultiObjectExtent          *multiObjectExtentStruct
  1869  		extentAsSingleObjectExtent         *singleObjectExtentStruct
  1870  		extentAsSingleObjectExtentWithLink *singleObjectExtentWithLinkStruct
  1871  		extentAsValue                      sortedmap.Value
  1872  		extentIndex                        int
  1873  		extentMapLen                       int
  1874  		ok                                 bool
  1875  	)
  1876  
  1877  	extentMapLen, err = extentMap.Len()
  1878  	if nil != err {
  1879  		logFatalf("dumpExtentMap() doing extentMap.Len() failed: %v", err)
  1880  	}
  1881  
  1882  	for extentIndex = 0; extentIndex < extentMapLen; extentIndex++ {
  1883  		_, extentAsValue, ok, err = extentMap.GetByIndex(extentIndex)
  1884  		if nil != err {
  1885  			logFatalf("dumpExtentMap() doing extentMap.GetByIndex() failed: %v", err)
  1886  		}
  1887  		if !ok {
  1888  			logFatalf("dumpExtentMap() doing extentMap.GetByIndex() returned !ok")
  1889  		}
  1890  
  1891  		extentAsMultiObjectExtent, ok = extentAsValue.(*multiObjectExtentStruct)
  1892  		if ok {
  1893  			logInfof("             MultiObjectExtent: %+v", extentAsMultiObjectExtent)
  1894  		} else {
  1895  			extentAsSingleObjectExtent, ok = extentAsValue.(*singleObjectExtentStruct)
  1896  			if ok {
  1897  				logInfof("            SingleObjectExtent: %+v", extentAsSingleObjectExtent)
  1898  			} else {
  1899  				extentAsSingleObjectExtentWithLink, ok = extentAsValue.(*singleObjectExtentWithLinkStruct)
  1900  				if ok {
  1901  					logInfof("    SingleObjectExtentWithLink: %+v [containerName:%s objectName:%s]", extentAsSingleObjectExtentWithLink, extentAsSingleObjectExtentWithLink.chunkedPutContext.containerName, extentAsSingleObjectExtentWithLink.chunkedPutContext.objectName)
  1902  				} else {
  1903  					logFatalf("dumpExtentMap() doing extentAsValue.(*{multi|single|single}ObjectExtent{||WithLink}Struct) returned !ok")
  1904  				}
  1905  			}
  1906  		}
  1907  	}
  1908  }
  1909  
  1910  func (fileInode *fileInodeStruct) dumpExtentMaps() {
  1911  	var (
  1912  		chunkedPutContext        *chunkedPutContextStruct
  1913  		chunkedPutContextElement *list.Element
  1914  		chunkedPutContextIndex   uint64
  1915  		ok                       bool
  1916  	)
  1917  
  1918  	logInfof("FileInode @%p ExtentMap:", fileInode)
  1919  
  1920  	dumpExtentMap(fileInode.extentMap)
  1921  
  1922  	chunkedPutContextIndex = 0
  1923  	chunkedPutContextElement = fileInode.chunkedPutList.Front()
  1924  
  1925  	for nil != chunkedPutContextElement {
  1926  		chunkedPutContext, ok = chunkedPutContextElement.Value.(*chunkedPutContextStruct)
  1927  		if !ok {
  1928  			logFatalf("dumpExtentMaps() doing chunkedPutContextElement.Value.(*chunkedPutContextStruct) returned !ok")
  1929  		}
  1930  
  1931  		logInfof("  ChunkedPutContext #%v @%p ExtentMap [containerName:%s objectName:%s]:", chunkedPutContextIndex, chunkedPutContext, chunkedPutContext.containerName, chunkedPutContext.objectName)
  1932  
  1933  		dumpExtentMap(chunkedPutContext.extentMap)
  1934  
  1935  		chunkedPutContextIndex++
  1936  		chunkedPutContextElement = chunkedPutContextElement.Next()
  1937  	}
  1938  }