github.com/siglens/siglens@v0.0.0-20240328180423-f7ce9ae441ed/pkg/segment/reader/segread/segreader.go

     1  /*
     2  Copyright 2023.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package segread
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"os"
    23  	"sync"
    24  
    25  	"github.com/klauspost/compress/zstd"
    26  	"github.com/siglens/siglens/pkg/segment/structs"
    27  	"github.com/siglens/siglens/pkg/segment/utils"
    28  	toputils "github.com/siglens/siglens/pkg/utils"
    29  	log "github.com/sirupsen/logrus"
    30  )
    31  
    32  var uncompressedReadBufferPool = sync.Pool{
    33  	New: func() interface{} {
    34  		// The Pool's New function should generally only return pointer
    35  		// types, since a pointer can be put into the return interface
    36  		// value without an allocation:
    37  		slice := make([]byte, 0, utils.WIP_SIZE)
    38  		return &slice
    39  	},
    40  }
    41  
    42  var fileReadBufferPool = sync.Pool{
    43  	New: func() interface{} {
    44  		// The Pool's New function should generally only return pointer
    45  		// types, since a pointer can be put into the return interface
    46  		// value without an allocation:
    47  		slice := make([]byte, utils.FILE_READ_BUFFER_SIZE)
    48  		return &slice
    49  	},
    50  }
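
// Illustrative sketch (an addition, not part of the original file): the pools
// above hand out *[]byte so a buffer can round-trip through the pool without an
// extra allocation. exampleBufferRoundTrip is a hypothetical name.
func exampleBufferRoundTrip() {
	bufPtr := fileReadBufferPool.Get().(*[]byte)
	buf := (*bufPtr)[:0] // reuse the backing array, starting at length 0

	// ... fill buf, e.g. via file reads ...
	buf = append(buf, 0x01)

	*bufPtr = buf                  // keep any growth of the backing array
	fileReadBufferPool.Put(bufPtr) // return the buffer for reuse
}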
    51  
    52  // Use zstd.WithDecoderConcurrency(0) so that it can have GOMAXPROCS goroutines.
    53  // If this option is not given it defaults to 4 or GOMAXPROCS, whichever is
    54  // smaller.
    55  var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
    56  
    57  type SegmentFileReader struct {
    58  	ColName       string   // column name this file references
    59  	fileName      string   // file name to iterate
    60  	currFD        *os.File // current file descriptor
    61  	blockMetadata map[uint16]*structs.BlockMetadataHolder
    62  
    63  	currBlockNum             uint16
    64  	currRecordNum            uint16
    65  	currOffset               uint32
    66  	currUncompressedBlockLen uint32
    67  	currRecLen               uint32
    68  
    69  	isBlockLoaded        bool
    70  	currFileBuffer       []byte   // buffer re-used for file reads
    71  	currUncompressBuffer []byte   // buffer re-used for zstd decompression
    72  	currRawBlockBuffer   []byte   // raw uncompressed block
    73  	encType              uint8    // encoding type for this block
    74  	deTlv                [][]byte // deTlv[dWordIdx] --> []byte (the TLV byte slice)
    75  	deRecToTlv           []uint16 // deRecToTlv[recNum] --> dWordIdx
    76  	blockSummaries       []*structs.BlockSummary
    77  }
    78  
    79  // returns a new SegmentFileReader and any errors encountered
    80  // The caller must call .Close() on the returned SegmentFileReader when finished, to close the fd and release buffers
    81  func InitNewSegFileReader(fd *os.File, colName string, blockMetadata map[uint16]*structs.BlockMetadataHolder,
    82  	qid uint64, blockSummaries []*structs.BlockSummary) (*SegmentFileReader, error) {
    83  	return &SegmentFileReader{
    84  		ColName:              colName,
    85  		fileName:             fd.Name(),
    86  		currFD:               fd,
    87  		blockMetadata:        blockMetadata,
    88  		currOffset:           0,
    89  		currFileBuffer:       *fileReadBufferPool.Get().(*[]byte),
    90  		currUncompressBuffer: *uncompressedReadBufferPool.Get().(*[]byte),
    91  		isBlockLoaded:        false,
    92  		encType:              255,
    93  		blockSummaries:       blockSummaries,
    94  		deTlv:                make([][]byte, 0),
    95  		deRecToTlv:           make([]uint16, 0),
    96  	}, nil
    97  }
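
// Minimal usage sketch (assumes the caller already has the column's block
// metadata and block summaries from the segment's metadata; openSegColumn is a
// hypothetical name, not part of the original API).
func openSegColumn(csgFileName string, colName string,
	blockMetadata map[uint16]*structs.BlockMetadataHolder,
	blockSummaries []*structs.BlockSummary, qid uint64) (*SegmentFileReader, error) {

	fd, err := os.Open(csgFileName)
	if err != nil {
		return nil, fmt.Errorf("openSegColumn: failed to open %s: %v", csgFileName, err)
	}

	sfr, err := InitNewSegFileReader(fd, colName, blockMetadata, qid, blockSummaries)
	if err != nil {
		fd.Close()
		return nil, err
	}
	// The caller owns sfr and must call sfr.Close() when finished; Close() both
	// returns the pooled buffers and closes fd.
	return sfr, nil
}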
    98  
    99  func (sfr *SegmentFileReader) Close() error {
   100  	if sfr.currFD == nil {
   101  		return errors.New("tried to close an unopened segment file reader")
   102  	}
   103  	sfr.returnBuffers()
   104  	return sfr.currFD.Close()
   105  }
   106  
   107  func (sfr *SegmentFileReader) returnBuffers() {
   108  	uncompressedReadBufferPool.Put(&sfr.currRawBlockBuffer)
   109  	fileReadBufferPool.Put(&sfr.currFileBuffer)
   110  }
   111  
   112  // returns a bool indicating if blockNum is valid, and any error encountered
   113  func (sfr *SegmentFileReader) readBlock(blockNum uint16) (bool, error) {
   114  	validBlock, err := sfr.loadBlockUsingBuffer(blockNum)
   115  	if err != nil {
   116  		log.Errorf("readBlock: error trying to read block %v in file %s. Error: %+v",
   117  			blockNum, sfr.fileName, err)
   118  		return true, err
   119  	}
   120  	if !validBlock {
   121  		return false, fmt.Errorf("column does not exist in block")
   122  	}
   123  
   124  	sfr.currBlockNum = blockNum
   125  	sfr.isBlockLoaded = true
   126  	return true, nil
   127  }
   128  
    129  // helper function to decompress and load a block using the reader's buffers
    130  // returns whether the block is valid, and any error encountered
    131  // The block is not valid if the column is not found in the block metadata, which means the column never existed for this block and only exists in other blocks
   132  func (sfr *SegmentFileReader) loadBlockUsingBuffer(blockNum uint16) (bool, error) {
   133  
    134  	blockMeta, blockExists := sfr.blockMetadata[blockNum]
    135  	if !blockExists {
    136  		return true, errors.New("block number does not exist for this segment file reader")
    137  	}
    138  	colBlockLen, colExists := blockMeta.ColumnBlockLen[sfr.ColName]
    139  	if !colExists {
    140  		// This is an invalid block but not an error: the block exists, but this column was never written for it
    141  		return false, nil
    142  	}
    143  
    144  	colBlockOffset, colExists := blockMeta.ColumnBlockOffset[sfr.ColName]
   145  	if !colExists {
   146  		return false, nil
   147  	}
   148  
   149  	if uint32(len(sfr.currFileBuffer)) < colBlockLen {
   150  		newArr := make([]byte, colBlockLen-uint32(len(sfr.currFileBuffer)))
   151  		sfr.currFileBuffer = append(sfr.currFileBuffer, newArr...)
   152  	}
   153  	_, err := sfr.currFD.ReadAt(sfr.currFileBuffer[:colBlockLen], colBlockOffset)
   154  	if err != nil {
   155  		log.Errorf("loadBlockUsingBuffer read file error: %+v", err)
   156  		return true, err
   157  	}
   158  	oPtr := uint32(0)
   159  	sfr.encType = sfr.currFileBuffer[oPtr]
   160  	oPtr++
   161  
   162  	if sfr.encType == utils.ZSTD_COMLUNAR_BLOCK[0] {
   163  		err := sfr.unpackRawCsg(sfr.currFileBuffer[oPtr:colBlockLen], blockNum)
   164  		return true, err
   165  	} else if sfr.encType == utils.ZSTD_DICTIONARY_BLOCK[0] {
   166  		err := sfr.readDictEnc(sfr.currFileBuffer[oPtr:colBlockLen], blockNum)
   167  		return true, err
   168  	} else {
    169  		log.Errorf("loadBlockUsingBuffer: received an unknown encoding type for column %v! expected zstd or dictenc, got %+v",
    170  			sfr.ColName, sfr.encType)
    171  		return true, fmt.Errorf("received an unknown encoding type for column %v! expected zstd or dictenc, got %+v",
    172  			sfr.ColName, sfr.encType)
   173  	}
   174  }
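
// Sketch of the on-disk column block layout that loadBlockUsingBuffer expects:
// [1 byte encType][payload], where encType selects zstd (ZSTD_COMLUNAR_BLOCK)
// or dictionary (ZSTD_DICTIONARY_BLOCK) decoding. exampleIsKnownBlockEncoding
// is a hypothetical helper name.
func exampleIsKnownBlockEncoding(encType uint8) bool {
	return encType == utils.ZSTD_COMLUNAR_BLOCK[0] || encType == utils.ZSTD_DICTIONARY_BLOCK[0]
}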
   175  
   176  // returns the raw bytes of the blockNum:recordNum combination in the current segfile
   177  // optimized for subsequent calls to have the same blockNum
   178  // returns : encodedVal, error
   179  func (sfr *SegmentFileReader) ReadRecordFromBlock(blockNum uint16, recordNum uint16) ([]byte, error) {
   180  
   181  	if !sfr.isBlockLoaded || sfr.currBlockNum != blockNum {
   182  		valid, err := sfr.readBlock(blockNum)
   183  		if !valid {
   184  			return nil, err
   185  		}
   186  		if err != nil {
   187  			log.Errorf("ReadRecordFromBlock: error loading blockNum: %v. Error: %+v", blockNum, err)
   188  			return nil, err
   189  		}
   190  	}
   191  
   192  	// if dict encoding, we use the dictmapping
   193  	if sfr.encType == utils.ZSTD_DICTIONARY_BLOCK[0] {
   194  		ret, err := sfr.deGetRec(recordNum)
   195  		return ret, err
   196  	}
   197  
   198  	if sfr.currRecordNum > recordNum {
    199  		// we have to reset the offset and iterate until we reach recordNum because we do not know how to go backwards within a block
   200  		sfr.currOffset = 0
   201  		currRecLen, err := sfr.getCurrentRecordLength()
   202  		if err != nil {
   203  			log.Errorf("ReadRecordFromBlock: error resetting SegmentFileReader %s. Error: %+v",
   204  				sfr.fileName, err)
   205  			return nil, err
   206  		}
   207  		sfr.currRecLen = currRecLen
   208  		sfr.currRecordNum = 0
   209  	} else if sfr.currRecordNum == recordNum {
   210  		return sfr.currRawBlockBuffer[sfr.currOffset : sfr.currOffset+sfr.currRecLen], nil
   211  	}
   212  
   213  	for {
   214  		if sfr.currRecordNum == recordNum {
   215  			return sfr.currRawBlockBuffer[sfr.currOffset : sfr.currOffset+sfr.currRecLen], nil
   216  		} else if sfr.currRecordNum > recordNum {
   217  			break // we cannot go backwards
   218  		}
   219  		err := sfr.iterateNextRecord()
   220  		if err != nil {
   221  			break
   222  		}
   223  	}
   224  
   225  	errStr := fmt.Sprintf("ReadRecordFromBlock: reached end of block before matching recNum %+v, blockNum %+v, Currently at rec %+v. File %+v, colname %v", recordNum, blockNum,
   226  		sfr.currRecordNum, sfr.fileName, sfr.ColName)
   227  	log.Error(errStr)
   228  	log.Errorf("Current offset %+v, blkLen: %+v", sfr.currOffset, sfr.currUncompressedBlockLen)
   229  	return nil, errors.New(errStr)
   230  }
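
// Illustrative sketch: reading every record of one block. The reader can only
// move forward within a block, so ascending recordNum order avoids repeated
// rescans from offset 0. readWholeBlock is a hypothetical helper and recCount
// is assumed to come from the block's summary.
func (sfr *SegmentFileReader) readWholeBlock(blockNum uint16, recCount uint16) ([][]byte, error) {
	records := make([][]byte, 0, recCount)
	for recNum := uint16(0); recNum < recCount; recNum++ {
		rawVal, err := sfr.ReadRecordFromBlock(blockNum, recNum)
		if err != nil {
			return nil, err
		}
		// rawVal aliases the reader's internal buffers, so copy it if it must
		// outlive the next call on this reader.
		records = append(records, append([]byte(nil), rawVal...))
	}
	return records, nil
}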
   231  
    232  // advances the reader to the next record in the current block
    233  // an error will be returned if no more records are available
   234  func (sfr *SegmentFileReader) iterateNextRecord() error {
   235  	nextOff := sfr.currOffset + sfr.currRecLen
   236  	if nextOff >= sfr.currUncompressedBlockLen {
   237  		log.Errorf("iterateNextRecord: reached end of block next Offset:%+v, curr uncompressed blklen: %+v", nextOff, sfr.currUncompressedBlockLen)
   238  		return errors.New("no more records to iterate")
   239  	}
   240  	sfr.currOffset = nextOff
   241  	currRecLen, err := sfr.getCurrentRecordLength()
   242  	if err != nil {
    243  		log.Errorf("iterateNextRecord: an error occurred while getting the next record's length: %+v", err)
   244  		sfr.currOffset -= sfr.currRecLen
   245  		return err
   246  	}
   247  	sfr.currRecLen = currRecLen
   248  	sfr.currRecordNum = sfr.currRecordNum + 1
   249  	return nil
   250  }
   251  
   252  func (sfr *SegmentFileReader) getCurrentRecordLength() (uint32, error) {
   253  	var reclen uint32
   254  	switch sfr.currRawBlockBuffer[sfr.currOffset] {
   255  	case utils.VALTYPE_ENC_SMALL_STRING[0]:
   256  		// 1 byte for type, 2 for str-len, then str-len for actual string
   257  		reclen = 3 + uint32(toputils.BytesToUint16LittleEndian(sfr.currRawBlockBuffer[sfr.currOffset+1:]))
   258  	case utils.VALTYPE_ENC_BOOL[0]:
   259  		reclen = 2
   260  	case utils.VALTYPE_ENC_INT8[0]:
   261  		reclen = 2
   262  	case utils.VALTYPE_ENC_INT16[0]:
   263  		reclen = 3
   264  	case utils.VALTYPE_ENC_INT32[0]:
   265  		reclen = 5
   266  	case utils.VALTYPE_ENC_INT64[0]:
   267  		reclen = 9
   268  	case utils.VALTYPE_ENC_UINT8[0]:
   269  		reclen = 2
   270  	case utils.VALTYPE_ENC_UINT16[0]:
   271  		reclen = 3
   272  	case utils.VALTYPE_ENC_UINT32[0]:
   273  		reclen = 5
   274  	case utils.VALTYPE_ENC_UINT64[0]:
   275  		reclen = 9
   276  	case utils.VALTYPE_ENC_FLOAT64[0]:
   277  		reclen = 9
   278  	case utils.VALTYPE_ENC_BACKFILL[0]:
   279  		reclen = 1
   280  	case utils.VALTYPE_DICT_ARRAY[0]:
   281  		reclen = 3 + uint32(toputils.BytesToUint16LittleEndian(sfr.currRawBlockBuffer[sfr.currOffset+1:]))
   282  	case utils.VALTYPE_RAW_JSON[0]:
   283  		reclen = 3 + uint32(toputils.BytesToUint16LittleEndian(sfr.currRawBlockBuffer[sfr.currOffset+1:]))
   284  
   285  	default:
   286  		log.Errorf("getCurrentRecordLength: Received an unknown encoding type %+v at offset %+v", sfr.currRawBlockBuffer[sfr.currOffset], sfr.currOffset)
   287  		return 0, errors.New("received an unknown encoding type")
   288  	}
   289  	return reclen, nil
   290  }
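
// Illustrative sketch of the record layout that getCurrentRecordLength decodes
// for a small string: 1 type byte, a little-endian uint16 length, then the
// string bytes. exampleSmallStringRecLen is a hypothetical name.
func exampleSmallStringRecLen() uint32 {
	str := "hello"
	rec := []byte{utils.VALTYPE_ENC_SMALL_STRING[0], byte(len(str)), 0} // 'T' + 2-byte LE length
	rec = append(rec, str...)                                           // the string bytes

	// Same formula as the VALTYPE_ENC_SMALL_STRING case above: 3 header bytes
	// plus the string length; returns 8 for "hello".
	return 3 + uint32(toputils.BytesToUint16LittleEndian(rec[1:]))
}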
   291  
   292  func (sfr *SegmentFileReader) IsBlkDictEncoded(blockNum uint16) (bool, error) {
   293  
   294  	if !sfr.isBlockLoaded || sfr.currBlockNum != blockNum {
   295  		valid, err := sfr.readBlock(blockNum)
   296  		if !valid {
   297  			return false, err
   298  		}
   299  		if err != nil {
   300  			log.Errorf("IsBlkDictEncoded: error loading blockNum: %v. Error: %+v", blockNum, err)
   301  			return false, err
   302  		}
   303  	}
   304  
   305  	if sfr.encType != utils.ZSTD_DICTIONARY_BLOCK[0] {
   306  		return false, nil
   307  	}
   308  
   309  	return true, nil
   310  }
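
// Usage sketch: callers can branch on IsBlkDictEncoded to bulk-load dictionary
// encoded blocks and fall back to per-record reads otherwise. loadBlockValues
// is a hypothetical helper; recNums is assumed to be sorted ascending.
func (sfr *SegmentFileReader) loadBlockValues(blockNum uint16, recNums []uint16,
	results map[uint16]map[string]interface{}) error {

	isDict, err := sfr.IsBlkDictEncoded(blockNum)
	if err != nil {
		return err
	}
	if isDict {
		if !sfr.GetDictEncCvalsFromColFile(results, blockNum, recNums) {
			return fmt.Errorf("loadBlockValues: dict decode failed for block %v", blockNum)
		}
		return nil
	}
	for _, rn := range recNums {
		rawVal, err := sfr.ReadRecordFromBlock(blockNum, rn)
		if err != nil {
			return err
		}
		if _, ok := results[rn]; !ok {
			results[rn] = make(map[string]interface{})
		}
		// Non-dict values are still TLV-encoded; this sketch stores a copy of
		// the raw bytes and leaves decoding to the caller.
		results[rn][sfr.ColName] = append([]byte(nil), rawVal...)
	}
	return nil
}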
   311  
   312  func (sfr *SegmentFileReader) readDictEnc(buf []byte, blockNum uint16) error {
   313  
   314  	idx := uint32(0)
   315  
   316  	// read num of dict words
   317  	numWords := toputils.BytesToUint16LittleEndian(buf[idx : idx+2])
   318  	idx += 2
   319  
   320  	if uint16(len(sfr.deTlv)) < numWords {
   321  		extLen := numWords - uint16(len(sfr.deTlv))
   322  		newArr := make([][]byte, extLen)
   323  		sfr.deTlv = append(sfr.deTlv, newArr...)
   324  	}
   325  
   326  	if uint16(len(sfr.deRecToTlv)) < sfr.blockSummaries[blockNum].RecCount {
   327  		extLen := sfr.blockSummaries[blockNum].RecCount - uint16(len(sfr.deRecToTlv))
   328  		newArr := make([]uint16, extLen)
   329  		sfr.deRecToTlv = append(sfr.deRecToTlv, newArr...)
   330  	}
   331  
   332  	var numRecs uint16
   333  	var soffW uint32
   334  	for w := uint16(0); w < numWords; w++ {
   335  
   336  		soffW = idx
   337  		// read dictWord 'T'
   338  		switch buf[idx] {
   339  		case utils.VALTYPE_ENC_SMALL_STRING[0]:
   340  			//  3 => 1 for 'T' and 2 for 'L' of string
   341  			idx += uint32(3 + toputils.BytesToUint16LittleEndian(buf[idx+1:idx+3]))
   342  		case utils.VALTYPE_ENC_INT64[0], utils.VALTYPE_ENC_FLOAT64[0]:
    343  			idx += 9 // 1 for 'T' and 8 bytes for the int64/float64 value
   344  		case utils.VALTYPE_ENC_BACKFILL[0]:
   345  			idx += 1 // 1 for T
   346  		default:
    347  			return fmt.Errorf("readDictEnc: unknown dictEnc type %v; only str/int64/float64 are supported", buf[idx])
   348  		}
   349  
   350  		sfr.deTlv[w] = buf[soffW:idx]
   351  
   352  		// read num of records
   353  		numRecs = toputils.BytesToUint16LittleEndian(buf[idx : idx+2])
   354  		idx += 2
   355  
   356  		for i := uint16(0); i < numRecs; i++ {
   357  			// at this recNum's position in the array store the idx of the TLV byte slice
   358  			sfr.deRecToTlv[toputils.BytesToUint16LittleEndian(buf[idx:idx+2])] = w
   359  			idx += 2
   360  		}
   361  	}
   362  
   363  	return nil
   364  }
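
// Illustrative sketch of the dictionary block layout that readDictEnc parses:
// [numWords u16] then, per word: [TLV dict word][numRecs u16][recNum u16]...
// exampleDictEncPayload is a hypothetical name; it builds a one-word payload
// mapping records 0 and 1 to the string "gold".
func exampleDictEncPayload() []byte {
	word := "gold"
	buf := []byte{1, 0} // numWords = 1 (little endian)

	buf = append(buf, utils.VALTYPE_ENC_SMALL_STRING[0], byte(len(word)), 0) // dict word 'T' and 'L'
	buf = append(buf, word...)                                               // dict word value

	buf = append(buf, 2, 0)       // numRecs = 2
	buf = append(buf, 0, 0, 1, 0) // recNums 0 and 1 (little endian)
	return buf
}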
   365  
   366  func (sfr *SegmentFileReader) unpackRawCsg(buf []byte, blockNum uint16) error {
   367  
   368  	uncompressed, err := decoder.DecodeAll(buf[0:], sfr.currUncompressBuffer[:0])
   369  	if err != nil {
   370  		log.Errorf("unpackRawCsg decompress error: %+v", err)
   371  		return err
   372  	}
   373  	sfr.currRawBlockBuffer = uncompressed
   374  	sfr.currOffset = 0
   375  
   376  	currRecLen, err := sfr.getCurrentRecordLength()
   377  	if err != nil {
   378  		log.Errorf("unpackRawCsg: error getting record length for the first record in block %v in file %s. Error: %+v",
   379  			blockNum, sfr.fileName, err)
   380  		return err
   381  	}
   382  	sfr.currRecLen = currRecLen
   383  	sfr.currRecordNum = 0
   384  	sfr.currUncompressedBlockLen = uint32(len(sfr.currRawBlockBuffer))
   385  
   386  	return nil
   387  }
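
// Sketch of the buffer-reuse pattern above: zstd's DecodeAll appends to the
// destination slice and returns it, so passing currUncompressBuffer[:0] reuses
// its backing array when capacity allows. exampleDecodeReuse is a hypothetical name.
func exampleDecodeReuse(compressed []byte, scratch []byte) ([]byte, error) {
	out, err := decoder.DecodeAll(compressed, scratch[:0])
	if err != nil {
		return nil, err
	}
	// out may alias scratch (if it had enough capacity) or be a newly grown slice.
	return out, nil
}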
   388  
   389  func (sfr *SegmentFileReader) GetDictEncCvalsFromColFile(results map[uint16]map[string]interface{},
   390  	blockNum uint16, orderedRecNums []uint16) bool {
   391  
   392  	if !sfr.isBlockLoaded || sfr.currBlockNum != blockNum {
   393  		valid, err := sfr.readBlock(blockNum)
   394  		if !valid {
   395  			return false
   396  		}
   397  		if err != nil {
   398  			return false
   399  		}
   400  	}
   401  
   402  	if sfr.encType != utils.ZSTD_DICTIONARY_BLOCK[0] {
   403  		return false
   404  	}
   405  
   406  	return sfr.deToResults(results, orderedRecNums)
   407  }
   408  
   409  func (sfr *SegmentFileReader) deToResults(results map[uint16]map[string]interface{},
   410  	orderedRecNums []uint16) bool {
   411  
   412  	for _, rn := range orderedRecNums {
   413  		dwIdx := sfr.deRecToTlv[rn]
   414  		dWord := sfr.deTlv[dwIdx]
   415  		_, ok := results[rn]
   416  		if !ok {
   417  			results[rn] = make(map[string]interface{})
   418  		}
   419  		if dWord[0] == utils.VALTYPE_ENC_SMALL_STRING[0] {
   420  			results[rn][sfr.ColName] = string(dWord[3:])
   421  		} else if dWord[0] == utils.VALTYPE_ENC_INT64[0] {
   422  			results[rn][sfr.ColName] = toputils.BytesToInt64LittleEndian(dWord[1:])
   423  		} else if dWord[0] == utils.VALTYPE_ENC_FLOAT64[0] {
   424  			results[rn][sfr.ColName] = toputils.BytesToFloat64LittleEndian(dWord[1:])
   425  		} else if dWord[0] == utils.VALTYPE_ENC_BACKFILL[0] {
   426  			results[rn][sfr.ColName] = nil
   427  		} else {
   428  			log.Errorf("deToResults: de only supported for str/int64/float64")
   429  			return false
   430  		}
   431  	}
   432  	return true
   433  }
   434  
   435  func (sfr *SegmentFileReader) deGetRec(rn uint16) ([]byte, error) {
   436  
   437  	if rn >= uint16(len(sfr.deRecToTlv)) {
   438  		return nil, fmt.Errorf("recNum %+v does not exist, len=%+v", rn, len(sfr.deRecToTlv))
   439  	}
   440  	dwIdx := sfr.deRecToTlv[rn]
   441  	dWord := sfr.deTlv[dwIdx]
   442  	return dWord, nil
   443  }
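
// Illustrative sketch: decoding a single TLV value such as the one returned by
// deGetRec or ReadRecordFromBlock, mirroring the cases handled in deToResults.
// exampleDecodeTlv is a hypothetical name and only covers the dict-encoded types.
func exampleDecodeTlv(dWord []byte) (interface{}, error) {
	switch dWord[0] {
	case utils.VALTYPE_ENC_SMALL_STRING[0]:
		return string(dWord[3:]), nil // skip 1 type byte + 2 length bytes
	case utils.VALTYPE_ENC_INT64[0]:
		return toputils.BytesToInt64LittleEndian(dWord[1:]), nil
	case utils.VALTYPE_ENC_FLOAT64[0]:
		return toputils.BytesToFloat64LittleEndian(dWord[1:]), nil
	case utils.VALTYPE_ENC_BACKFILL[0]:
		return nil, nil // backfilled / missing value
	default:
		return nil, fmt.Errorf("exampleDecodeTlv: unsupported type %v", dWord[0])
	}
}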