github.com/scottcagno/storage@v1.8.0/pkg/bio/util.go

package bio

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

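// allocate returns a zeroed byte slice whose length is rounded up by
// align(size, size-1); that mask only rounds cleanly when size is a power
// of two, which appears to be an assumption of the callers.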
func allocate(size int) []byte {
	return calloc(align(size, size-1))
}

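// info is a debugging helper that prints the pointer, length, capacity,
// and contents of the slice that p points to.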
func info(p *[]byte) {
	if p == nil {
		fmt.Println("ptr=nil, len=0, cap=0, data=nil")
		return
	}
	fmt.Printf("ptr=%p, len=%d, cap=%d, data=%q\n", *p, len(*p), cap(*p), *p)
}

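// longStr breaks s into lines of at most max characters, each prefixed
// with pre and framed as "| ... |".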
func longStr(s string, pre string, max int) string {
	// the line format is loop-invariant, so build it once
	fmtr := fmt.Sprintf("%s| %%-%ds |\n", pre, max)
	var ss string
	for i := 0; i < len(s); i += max {
		j := i + max
		if j > len(s) {
			j = len(s)
		}
		ss += fmt.Sprintf(fmtr, s[i:j])
	}
	return ss
}

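// ChunkSliceIter walks slice in chunks of at most chunkSize elements,
// calling fn on each chunk; the value returned by fn is ignored.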
func ChunkSliceIter(slice []int, chunkSize int, fn func(p []int) int) {
	for beg := 0; beg < len(slice); beg += chunkSize {
		end := beg + chunkSize
		// necessary check to avoid slicing beyond the slice length
		if end > len(slice) {
			end = len(slice)
		}
		_ = fn(slice[beg:end])
	}
}

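// ChunkSliceV1 splits slice into consecutive chunks of at most chunkSize
// elements, e.g. a 5-element slice with chunkSize 2 yields chunks of 2, 2, and 1.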
// This implementation walks the slice by index and never modifies it.
func ChunkSliceV1(slice []int, chunkSize int) [][]int {
	var chunks [][]int
	for i := 0; i < len(slice); i += chunkSize {
		end := i + chunkSize
		// necessary check to avoid slicing beyond the slice length
		if end > len(slice) {
			end = len(slice)
		}
		chunks = append(chunks, slice[i:end])
	}
	return chunks
}

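// ChunkSliceV2 produces the same chunks as ChunkSliceV1.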
// This implementation repeatedly re-slices the input until it is empty.
func ChunkSliceV2(slice []int, chunkSize int) [][]int {
	var chunks [][]int
	for {
		if len(slice) == 0 {
			break
		}
		// necessary check to avoid slicing beyond the slice length
		if len(slice) < chunkSize {
			chunkSize = len(slice)
		}
		chunks = append(chunks, slice[0:chunkSize])
		slice = slice[chunkSize:]
	}
	return chunks
}

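// calcBlocks reports how many blocks are needed to hold size bytes of data,
// assuming blockMask is blockSize-1 and each block carries headerSize bytes
// of overhead (both defined elsewhere in this package).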
func calcBlocks(size int) int {
	size = align(size, blockMask)
	return size / (blockSize - headerSize)
}

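// align rounds size up to the next multiple of mask+1; mask must be one
// less than a power of two for the bit trick to work.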
func align(size, mask int) int {
	return (size + mask) &^ mask
}

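// calloc returns a zeroed byte slice with both length and capacity of size.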
func calloc(size int) []byte {
	return make([]byte, size)
}

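// malloc returns an empty byte slice with a capacity of size.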
func malloc(size int) []byte {
	return make([]byte, 0, size)
}

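// clear truncates the slice to zero length (keeping its capacity) and
// returns the resulting length and capacity.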
func clear(p *[]byte) (int, int) {
	*p = (*p)[:0]
	return len(*p), cap(*p)
}

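// free releases the slice by setting it to nil.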
func free(p *[]byte) {
	*p = nil
}

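// decodeHeader fills h from the 6-byte header encoding in p:
// status, kind, part, parts, followed by a little-endian uint16 length.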
func decodeHeader(p []byte, h *header) (int, error) {
	if p == nil || len(p) != 6 {
		return -1, ErrInvalidSize
	}
	_ = p[5] // bounds check hint to the compiler
	h.status = p[0]
	h.kind = p[1]
	h.part = p[2]
	h.parts = p[3]
	h.length = uint16(p[4]) | uint16(p[5])<<8 // little-endian length
	return len(p), nil
}

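// encodeHeader writes h into the 6-byte slice p using the same layout that
// decodeHeader expects; a nil h is encoded as an empty, single-part header.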
func encodeHeader(p []byte, h *header) (int, error) {
	if p == nil || len(p) != 6 {
		return -1, ErrInvalidSize
	}
	if h == nil {
		// encode a "zero value" header: empty, full-kind, single part
		h = new(header)
		h.status = statusEmpty
		h.kind = kindFull
		h.part = 1
		h.parts = 1
		h.length = 0
	}
	_ = p[5] // bounds check hint to the compiler
	p[0] = h.status
	p[1] = h.kind
	p[2] = h.part
	p[3] = h.parts
	p[4] = byte(h.length)
	p[5] = byte(h.length >> 8) // little-endian length
	return len(p), nil
}

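// Info renders a human-readable summary of the writer's buffer state and of
// each block in b, followed by a full hex dump of the buffer.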
func Info(w *Writer, b *bytes.Buffer) string {
	buf := b.Bytes()
	ss := "writer:\n"
	ss += fmt.Sprintf("buffered=%d, available=%d\n", w.bw.Buffered(), w.bw.Available())
	for i := 0; i < b.Len(); i += blockSize {
		ss += fmt.Sprintf("\tblock[%.2d]\n", i/blockSize)
		// decode and print the block header, then the block's data payload
		hdr := new(header)
		decodeHeader(buf[i:i+headerSize], hdr)
		ss += fmt.Sprintf("\t\t%s\n", hdr)
		dat := buf[i+headerSize : i+blockSize]
		ss += fmt.Sprintf("\t\t%q\n", dat)
	}
	ss += fmt.Sprintf("\n---[ START HEXDUMP ]---\n%s\n---[ END HEXDUMP ]---\n", hex.Dump(buf))
	return ss
}