package huff0

import (
	"fmt"
	"math"
	"runtime"
	"sync"
)

// Compress1X will compress the input as a single Huffman stream.
// The output can be decoded using Decompress1X.
// Supply a Scratch object. The scratch object contains state about re-use,
// So when sharing across independent encodes, be sure to set the re-use policy.
func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return nil, false, err
	}
	return compress(in, s, s.compress1X)
}

// Compress4X will compress the input. The input is split into 4 independent blocks
// and compressed similar to Compress1X.
// The output can be decoded using Decompress4X.
// Supply a Scratch object. The scratch object contains state about re-use,
// So when sharing across independent encodes, be sure to set the re-use policy.
func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return nil, false, err
	}
	// Parallel path is disabled: compress4Xp was measured only slightly faster
	// than the serial version, so it is kept but not used.
	if false {
		// TODO: compress4Xp only slightly faster.
		const parallelThreshold = 8 << 10
		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
			return compress(in, s, s.compress4X)
		}
		return compress(in, s, s.compress4Xp)
	}
	return compress(in, s, s.compress4X)
}

// compress is the shared driver for Compress1X/Compress4X.
// It builds (or reuses) a Huffman table according to s.Reuse, then runs the
// supplied compressor over in. Returns the output, whether the previous
// table was reused, and an error. ErrIncompressible / ErrUseRLE signal the
// caller to fall back to stored or RLE encoding.
func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
	// Nuke previous table if we cannot reuse anyway.
	if s.Reuse == ReusePolicyNone {
		s.prevTable = s.prevTable[:0]
	}

	// Create histogram, if none was provided.
	// A non-zero s.maxCount means the caller pre-filled s.count.
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			// Histogram counts more bytes than the input holds: caller error.
			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return nil, false, ErrIncompressible
		}
		// One symbol, use RLE
		return nil, false, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, false, ErrIncompressible
	}
	if s.Reuse == ReusePolicyMust && !canReuse {
		// We must reuse, but we can't.
		return nil, false, ErrIncompressible
	}
	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
		// Try the previous table first: temporarily swap it in, compress,
		// then restore the current table regardless of the outcome.
		keepTable := s.cTable
		keepTL := s.actualTableLog
		s.cTable = s.prevTable
		s.actualTableLog = s.prevTableLog
		s.Out, err = compressor(in)
		s.cTable = keepTable
		s.actualTableLog = keepTL
		if err == nil && len(s.Out) < wantSize {
			s.OutData = s.Out
			return s.Out, true, nil
		}
		if s.Reuse == ReusePolicyMust {
			return nil, false, ErrIncompressible
		}
		// Do not attempt to re-use later.
		s.prevTable = s.prevTable[:0]
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return nil, false, err
	}

	// Debug-only sanity check, disabled in normal builds.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	if s.Reuse == ReusePolicyAllow && canReuse {
		// Estimate whether reusing the old table (no header needed) beats
		// emitting a new table header plus newly-coded data.
		hSize := len(s.Out)
		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
			// Retain cTable even if we re-use.
			keepTable := s.cTable
			keepTL := s.actualTableLog

			s.cTable = s.prevTable
			s.actualTableLog = s.prevTableLog
			s.Out, err = compressor(in)

			// Restore ctable.
			s.cTable = keepTable
			s.actualTableLog = keepTL
			if err != nil {
				return nil, false, err
			}
			if len(s.Out) >= wantSize {
				return nil, false, ErrIncompressible
			}
			s.OutData = s.Out
			return s.Out, true, nil
		}
	}

	// Use new table
	err = s.cTable.write(s)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	s.OutTable = s.Out

	// Compress using new table
	s.Out, err = compressor(in)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	if len(s.Out) >= wantSize {
		s.OutTable = nil
		return nil, false, ErrIncompressible
	}
	// Move current table into previous.
	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
	// s.Out starts with the serialized table; OutData is the payload after it.
	s.OutData = s.Out[len(s.OutTable):]
	return s.Out, false, nil
}

// EstimateSizes will estimate the data sizes: the table header size, the
// data size with a freshly built table, and the data size when reusing the
// previous table (-1 if reuse is not possible). Mirrors the early-out
// logic of compress (ErrIncompressible / ErrUseRLE) without producing output.
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return 0, 0, 0, err
	}

	// Create histogram, if none was provided.
	tableSz, dataSz, reuseSz = -1, -1, -1
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return 0, 0, 0, ErrIncompressible
		}
		// One symbol, use RLE
		return 0, 0, 0, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return 0, 0, 0, ErrIncompressible
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return 0, 0, 0, err
	}

	// Debug-only sanity check, disabled in normal builds.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	tableSz, err = s.cTable.estTableSize(s)
	if err != nil {
		return 0, 0, 0, err
	}
	if canReuse {
		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
	}
	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])

	// Restore
	return tableSz, dataSz, reuseSz, nil
}

// compress1X encodes src as a single stream, appending to s.Out.
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
	return s.compress1xDo(s.Out, src)
}

// compress1xDo encodes src with the current cTable, appending the bitstream
// to dst. Symbols are emitted back-to-front (the decoder reads in reverse):
// first the 0-3 trailing bytes, then the body four symbols at a time.
func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
	var bw = bitWriter{out: dst}

	// N is length divisible by 4.
	n := len(src)
	n -= n & 3
	// Full-size slice lets the compiler elide bounds checks below.
	cTable := s.cTable[:256]

	// Encode last bytes.
	for i := len(src) & 3; i > 0; i-- {
		bw.encSymbol(cTable, src[n+i-1])
	}
	n -= 4
	if s.actualTableLog <= 8 {
		// 4 symbols of <=8 bits each fit in one 32-bit flush window.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
		}
	} else {
		// Longer codes: flush after every two symbols to avoid overflow.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
	err := bw.close()
	return bw.out, err
}

// sixZeros is the placeholder written for the 4X jump table (3 x uint16).
var sixZeros [6]byte

// compress4X splits src into 4 roughly equal segments and encodes each as an
// independent 1X stream. A 6-byte jump table (three little-endian uint16
// lengths; the last segment's length is implicit) precedes the streams.
func (s *Scratch) compress4X(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	segmentSize := (len(src) + 3) / 4

	// Add placeholder for output length
	offsetIdx := len(s.Out)
	s.Out = append(s.Out, sixZeros[:]...)

	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		var err error
		idx := len(s.Out)
		s.Out, err = s.compress1xDo(s.Out, toDo)
		if err != nil {
			return nil, err
		}
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			length := len(s.Out) - idx
			s.Out[i*2+offsetIdx] = byte(length)
			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
		}
	}

	return s.Out, nil
}

// compress4Xp will compress 4 streams using separate goroutines.
func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	// Add placeholder for output length
	// NOTE(review): assumes s.Out has capacity >= 6 — presumably guaranteed
	// by s.prepare; confirm against the Scratch setup code.
	s.Out = s.Out[:6]

	segmentSize := (len(src) + 3) / 4
	var wg sync.WaitGroup
	var errs [4]error
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		// Separate goroutine for each block.
		// Each goroutine writes only its own s.tmpOut[i] / errs[i] slot,
		// so no locking is needed; wg.Wait below is the sync point.
		go func(i int) {
			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	// Stitch the four streams together behind the 6-byte jump table.
	for i := 0; i < 4; i++ {
		if errs[i] != nil {
			return nil, errs[i]
		}
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			s.Out[i*2] = byte(len(o))
			s.Out[i*2+1] = byte(len(o) >> 8)
		}

		// Write output.
		s.Out = append(s.Out, o...)
	}
	return s.Out, nil
}

// countSimple will create a simple histogram in s.count.
// Returns the biggest count.
// Does not update s.clearCount.
359 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { 360 reuse = true 361 for _, v := range in { 362 s.count[v]++ 363 } 364 m := uint32(0) 365 if len(s.prevTable) > 0 { 366 for i, v := range s.count[:] { 367 if v == 0 { 368 continue 369 } 370 if v > m { 371 m = v 372 } 373 s.symbolLen = uint16(i) + 1 374 if i >= len(s.prevTable) { 375 reuse = false 376 } else if s.prevTable[i].nBits == 0 { 377 reuse = false 378 } 379 } 380 return int(m), reuse 381 } 382 for i, v := range s.count[:] { 383 if v == 0 { 384 continue 385 } 386 if v > m { 387 m = v 388 } 389 s.symbolLen = uint16(i) + 1 390 } 391 return int(m), false 392 } 393 394 func (s *Scratch) canUseTable(c cTable) bool { 395 if len(c) < int(s.symbolLen) { 396 return false 397 } 398 for i, v := range s.count[:s.symbolLen] { 399 if v != 0 && c[i].nBits == 0 { 400 return false 401 } 402 } 403 return true 404 } 405 406 //lint:ignore U1000 used for debugging 407 func (s *Scratch) validateTable(c cTable) bool { 408 if len(c) < int(s.symbolLen) { 409 return false 410 } 411 for i, v := range s.count[:s.symbolLen] { 412 if v != 0 { 413 if c[i].nBits == 0 { 414 return false 415 } 416 if c[i].nBits > s.actualTableLog { 417 return false 418 } 419 } 420 } 421 return true 422 } 423 424 // minTableLog provides the minimum logSize to safely represent a distribution. 
// minTableLog provides the minimum logSize to safely represent a distribution.
// It is the smaller of the bound implied by the remaining input size and the
// bound implied by the number of distinct symbols.
func (s *Scratch) minTableLog() uint8 {
	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}

// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog,
// clamping the requested s.TableLog between the minimum needed for the symbol
// set and the format maximums.
func (s *Scratch) optimalTableLog() {
	tableLog := s.TableLog
	minBits := s.minTableLog()
	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
	if maxBitsSrc < tableLog {
		// Accuracy can be reduced
		tableLog = maxBitsSrc
	}
	if minBits > tableLog {
		tableLog = minBits
	}
	// Need a minimum to safely represent all symbol values
	if tableLog < minTablelog {
		tableLog = minTablelog
	}
	if tableLog > tableLogMax {
		tableLog = tableLogMax
	}
	s.actualTableLog = tableLog
}

// cTableEntry is one symbol's code in the compression table.
type cTableEntry struct {
	val   uint16 // code value, LSB-aligned
	nBits uint8  // code length in bits; 0 means symbol unused
	// We have 8 bits extra
}

const huffNodesMask = huffNodesLen - 1

// buildCTable builds the canonical Huffman code for the histogram in s.count
// and stores it in s.cTable. Ported from the zstd reference implementation:
// sort symbols by count, build the tree bottom-up, limit its height to
// s.actualTableLog, then assign canonical code values per bit length.
func (s *Scratch) buildCTable() error {
	s.optimalTableLog()
	s.huffSort()
	if cap(s.cTable) < maxSymbolValue+1 {
		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
	} else {
		s.cTable = s.cTable[:s.symbolLen]
		for i := range s.cTable {
			s.cTable[i] = cTableEntry{}
		}
	}

	// Leaves occupy nodes [0, symbolLen); internal nodes start after them.
	var startNode = int16(s.symbolLen)
	nonNullRank := s.symbolLen - 1

	nodeNb := startNode
	huffNode := s.nodes[1 : huffNodesLen+1]

	// This overlays the slice above, but allows "-1" index lookups.
	// Different from reference implementation.
	huffNode0 := s.nodes[0 : huffNodesLen+1]

	// Skip trailing zero-count entries (huffSort sorts descending).
	for huffNode[nonNullRank].count() == 0 {
		nonNullRank--
	}

	// Combine the two smallest leaves into the first internal node.
	lowS := int16(nonNullRank)
	nodeRoot := nodeNb + lowS - 1
	lowN := nodeNb
	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
	huffNode[lowS].setParent(nodeNb)
	huffNode[lowS-1].setParent(nodeNb)
	nodeNb++
	lowS -= 2
	// Mark not-yet-built internal nodes with a huge count so they are
	// never picked before they exist.
	for n := nodeNb; n <= nodeRoot; n++ {
		huffNode[n].setCount(1 << 30)
	}
	// fake entry, strong barrier
	huffNode0[0].setCount(1 << 31)

	// create parents
	// Repeatedly merge the two cheapest available nodes (either the next
	// unmerged leaf at lowS or the next internal node at lowN).
	for nodeNb <= nodeRoot {
		var n1, n2 int16
		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
			n1 = lowS
			lowS--
		} else {
			n1 = lowN
			lowN++
		}
		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
			n2 = lowS
			lowS--
		} else {
			n2 = lowN
			lowN++
		}

		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
		huffNode0[n1+1].setParent(nodeNb)
		huffNode0[n2+1].setParent(nodeNb)
		nodeNb++
	}

	// distribute weights (unlimited tree height)
	huffNode[nodeRoot].setNbBits(0)
	for n := nodeRoot - 1; n >= startNode; n-- {
		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
	}
	for n := uint16(0); n <= nonNullRank; n++ {
		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
	}
	// Enforce the maximum code length, redistributing bits as needed.
	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
	maxNbBits := s.actualTableLog

	// fill result into tree (val, nbBits)
	if maxNbBits > tableLogMax {
		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
	}
	var nbPerRank [tableLogMax + 1]uint16
	var valPerRank [16]uint16
	for _, v := range huffNode[:nonNullRank+1] {
		nbPerRank[v.nbBits()]++
	}
	// determine starting value per rank (canonical code assignment)
	{
		min := uint16(0)
		for n := maxNbBits; n > 0; n-- {
			// get starting value within each rank
			valPerRank[n] = min
			min += nbPerRank[n]
			min >>= 1
		}
	}

	// push nbBits per symbol, symbol order
	for _, v := range huffNode[:nonNullRank+1] {
		s.cTable[v.symbol()].nBits = v.nbBits()
	}

	// assign value within rank, symbol order
	t := s.cTable[:s.symbolLen]
	for n, val := range t {
		nbits := val.nBits & 15
		v := valPerRank[nbits]
		t[n].val = v
		valPerRank[nbits] = v + 1
	}

	return nil
}

// huffSort will sort symbols, decreasing order.
// It bucket-sorts by log2(count), then insertion-sorts within each bucket,
// writing the result into s.nodes (offset by one for the -1 barrier slot).
func (s *Scratch) huffSort() {
	type rankPos struct {
		base    uint32
		current uint32
	}

	// Clear nodes
	nodes := s.nodes[:huffNodesLen+1]
	s.nodes = nodes
	nodes = nodes[1 : huffNodesLen+1]

	// Sort into buckets based on length of symbol count.
	var rank [32]rankPos
	for _, v := range s.count[:s.symbolLen] {
		r := highBit32(v+1) & 31
		rank[r].base++
	}
	// maxBitLength is log2(BlockSizeMax) + 1
	const maxBitLength = 18 + 1
	// Prefix-sum so each bucket knows its end position (descending order).
	for n := maxBitLength; n > 0; n-- {
		rank[n-1].base += rank[n].base
	}
	for n := range rank[:maxBitLength] {
		rank[n].current = rank[n].base
	}
	for n, c := range s.count[:s.symbolLen] {
		r := (highBit32(c+1) + 1) & 31
		pos := rank[r].current
		rank[r].current++
		// Insertion sort within the bucket to keep it descending by count.
		prev := nodes[(pos-1)&huffNodesMask]
		for pos > rank[r].base && c > prev.count() {
			nodes[pos&huffNodesMask] = prev
			pos--
			prev = nodes[(pos-1)&huffNodesMask]
		}
		nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
	}
}

// setMaxHeight limits all code lengths to s.actualTableLog, repaying the
// "cost" of the shortened codes by lengthening other codes, following the
// zstd reference HUF_setMaxHeight algorithm. lastNonNull is the index of the
// last used symbol; returns the resulting maximum bit length.
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
	maxNbBits := s.actualTableLog
	huffNode := s.nodes[1 : huffNodesLen+1]
	//huffNode = huffNode[: huffNodesLen]

	largestBits := huffNode[lastNonNull].nbBits()

	// early exit : no elt > maxNbBits
	if largestBits <= maxNbBits {
		return largestBits
	}
	totalCost := int(0)
	baseCost := int(1) << (largestBits - maxNbBits)
	n := uint32(lastNonNull)

	// Clamp every over-long code to maxNbBits, accumulating the Kraft-sum
	// debt that must be repaid by lengthening other codes.
	for huffNode[n].nbBits() > maxNbBits {
		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
		huffNode[n].setNbBits(maxNbBits)
		n--
	}
	// n stops at huffNode[n].nbBits <= maxNbBits

	for huffNode[n].nbBits() == maxNbBits {
		n--
	}
	// n end at index of smallest symbol using < maxNbBits

	// renorm totalCost
	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */

	// repay normalized cost
	{
		const noSymbol = 0xF0F0F0F0
		var rankLast [tableLogMax + 2]uint32

		for i := range rankLast[:] {
			rankLast[i] = noSymbol
		}

		// Get pos of last (smallest) symbol per rank
		{
			currentNbBits := maxNbBits
			for pos := int(n); pos >= 0; pos-- {
				if huffNode[pos].nbBits() >= currentNbBits {
					continue
				}
				currentNbBits = huffNode[pos].nbBits() // < maxNbBits
				rankLast[maxNbBits-currentNbBits] = uint32(pos)
			}
		}

		for totalCost > 0 {
			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1

			// Find the cheapest rank whose smallest symbol can absorb
			// one bit of the remaining cost.
			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
				highPos := rankLast[nBitsToDecrease]
				lowPos := rankLast[nBitsToDecrease-1]
				if highPos == noSymbol {
					continue
				}
				if lowPos == noSymbol {
					break
				}
				highTotal := huffNode[highPos].count()
				lowTotal := 2 * huffNode[lowPos].count()
				if highTotal <= lowTotal {
					break
				}
			}
			// only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
			// FIXME: try to remove
			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
				nBitsToDecrease++
			}
			totalCost -= 1 << (nBitsToDecrease - 1)
			if rankLast[nBitsToDecrease-1] == noSymbol {
				// this rank is no longer empty
				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
			}
			huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
				huffNode[rankLast[nBitsToDecrease]].nbBits())
			if rankLast[nBitsToDecrease] == 0 {
				/* special case, reached largest symbol */
				rankLast[nBitsToDecrease] = noSymbol
			} else {
				rankLast[nBitsToDecrease]--
				if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
				}
			}
		}

		for totalCost < 0 { /* Sometimes, cost correction overshoot */
			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
				for huffNode[n].nbBits() == maxNbBits {
					n--
				}
				huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
				rankLast[1] = n + 1
				totalCost++
				continue
			}
			huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
			rankLast[1]++
			totalCost++
		}
	}
	return maxNbBits
}

// A nodeElt is the fields
//
//	count uint32
//	parent uint16
//	symbol byte
//	nbBits uint8
//
// in some order, all squashed into an integer so that the compiler
// always loads and stores entire nodeElts instead of separate fields.
type nodeElt uint64

// Layout: bits 0-31 count, 32-47 parent, 48-55 symbol, 56-63 nbBits.
func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(uint64(symbol)<<48 | uint64(count))
}

func (e *nodeElt) count() uint32  { return uint32(*e & 0xffffffff) }
func (e *nodeElt) parent() uint16 { return uint16((*e >> 32) & 0xffff) }
func (e *nodeElt) symbol() byte   { return byte((*e >> 48) & 0xff) }
func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }

// The setters replace one field and leave the others untouched.
func (e *nodeElt) setCount(c uint32) { *e = *e>>32<<32 | nodeElt(c) }
func (e *nodeElt) setParent(p int16) { *e = *e&^(nodeElt(0xffff)<<32) | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = *e&^(nodeElt(0xff)<<56) | nodeElt(n)<<56 }