github.com/leslie-fei/fastcache@v0.0.0-20240520092641-b7a9eb05711f/cache.go

package fastcache

import (
	"errors"
	"fmt"
	"unsafe"

	"github.com/leslie-fei/fastcache/gomemory"
	"github.com/leslie-fei/fastcache/mmap"
	"github.com/leslie-fei/fastcache/shm"
)

var (
	ErrNoSpace            = errors.New("no memory space available")
	ErrMemorySizeTooSmall = errors.New("memory size too small")
)

const (
	magic                 uint64 = 9259259527
	PageSize                     = 64 * KB
	perHashmapSlotLength         = 10
	perHashmapElementSize        = 32
)

var (
	sizeOfMetadata               = unsafe.Sizeof(Metadata{})
	sizeOfHashmap                = unsafe.Sizeof(hashMap{})
	sizeOfHashmapBucket          = unsafe.Sizeof(hashMapBucket{})
	sizeOfHashmapBucketElement   = unsafe.Sizeof(hashMapBucketElement{})
	sizeOfDataNode               = unsafe.Sizeof(DataNode{})
	sizeOfBlockFreeListContainer = unsafe.Sizeof(lruAndFreeContainer{})
)

// Cache is the key/value API exposed by this package. Operations are routed
// to per-shard structures and guarded by the lockers described in NewCache.
type Cache interface {
	// Get returns the value stored for key.
	Get(key string) ([]byte, error)
	// Set stores value under key, evicting older entries when space runs out.
	Set(key string, value []byte) error
	// Del removes key from the cache.
	Del(key string) error
	// Len reports the number of stored entries.
	Len() uint64
	// Peek returns the value stored for key; unlike Get it is not meant to
	// count as a regular access.
	Peek(key string) ([]byte, error)
}

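// NewCache creates a Cache backed by size bytes of memory; size must be at
// least 10*MB. c may be nil, in which case DefaultConfig is used; positive
// MemoryType and Shards values in c override the defaults, and the SHM and
// MMAP backends additionally require a non-empty MemoryKey. A hedged usage
// sketch, exampleUsage, is at the bottom of this file.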
func NewCache(size int, c *Config) (Cache, error) {
	// enforce a minimum memory size before doing anything else
	if size < 10*MB {
		return nil, ErrMemorySizeTooSmall
	}

	config := DefaultConfig()
	if c != nil {
		if c.MemoryType > 0 {
			config.MemoryType = c.MemoryType
		}
		if c.Shards > 0 {
			config.Shards = c.Shards
		}
		if c.MemoryKey != "" {
			// carry the caller's MemoryKey through for the SHM and MMAP backends
			config.MemoryKey = c.MemoryKey
		}
	}

	var mem Memory
	switch config.MemoryType {
	case GO:
		mem = gomemory.NewMemory(uint64(size))
	case SHM:
		if config.MemoryKey == "" {
			return nil, errors.New("shm MemoryKey is required")
		}
		mem = shm.NewMemory(config.MemoryKey, uint64(size), true)
	case MMAP:
		if config.MemoryKey == "" {
			return nil, errors.New("mmap MemoryKey is required")
		}
		mem = mmap.NewMemory(config.MemoryKey, uint64(size))
	default:
		return nil, fmt.Errorf("MemoryType: %d not supported", config.MemoryType)
	}

	if err := mem.Attach(); err != nil {
		return nil, err
	}

	/**
	1. metadata records the basic information about the memory region
	2. blockFreeContainer holds allocation pools for memory objects from 1 B up to 16 MB
	3. all data allocation goes through the FreeLists in blockFreeContainer; there are currently 25 FreeLists covering block sizes between 1 B and 16 MB (see the sizeClassIndex sketch after this function for an illustration of the size classes)
	4. a FreeList holds the available data blocks; a data block (DataNode) is a node of a singly linked list
	5. everything other than the metadata is obtained by taking a DataNode from a FreeList and casting it; all objects involved are fixed-size, which keeps allocation simple and efficient
	6. when space runs out and eviction is needed, the size required by the current Set selects the FreeList, and the oldest entry in that FreeList's LRU list is evicted to serve this Set
	7. the cache is split into shards; each shard object contains its own hashmap, locker, etc.
	8. process/thread safety is provided by a processLocker or a threadLocker: the threadLocker is used for plain Go memory, and the processLocker is used when the memory is shared between processes
	9. the processLocker works on shared memory with atomic CAS; if CAS keeps failing under heavy contention it falls back to a file lock, so it never busy-loops and burns CPU
	*/
	// Question: how does eviction work when memory runs out? A Set needs a key DataNode, a value DataNode and a bucketElement{key val} DataNode.
	// All of them are sized by the value length: if a 1 KB allocation (e.g. for a key) cannot be satisfied, a 1 KB value node is evicted to make room.
	metadata := (*Metadata)(mem.Ptr())
	// TODO global locker init
	metadata.GlobalLocker = &threadLocker{}
	metadata.GlobalLocker.Lock()
	defer metadata.GlobalLocker.Unlock()
	// new globalAllocator
	ga := &globalAllocator{
		mem:      mem,
		metadata: metadata,
	}
	// reinitialize the memory region if the magic number does not match or the total size changed
	reinitialize := metadata.Magic != magic || metadata.TotalSize != mem.Size()
	if reinitialize {
		metadata.Reset()
		metadata.Used = uint64(sizeOfMetadata)
		metadata.Magic = magic
		metadata.TotalSize = mem.Size()
		metadata.Shards = config.Shards
		// TODO shards memory allocator
	}
	// TODO shard and shard lockers
	// TODO allocate a minimal bigFreeContainer at init time, plus the freeContainer of each shard, to guarantee a minimum amount of space for allocation and eviction
	bigFreePtr, _, err := ga.Alloc(uint64(sizeOfBlockFreeListContainer))
	if err != nil {
		return nil, err
	}
	bigFreeContainer := (*lruAndFreeContainer)(bigFreePtr)
	bigFreeContainer.Init(ga.Base())
	shards := make([]*shard, metadata.Shards)
	for i := 0; i < len(shards); i++ {
		// locker
		locker := &threadLocker{}

		// hashmap
		bucketLen := 1024
		bucketSize := uint64(bucketLen) * uint64(sizeOfHashmapBucket)
		hashPtr, _, err := ga.Alloc(uint64(sizeOfHashmap) + bucketSize)
		if err != nil {
			return nil, err
		}
		hashmap := (*hashMap)(hashPtr)
		hashmap.bucketLen = uint32(bucketLen)

		// free block list
		freePtr, _, err := ga.Alloc(uint64(sizeOfBlockFreeListContainer))
		if err != nil {
			return nil, err
		}

		freeContainer := (*lruAndFreeContainer)(freePtr)
		freeContainer.Init(ga.Base())
		allocator := &shardAllocator{
			global:   ga,
			growSize: MB,
		}

		shr := &shard{
			locker:          locker,
			hashmap:         hashmap,
			container:       freeContainer,
			allocator:       allocator,
			globalAllocator: ga,
			bigContainer:    bigFreeContainer,
		}

		shards[i] = shr
	}

	return &cache{metadata: metadata, shards: shards}, nil
}
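
// sizeClassIndex is an illustrative sketch only, not the package's actual
// lruAndFreeContainer logic: it shows the size-class scheme described in the
// notes inside NewCache, namely 25 free lists holding fixed-size blocks from
// 1 B up to 16 MB, one per power of two. A request is rounded up to the next
// power of two; when that list is empty, the oldest entry in its LRU list is
// evicted and reused. The function name and exact rounding are assumptions
// made for illustration.
func sizeClassIndex(n uint64) int {
	idx := 0
	for size := uint64(1); size < n; size <<= 1 {
		idx++ // e.g. n=1 -> 0 (1 B), n=1024 -> 10 (1 KB), n=16*MB -> 24 (16 MB)
	}
	return idx
}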

type cache struct {
	metadata *Metadata
	shards   []*shard
	len      uint64
}

func (l *cache) Get(key string) ([]byte, error) {
	hash := xxHashString(key)
	shr := l.shard(hash)
	return shr.Get(hash, key)
}

func (l *cache) Peek(key string) ([]byte, error) {
	hash := xxHashString(key)
	shr := l.shard(hash)
	return shr.Peek(hash, key)
}

func (l *cache) Set(key string, value []byte) error {
	if len(key) > 16*KB {
		return ErrKeyTooLarge
	}

	if len(value) > 16*MB {
		return ErrValueTooLarge
	}

	hash := xxHashString(key)
	shr := l.shard(hash)
	return shr.Set(hash, key, value)
}

func (l *cache) Del(key string) error {
	hash := xxHashString(key)
	shr := l.shard(hash)
	return shr.Del(hash, key)
}

func (l *cache) Len() uint64 {
	return l.len
}

// shard maps a key hash onto one of the shards.
func (l *cache) shard(hash uint64) *shard {
	idx := hash % uint64(len(l.shards))
	return l.shards[idx]
}
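
// exampleUsage is a minimal, hedged usage sketch and is not called anywhere in
// the package: it shows how a caller might build a cache on plain Go memory
// via NewCache and drive it through the Cache interface. The 32*MB size and
// the Shards value are illustrative; the SHM and MMAP backends work the same
// way but additionally need a MemoryKey in the Config.
func exampleUsage() error {
	c, err := NewCache(32*MB, &Config{MemoryType: GO, Shards: 32})
	if err != nil {
		return err
	}
	if err := c.Set("hello", []byte("world")); err != nil {
		return err
	}
	value, err := c.Get("hello")
	if err != nil {
		return err
	}
	_ = value // "world"
	return c.Del("hello")
}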