github.com/nacos-group/nacos-sdk-go@v1.1.4/clients/cache/concurrent_map.go (about)

     1  /*
     2  
     3  The MIT License (MIT)
     4  
     5  Copyright (c) 2014 streamrail
     6  
     7  Permission is hereby granted, free of charge, to any person obtaining a copy
     8  of this software and associated documentation files (the "Software"), to deal
     9  in the Software without restriction, including without limitation the rights
    10  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    11  copies of the Software, and to permit persons to whom the Software is
    12  furnished to do so, subject to the following conditions:
    13  
    14  The above copyright notice and this permission notice shall be included in all
    15  copies or substantial portions of the Software.
    16  
    17  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    18  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    19  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    20  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    21  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    22  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    23  SOFTWARE.
    24  
    25  */
    26  
    27  package cache
    28  
    29  import (
    30  	"encoding/json"
    31  	"sync"
    32  )
    33  
// SHARD_COUNT is the number of shards a ConcurrentMap is split into.
// Both NewConcurrentMap (shard allocation) and GetShard (index modulus)
// read it, so it must not be modified after any map has been created.
var SHARD_COUNT = 32
    35  
// ConcurrentMap is a "thread" safe map of type string:Anything.
// To avoid lock bottlenecks the map is divided into SHARD_COUNT shards,
// each guarding its own subset of the keys with an independent RWMutex.
type ConcurrentMap []*ConcurrentMapShared

// ConcurrentMapShared is a single shard: a plain string-to-anything map
// protected by an embedded read/write mutex.
type ConcurrentMapShared struct {
	items        map[string]interface{}
	sync.RWMutex // Read Write mutex, guards access to internal map.
}
    45  
    46  // Creates a new concurrent map.
    47  func NewConcurrentMap() ConcurrentMap {
    48  	m := make(ConcurrentMap, SHARD_COUNT)
    49  	for i := 0; i < SHARD_COUNT; i++ {
    50  		m[i] = &ConcurrentMapShared{items: make(map[string]interface{})}
    51  	}
    52  	return m
    53  }
    54  
    55  // Returns shard under given key
    56  func (m ConcurrentMap) GetShard(key string) *ConcurrentMapShared {
    57  	return m[uint(fnv32(key))%uint(SHARD_COUNT)]
    58  }
    59  
    60  func (m ConcurrentMap) MSet(data map[string]interface{}) {
    61  	for key, value := range data {
    62  		shard := m.GetShard(key)
    63  		shard.Lock()
    64  		shard.items[key] = value
    65  		shard.Unlock()
    66  	}
    67  }
    68  
    69  // Sets the given value under the specified key.
    70  func (m ConcurrentMap) Set(key string, value interface{}) {
    71  	// Get map shard.
    72  	shard := m.GetShard(key)
    73  	shard.Lock()
    74  	shard.items[key] = value
    75  	shard.Unlock()
    76  }
    77  
// UpsertCb is a callback that returns the new element to be inserted into
// the map. It receives whether the key already exists, the current value
// (nil if absent), and the caller-supplied new value.
// It is called while the shard lock is held, therefore it MUST NOT
// try to access other keys in the same map, as that can lead to deadlock
// since Go's sync.RWMutex is not reentrant.
type UpsertCb func(exist bool, valueInMap interface{}, newValue interface{}) interface{}
    83  
    84  // Insert or Update - updates existing element or inserts a new one using UpsertCb
    85  func (m ConcurrentMap) Upsert(key string, value interface{}, cb UpsertCb) (res interface{}) {
    86  	shard := m.GetShard(key)
    87  	shard.Lock()
    88  	v, ok := shard.items[key]
    89  	res = cb(ok, v, value)
    90  	shard.items[key] = res
    91  	shard.Unlock()
    92  	return res
    93  }
    94  
    95  // Sets the given value under the specified key if no value was associated with it.
    96  func (m ConcurrentMap) SetIfAbsent(key string, value interface{}) bool {
    97  	// Get map shard.
    98  	shard := m.GetShard(key)
    99  	shard.Lock()
   100  	_, ok := shard.items[key]
   101  	if !ok {
   102  		shard.items[key] = value
   103  	}
   104  	shard.Unlock()
   105  	return !ok
   106  }
   107  
   108  // Retrieves an element from map under given key.
   109  func (m ConcurrentMap) Get(key string) (interface{}, bool) {
   110  	// Get shard
   111  	shard := m.GetShard(key)
   112  	shard.RLock()
   113  	// Get item from shard.
   114  	val, ok := shard.items[key]
   115  	shard.RUnlock()
   116  	return val, ok
   117  }
   118  
   119  // Returns the number of elements within the map.
   120  func (m ConcurrentMap) Count() int {
   121  	count := 0
   122  	for i := 0; i < SHARD_COUNT; i++ {
   123  		shard := m[i]
   124  		shard.RLock()
   125  		count += len(shard.items)
   126  		shard.RUnlock()
   127  	}
   128  	return count
   129  }
   130  
   131  // Looks up an item under specified key
   132  func (m ConcurrentMap) Has(key string) bool {
   133  	// Get shard
   134  	shard := m.GetShard(key)
   135  	shard.RLock()
   136  	// See if element is within shard.
   137  	_, ok := shard.items[key]
   138  	shard.RUnlock()
   139  	return ok
   140  }
   141  
   142  // Removes an element from the map.
   143  func (m ConcurrentMap) Remove(key string) {
   144  	// Try to get shard.
   145  	shard := m.GetShard(key)
   146  	shard.Lock()
   147  	delete(shard.items, key)
   148  	shard.Unlock()
   149  }
   150  
   151  // Removes an element from the map and returns it
   152  func (m ConcurrentMap) Pop(key string) (v interface{}, exists bool) {
   153  	// Try to get shard.
   154  	shard := m.GetShard(key)
   155  	shard.Lock()
   156  	v, exists = shard.items[key]
   157  	delete(shard.items, key)
   158  	shard.Unlock()
   159  	return v, exists
   160  }
   161  
   162  // Checks if map is empty.
   163  func (m ConcurrentMap) IsEmpty() bool {
   164  	return m.Count() == 0
   165  }
   166  
// Tuple is used by the Iter & IterBuffered functions to wrap a key and its
// value together so both can travel over a single channel.
type Tuple struct {
	Key string
	Val interface{}
}
   172  
   173  // Returns an iterator which could be used in a for range loop.
   174  //
   175  // Deprecated: using IterBuffered() will get a better performence
   176  func (m ConcurrentMap) Iter() <-chan Tuple {
   177  	chans := snapshot(m)
   178  	ch := make(chan Tuple)
   179  	go fanIn(chans, ch)
   180  	return ch
   181  }
   182  
   183  // Returns a buffered iterator which could be used in a for range loop.
   184  func (m ConcurrentMap) IterBuffered() <-chan Tuple {
   185  	chans := snapshot(m)
   186  	total := 0
   187  	for _, c := range chans {
   188  		total += cap(c)
   189  	}
   190  	ch := make(chan Tuple, total)
   191  	go fanIn(chans, ch)
   192  	return ch
   193  }
   194  
   195  // Returns a array of channels that contains elements in each shard,
   196  // which likely takes a snapshot of `m`.
   197  // It returns once the size of each buffered channel is determined,
   198  // before all the channels are populated using goroutines.
   199  func snapshot(m ConcurrentMap) (chans []chan Tuple) {
   200  	chans = make([]chan Tuple, SHARD_COUNT)
   201  	wg := sync.WaitGroup{}
   202  	wg.Add(SHARD_COUNT)
   203  	// Foreach shard.
   204  	for index, shard := range m {
   205  		go func(index int, shard *ConcurrentMapShared) {
   206  			// Foreach key, value pair.
   207  			shard.RLock()
   208  			chans[index] = make(chan Tuple, len(shard.items))
   209  			wg.Done()
   210  			for key, val := range shard.items {
   211  				chans[index] <- Tuple{key, val}
   212  			}
   213  			shard.RUnlock()
   214  			close(chans[index])
   215  		}(index, shard)
   216  	}
   217  	wg.Wait()
   218  	return chans
   219  }
   220  
   221  // fanIn reads elements from channels `chans` into channel `out`
   222  func fanIn(chans []chan Tuple, out chan Tuple) {
   223  	wg := sync.WaitGroup{}
   224  	wg.Add(len(chans))
   225  	for _, ch := range chans {
   226  		go func(ch chan Tuple) {
   227  			for t := range ch {
   228  				out <- t
   229  			}
   230  			wg.Done()
   231  		}(ch)
   232  	}
   233  	wg.Wait()
   234  	close(out)
   235  }
   236  
   237  // Returns all items as map[string]interface{}
   238  func (m ConcurrentMap) Items() map[string]interface{} {
   239  	tmp := make(map[string]interface{})
   240  
   241  	// Insert items to temporary map.
   242  	for item := range m.IterBuffered() {
   243  		tmp[item.Key] = item.Val
   244  	}
   245  
   246  	return tmp
   247  }
   248  
// IterCb is an iterator callback, called for every key/value pair found in
// the map. The shard's RLock is held for all calls within that shard,
// therefore the callback sees a consistent view of a single shard,
// but not across shards.
type IterCb func(key string, v interface{})
   254  
   255  // Callback based iterator, cheapest way to read
   256  // all elements in a map.
   257  func (m ConcurrentMap) IterCb(fn IterCb) {
   258  	for idx := range m {
   259  		shard := (m)[idx]
   260  		shard.RLock()
   261  		for key, value := range shard.items {
   262  			fn(key, value)
   263  		}
   264  		shard.RUnlock()
   265  	}
   266  }
   267  
   268  // Return all keys as []string
   269  func (m ConcurrentMap) Keys() []string {
   270  	count := m.Count()
   271  	ch := make(chan string, count)
   272  	go func() {
   273  		// Foreach shard.
   274  		wg := sync.WaitGroup{}
   275  		wg.Add(SHARD_COUNT)
   276  		for _, shard := range m {
   277  			go func(shard *ConcurrentMapShared) {
   278  				// Foreach key, value pair.
   279  				shard.RLock()
   280  				for key := range shard.items {
   281  					ch <- key
   282  				}
   283  				shard.RUnlock()
   284  				wg.Done()
   285  			}(shard)
   286  		}
   287  		wg.Wait()
   288  		close(ch)
   289  	}()
   290  
   291  	// Generate keys
   292  	keys := make([]string, 0, count)
   293  	for k := range ch {
   294  		keys = append(keys, k)
   295  	}
   296  	return keys
   297  }
   298  
   299  //Reviles ConcurrentMap "private" variables to json marshal.
   300  func (m ConcurrentMap) MarshalJSON() ([]byte, error) {
   301  	// Create a temporary map, which will hold all item spread across shards.
   302  	tmp := make(map[string]interface{})
   303  
   304  	// Insert items to temporary map.
   305  	for item := range m.IterBuffered() {
   306  		tmp[item.Key] = item.Val
   307  	}
   308  	return json.Marshal(tmp)
   309  }
   310  
   311  func fnv32(key string) uint32 {
   312  	hash := uint32(2166136261)
   313  	const prime32 = uint32(16777619)
   314  	for i := 0; i < len(key); i++ {
   315  		hash *= prime32
   316  		hash ^= uint32(key[i])
   317  	}
   318  	return hash
   319  }
   320  
// ConcurrentMap uses interface{} as its value type, therefore JSON Unmarshal
// cannot know which concrete type to unmarshal into; in such a case
// we would end up with a value of type map[string]interface{}. In most cases this isn't
// our value type, which is why this functionality has been removed.
   325  
   326  // func (m *ConcurrentMap) UnmarshalJSON(b []byte) (err error) {
   327  // 	// Reverse process of Marshal.
   328  
   329  // 	tmp := make(map[string]interface{})
   330  
   331  // 	// Unmarshal into a single map.
   332  // 	if err := json.Unmarshal(b, &tmp); err != nil {
   333  // 		return nil
   334  // 	}
   335  
   336  // 	// foreach key,value pair in temporary map insert into our concurrent map.
   337  // 	for key, val := range tmp {
   338  // 		m.Set(key, val)
   339  // 	}
   340  // 	return nil
   341  // }