github.com/AntonOrnatskyi/goproxy@v0.0.0-20190205095733-4526a9fa18b4/core/lib/mapx/map.go

package mapx

import (
	"encoding/json"
	"fmt"
	"runtime/debug"
	"sync"
)

var SHARD_COUNT = 32

// A "thread" safe map of type string:Anything.
// To avoid lock bottlenecks this map is divided into several (SHARD_COUNT) map shards.
type ConcurrentMap []*ConcurrentMapShared

// A "thread" safe string to anything map.
type ConcurrentMapShared struct {
	items        map[string]interface{}
	sync.RWMutex // Read Write mutex, guards access to internal map.
}

// Creates a new concurrent map.
func NewConcurrentMap() ConcurrentMap {
	m := make(ConcurrentMap, SHARD_COUNT)
	for i := 0; i < SHARD_COUNT; i++ {
		m[i] = &ConcurrentMapShared{items: make(map[string]interface{})}
	}
	return m
}
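
// Illustrative sketch of basic usage (hypothetical helper, not part of the
// package API): create a map, then Set, Get and Remove a key.
func exampleBasicUsage() {
	m := NewConcurrentMap()
	m.Set("answer", 42)
	if v, ok := m.Get("answer"); ok {
		fmt.Println(v) // 42
	}
	m.Remove("answer")
	fmt.Println(m.Has("answer")) // false
}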

// Returns the shard for the given key.
func (m ConcurrentMap) GetShard(key string) *ConcurrentMapShared {
	return m[uint(fnv32(key))%uint(SHARD_COUNT)]
}

// Sets every key/value pair from the given map.
func (m ConcurrentMap) MSet(data map[string]interface{}) {
	for key, value := range data {
		shard := m.GetShard(key)
		shard.Lock()
		shard.items[key] = value
		shard.Unlock()
	}
}

// Sets the given value under the specified key.
func (m ConcurrentMap) Set(key string, value interface{}) {
	// Get map shard.
	shard := m.GetShard(key)
	shard.Lock()
	shard.items[key] = value
	shard.Unlock()
}

// Callback to return the new element to be inserted into the map.
// It is called while the shard lock is held, therefore it MUST NOT
// try to access other keys in the same map, as that can lead to deadlock
// since Go's sync.RWMutex is not reentrant.
type UpsertCb func(exist bool, valueInMap interface{}, newValue interface{}) interface{}

// Insert or Update - updates an existing element or inserts a new one using UpsertCb.
func (m ConcurrentMap) Upsert(key string, value interface{}, cb UpsertCb) (res interface{}) {
	shard := m.GetShard(key)
	shard.Lock()
	v, ok := shard.items[key]
	res = cb(ok, v, value)
	shard.items[key] = res
	shard.Unlock()
	return res
}
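
// Illustrative sketch (hypothetical helper, not part of the package API):
// an UpsertCb that appends new values to a slice stored under one key.
// Note the callback only touches its arguments, never the map itself,
// since the shard lock is held while it runs.
func exampleUpsert() {
	m := NewConcurrentMap()
	appendCb := func(exist bool, valueInMap interface{}, newValue interface{}) interface{} {
		if !exist {
			return []interface{}{newValue}
		}
		return append(valueInMap.([]interface{}), newValue)
	}
	m.Upsert("events", "login", appendCb)
	m.Upsert("events", "logout", appendCb)
	// m.Get("events") now returns []interface{}{"login", "logout"}, true
}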

// Sets the given value under the specified key if no value was associated with it.
func (m ConcurrentMap) SetIfAbsent(key string, value interface{}) bool {
	// Get map shard.
	shard := m.GetShard(key)
	shard.Lock()
	_, ok := shard.items[key]
	if !ok {
		shard.items[key] = value
	}
	shard.Unlock()
	return !ok
}
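
// Illustrative sketch (hypothetical helper, not part of the package API):
// SetIfAbsent reports whether the value was stored, which makes it handy
// for "first writer wins" initialization.
func exampleSetIfAbsent() {
	m := NewConcurrentMap()
	first := m.SetIfAbsent("config", "v1")  // true, value stored
	second := m.SetIfAbsent("config", "v2") // false, existing value kept
	fmt.Println(first, second)              // true false
}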

// Retrieves an element from the map under the given key.
func (m ConcurrentMap) Get(key string) (interface{}, bool) {
	// Get shard
	shard := m.GetShard(key)
	shard.RLock()
	// Get item from shard.
	val, ok := shard.items[key]
	shard.RUnlock()
	return val, ok
}

// Returns the number of elements within the map.
func (m ConcurrentMap) Count() int {
	count := 0
	for i := 0; i < SHARD_COUNT; i++ {
		shard := m[i]
		shard.RLock()
		count += len(shard.items)
		shard.RUnlock()
	}
	return count
}

// Looks up an item under the specified key.
func (m ConcurrentMap) Has(key string) bool {
	// Get shard
	shard := m.GetShard(key)
	shard.RLock()
	// See if element is within shard.
	_, ok := shard.items[key]
	shard.RUnlock()
	return ok
}

// Removes an element from the map.
func (m ConcurrentMap) Remove(key string) {
	// Try to get shard.
	shard := m.GetShard(key)
	shard.Lock()
	delete(shard.items, key)
	shard.Unlock()
}

// Removes an element from the map and returns it.
func (m ConcurrentMap) Pop(key string) (v interface{}, exists bool) {
	// Try to get shard.
	shard := m.GetShard(key)
	shard.Lock()
	v, exists = shard.items[key]
	delete(shard.items, key)
	shard.Unlock()
	return v, exists
}

// Checks if the map is empty.
func (m ConcurrentMap) IsEmpty() bool {
	return m.Count() == 0
}

// Used by the Iter & IterBuffered functions to wrap two variables together over a channel.
type Tuple struct {
	Key string
	Val interface{}
}

// Returns an iterator which could be used in a for range loop.
//
// Deprecated: use IterBuffered() instead for better performance.
func (m ConcurrentMap) Iter() <-chan Tuple {
	chans := snapshot(m)
	ch := make(chan Tuple)
	go func() {
		defer func() {
			if e := recover(); e != nil {
				fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
			}
		}()
		fanIn(chans, ch)
	}()
	return ch
}

// Returns a buffered iterator which could be used in a for range loop.
func (m ConcurrentMap) IterBuffered() <-chan Tuple {
	chans := snapshot(m)
	total := 0
	for _, c := range chans {
		total += cap(c)
	}
	ch := make(chan Tuple, total)
	go func() {
		defer func() {
			if e := recover(); e != nil {
				fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
			}
		}()
		fanIn(chans, ch)
	}()
	return ch
}
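
// Illustrative sketch (hypothetical helper, not part of the package API):
// draining the buffered iterator with a for range loop. The channel is
// closed once all shard snapshots have been fanned in, so the loop ends.
func exampleIterBuffered() {
	m := NewConcurrentMap()
	m.Set("a", 1)
	m.Set("b", 2)
	for item := range m.IterBuffered() {
		fmt.Println(item.Key, item.Val)
	}
}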

// Returns an array of channels that contain the elements in each shard,
// which together form a snapshot of `m`.
// It returns once the size of each buffered channel has been determined,
// before all the channels are populated using goroutines.
func snapshot(m ConcurrentMap) (chans []chan Tuple) {
	chans = make([]chan Tuple, SHARD_COUNT)
	wg := sync.WaitGroup{}
	wg.Add(SHARD_COUNT)
	// Foreach shard.
	for index, shard := range m {
		go func(index int, shard *ConcurrentMapShared) {
			defer func() {
				if e := recover(); e != nil {
					fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
				}
			}()
			// Foreach key, value pair.
			shard.RLock()
			chans[index] = make(chan Tuple, len(shard.items))
			wg.Done()
			for key, val := range shard.items {
				chans[index] <- Tuple{key, val}
			}
			shard.RUnlock()
			close(chans[index])
		}(index, shard)
	}
	wg.Wait()
	return chans
}

// fanIn reads elements from channels `chans` into channel `out`.
func fanIn(chans []chan Tuple, out chan Tuple) {
	wg := sync.WaitGroup{}
	wg.Add(len(chans))
	for _, ch := range chans {
		// Pass ch as an argument so each goroutine drains its own channel
		// instead of capturing the shared loop variable.
		go func(ch chan Tuple) {
			defer func() {
				if e := recover(); e != nil {
					fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
				}
			}()
			for t := range ch {
				out <- t
			}
			wg.Done()
		}(ch)
	}
	wg.Wait()
	close(out)
}

// Returns all items as map[string]interface{}.
func (m ConcurrentMap) Items() map[string]interface{} {
	tmp := make(map[string]interface{})

	// Insert items to temporary map.
	for item := range m.IterBuffered() {
		tmp[item.Key] = item.Val
	}

	return tmp
}

// Iterator callback, called for every key/value pair found in
// the map. RLock is held for all calls within a given shard,
// therefore the callback sees a consistent view of a shard,
// but not across shards.
type IterCb func(key string, v interface{})

// Callback based iterator, the cheapest way to read
// all elements in the map.
func (m ConcurrentMap) IterCb(fn IterCb) {
	for idx := range m {
		shard := m[idx]
		shard.RLock()
		for key, value := range shard.items {
			fn(key, value)
		}
		shard.RUnlock()
	}
}
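
// Illustrative sketch (hypothetical helper, not part of the package API):
// counting elements with the callback iterator, which avoids the channel
// and snapshot overhead of Iter/IterBuffered.
func exampleIterCb() {
	m := NewConcurrentMap()
	m.Set("a", 1)
	m.Set("b", 2)
	total := 0
	m.IterCb(func(key string, v interface{}) {
		total++
	})
	fmt.Println(total) // 2
}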

// Returns all keys as []string.
func (m ConcurrentMap) Keys() []string {
	count := m.Count()
	ch := make(chan string, count)
	go func() {
		defer func() {
			if e := recover(); e != nil {
				fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
			}
		}()
		// Foreach shard.
		wg := sync.WaitGroup{}
		wg.Add(SHARD_COUNT)
		for _, shard := range m {
			// Pass shard as an argument so each goroutine reads its own shard
			// instead of capturing the shared loop variable.
			go func(shard *ConcurrentMapShared) {
				defer func() {
					if e := recover(); e != nil {
						fmt.Printf("crashed, err: %s\nstack:\n%s", e, string(debug.Stack()))
					}
				}()
				// Foreach key.
				shard.RLock()
				for key := range shard.items {
					ch <- key
				}
				shard.RUnlock()
				wg.Done()
			}(shard)
		}
		wg.Wait()
		close(ch)
	}()

	// Generate keys.
	keys := make([]string, 0, count)
	for k := range ch {
		keys = append(keys, k)
	}
	return keys
}

// Reveals ConcurrentMap "private" variables to the JSON marshaler.
func (m ConcurrentMap) MarshalJSON() ([]byte, error) {
	// Create a temporary map, which will hold all items spread across shards.
	tmp := make(map[string]interface{})

	// Insert items to temporary map.
	for item := range m.IterBuffered() {
		tmp[item.Key] = item.Val
	}
	return json.Marshal(tmp)
}
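
// Illustrative sketch (hypothetical helper, not part of the package API):
// because ConcurrentMap implements json.Marshaler, it can be passed
// directly to json.Marshal.
func exampleMarshalJSON() {
	m := NewConcurrentMap()
	m.Set("name", "goproxy")
	if b, err := json.Marshal(m); err == nil {
		fmt.Println(string(b)) // {"name":"goproxy"}
	}
}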

// fnv32 hashes a string with the 32-bit FNV-1 algorithm; GetShard uses it
// to pick a shard for a key.
func fnv32(key string) uint32 {
	hash := uint32(2166136261)
	const prime32 = uint32(16777619)
	for i := 0; i < len(key); i++ {
		hash *= prime32
		hash ^= uint32(key[i])
	}
	return hash
}

// Concurrent map uses interface{} as its value, therefore JSON Unmarshal
// won't know which type to unmarshal into. In such a case we'd end up with
// a value of type map[string]interface{}; in most cases this isn't
// our value type, which is why we've decided to remove this functionality.

// func (m *ConcurrentMap) UnmarshalJSON(b []byte) (err error) {
// 	// Reverse process of Marshal.

// 	tmp := make(map[string]interface{})

// 	// Unmarshal into a single map.
// 	if err := json.Unmarshal(b, &tmp); err != nil {
// 		return nil
// 	}

// 	// foreach key,value pair in temporary map insert into our concurrent map.
// 	for key, val := range tmp {
// 		m.Set(key, val)
// 	}
// 	return nil
// }