github.com/dubbogo/gost@v1.14.0/container/gxsync/consolidator.go

/*
Copyright 2019 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package gxsync

import (
	"sync"
	"sync/atomic"
)

import (
	cache "github.com/dubbogo/gost/container/gxlru"
)

// Consolidator prevents duplicate queries from executing simultaneously
// and shares results between them.
type Consolidator struct {
	*ConsolidatorCache

	mu      sync.Mutex
	queries map[string]*Result
}

// NewConsolidator creates a new Consolidator.
func NewConsolidator() *Consolidator {
	return &Consolidator{
		queries:           make(map[string]*Result),
		ConsolidatorCache: NewConsolidatorCache(1000),
	}
}

// Result is a wrapper for the result of a query.
type Result struct {
	// executing is used to block additional requests.
	// The original request holds a write lock while additional ones are blocked
	// on acquiring a read lock (see Wait() below).
	executing    sync.RWMutex
	consolidator *Consolidator
	query        string
	Result       interface{}
	Err          error
}

// Create adds a query to the set of currently executing queries and acquires
// a write lock on its Result if it is not already present. If the query is
// a duplicate, Create returns the existing Result and false.
func (co *Consolidator) Create(query string) (r *Result, created bool) {
	co.mu.Lock()
	defer co.mu.Unlock()
	ok := false
	if r, ok = co.queries[query]; ok {
		return r, false
	}
	r = &Result{consolidator: co, query: query}
	r.executing.Lock()
	co.queries[query] = r
	return r, true
}

// Broadcast removes the entry from current queries and releases the
// write lock on its Result. Broadcast should be invoked when the original
// query completes execution.
func (rs *Result) Broadcast() {
	rs.consolidator.mu.Lock()
	defer rs.consolidator.mu.Unlock()
	delete(rs.consolidator.queries, rs.query)
	rs.executing.Unlock()
}

// Wait waits for the original query to complete execution. Wait should
// be invoked for duplicate queries. It also records the query in the
// ConsolidatorCache so consolidations can be counted.
func (rs *Result) Wait() {
	rs.consolidator.Record(rs.query)
	rs.executing.RLock()
}
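
// A typical consolidation flow looks like the minimal sketch below. It is
// illustrative only: co is assumed to be a *Consolidator, sql a query string,
// and fetchFromDB a hypothetical caller-side function, not part of this package.
//
//	r, created := co.Create(sql)
//	if created {
//		// First caller: run the query, publish the outcome, wake the waiters.
//		r.Result, r.Err = fetchFromDB(sql)
//		r.Broadcast()
//	} else {
//		// Duplicate caller: block until Broadcast, then reuse the shared outcome.
//		r.Wait()
//	}
//	// In both branches r.Result and r.Err now hold the query's outcome.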

// ConsolidatorCache is a thread-safe object used for counting how often recent
// queries have been consolidated.
// It is also used by the txserializer package to count how often transactions
// have been queued and had to wait because they targeted the same row (range).
type ConsolidatorCache struct {
	*cache.LRUCache
}

// NewConsolidatorCache creates a new cache with the given capacity.
func NewConsolidatorCache(capacity int64) *ConsolidatorCache {
	return &ConsolidatorCache{cache.NewLRUCache(capacity)}
}

// Record increments the count for "query" by 1.
// If it's not in the cache yet, it will be added.
func (cc *ConsolidatorCache) Record(query string) {
	if v, ok := cc.Get(query); ok {
		v.(*ccount).add(1)
	} else {
		c := ccount(1)
		cc.Set(query, &c)
	}
}

// ConsolidatorCacheItem is a wrapper for the items in the consolidator cache.
type ConsolidatorCacheItem struct {
	Query string
	Count int64
}

// Items returns the items in the cache as a slice of ConsolidatorCacheItem
// (query text plus consolidation count).
func (cc *ConsolidatorCache) Items() []ConsolidatorCacheItem {
	items := cc.LRUCache.Items()
	ret := make([]ConsolidatorCacheItem, len(items))
	for i, v := range items {
		ret[i] = ConsolidatorCacheItem{Query: v.Key, Count: v.Value.(*ccount).get()}
	}
	return ret
}
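
// How counts are typically recorded and inspected, as a minimal sketch
// (assumes "fmt" is imported by the caller; the query string is illustrative only):
//
//	cc := NewConsolidatorCache(1000)
//	cc.Record("select * from t where id = :id")
//	cc.Record("select * from t where id = :id") // count for this query is now 2
//	for _, item := range cc.Items() {
//		fmt.Printf("%s: %d\n", item.Query, item.Count)
//	}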

// ccount elements are used with a gxlru.LRUCache object to count how often
// recent queries have been consolidated.
type ccount int64

// Size always returns 1 because we use the cache only to track queries,
// independent of the number of requests waiting for them.
// This implements the gxlru.Value interface.
func (cc *ccount) Size() int {
	return 1
}

// add atomically increments the counter by n and returns the new value.
func (cc *ccount) add(n int64) int64 {
	return atomic.AddInt64((*int64)(cc), n)
}

// get atomically loads the current counter value.
func (cc *ccount) get() int64 {
	return atomic.LoadInt64((*int64)(cc))
}
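
// The counter pattern above (a small integer type stored behind the
// gxlru.Value interface and updated through atomic operations on an *int64
// cast) behaves as in this minimal sketch:
//
//	var c ccount = 1
//	c.add(2)     // counter is now 3, updated atomically
//	_ = c.get()  // 3
//	_ = c.Size() // always 1: the cache sizes by query, not by waiter count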