github.com/ghodss/etcd@v0.3.1-0.20140417172404-cc329bfa55cb/mod/lock/v2/acquire_handler.go

package v2

import (
	"errors"
	"fmt"
	"net/http"
	"path"
	"strconv"
	"time"

	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/third_party/github.com/coreos/go-etcd/etcd"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"
)

// acquireHandler attempts to acquire a lock on the given key.
// The "key" parameter specifies the resource to lock.
// The "value" parameter specifies a value to associate with the lock.
// The "ttl" parameter specifies how long the lock will persist for.
// The "timeout" parameter specifies how long the request should wait for the lock.
// An omitted timeout waits indefinitely; a timeout of 0 attempts acquisition only once.
func (h *handler) acquireHandler(w http.ResponseWriter, req *http.Request) error {
	h.client.SyncCluster()

	// Set up a connection watcher. A nil closeChan simply never fires in the select below.
	var closeChan <-chan bool
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		closeChan = closeNotifier.CloseNotify()
	}

	// Wrap closeChan so we can pass it to subsequent components.
	timeoutChan := make(chan bool)
	stopChan := make(chan bool)
	go func() {
		select {
		case <-closeChan:
			// Client closed the connection.
			stopChan <- true
		case <-timeoutChan:
			// Timeout expired.
			stopChan <- true
		case <-stopChan:
		}
		close(stopChan)
	}()

	// Parse the lock "key".
	vars := mux.Vars(req)
	keypath := path.Join(prefix, vars["key"])
	value := req.FormValue("value")

	// Parse the "timeout" parameter.
	var timeout int
	var err error
	if req.FormValue("timeout") == "" {
		timeout = -1
	} else if timeout, err = strconv.Atoi(req.FormValue("timeout")); err != nil {
		return etcdErr.NewError(etcdErr.EcodeTimeoutNaN, "Acquire", 0)
	}

	// Parse the TTL.
	ttl, err := strconv.Atoi(req.FormValue("ttl"))
	if err != nil {
		return etcdErr.NewError(etcdErr.EcodeTTLNaN, "Acquire", 0)
	}

	// Search for an existing node holding this value.
	_, index, pos := h.findExistingNode(keypath, value)
	if index == 0 {
		// Node doesn't exist; create it.
		pos = -1 // Invalidate previous position.
		index, err = h.createNode(keypath, value, ttl)
		if err != nil {
			return err
		}
	}

	indexpath := path.Join(keypath, strconv.Itoa(index))

	// If pos != 0, we do not already have the lock.
	if pos != 0 {
		if timeout == 0 {
			// Attempt to get the lock once, without waiting.
			err = h.get(keypath, index)
		} else {
			// Keep updating the TTL while we wait.
			go h.ttlKeepAlive(keypath, value, ttl, stopChan)

			// Start the timeout countdown.
			go h.timeoutExpire(timeout, timeoutChan, stopChan)

			// Wait for the lock.
			err = h.watch(keypath, index, stopChan)
		}
	}

	// Return on error, deleting our lock request on the way.
	if err != nil {
		if index > 0 {
			h.client.Delete(indexpath, false)
		}
		return err
	}

	// Check for a client disconnect before we write the lock index.
	select {
	case <-stopChan:
		err = errors.New("user interrupted")
	default:
	}

	// Update the TTL one last time if the lock was acquired. Otherwise delete it.
	if err == nil {
		h.client.Update(indexpath, value, uint64(ttl))
	} else {
		h.client.Delete(indexpath, false)
	}

	// Write the response.
	w.Write([]byte(strconv.Itoa(index)))
	return nil
}

// createNode creates a new sequential node under the lock key and returns its index.
func (h *handler) createNode(keypath string, value string, ttl int) (int, error) {
	// Default the value to "-" if it is blank.
	if len(value) == 0 {
		value = "-"
	}

	// Create an incrementing id for the lock.
	resp, err := h.client.AddChild(keypath, value, uint64(ttl))
	if err != nil {
		return 0, err
	}
	indexpath := resp.Node.Key
	index, err := strconv.Atoi(path.Base(indexpath))
	return index, err
}

// findExistingNode searches the lock's nodes for one with the given value.
// It returns the matching node, its index, and its position in the queue,
// or (nil, 0, 0) if no match is found.
func (h *handler) findExistingNode(keypath string, value string) (*etcd.Node, int, int) {
	if len(value) > 0 {
		resp, err := h.client.Get(keypath, true, true)
		if err == nil {
			nodes := lockNodes{resp.Node.Nodes}
			if node, pos := nodes.FindByValue(value); node != nil {
				index, _ := strconv.Atoi(path.Base(node.Key))
				return node, index, pos
			}
		}
	}
	return nil, 0, 0
}

// ttlKeepAlive refreshes a key's TTL every ttl/2 seconds until the stop channel is closed.
func (h *handler) ttlKeepAlive(k string, value string, ttl int, stopChan chan bool) {
	for {
		select {
		case <-time.After(time.Duration(ttl/2) * time.Second):
			h.client.Update(k, value, uint64(ttl))
		case <-stopChan:
			return
		}
	}
}

// timeoutExpire starts a countdown when timeout is a positive integer.
// It cancels on stopChan and sends true on timeoutChan once the timer expires.
func (h *handler) timeoutExpire(timeout int, timeoutChan chan bool, stopChan chan bool) {
	// A non-positive timeout means there is no expiration timer.
	if timeout < 1 {
		return
	}
	select {
	case <-stopChan:
		return
	case <-time.After(time.Duration(timeout) * time.Second):
		timeoutChan <- true
		return
	}
}

// getLockIndex returns the index of the node queued immediately ahead of the
// given index (0 if we are at the front) together with a modified index to
// resume watching from.
func (h *handler) getLockIndex(keypath string, index int) (int, int, error) {
	// Read all nodes for the lock.
	resp, err := h.client.Get(keypath, true, true)
	if err != nil {
		return 0, 0, fmt.Errorf("lock watch lookup error: %s", err.Error())
	}
	nodes := lockNodes{resp.Node.Nodes}
	prevIndex, modifiedIndex := nodes.PrevIndex(index)
	return prevIndex, modifiedIndex, nil
}

// get tries to acquire the lock once, without waiting.
func (h *handler) get(keypath string, index int) error {
	prevIndex, _, err := h.getLockIndex(keypath, index)
	if err != nil {
		return err
	}
	if prevIndex == 0 {
		// Lock acquired.
		return nil
	}
	return fmt.Errorf("failed to acquire lock")
}

// watch waits for a given lock index to reach the front of the queue.
// It returns nil once the lock is acquired, or an error if the watch fails or is stopped.
func (h *handler) watch(keypath string, index int, closeChan <-chan bool) error {
	// Wrap closeChan so we can pass it to Client.Watch().
	stopWatchChan := make(chan bool)
	stopWrapChan := make(chan bool)
	go func() {
		select {
		case <-closeChan:
			stopWatchChan <- true
		case <-stopWrapChan:
			stopWatchChan <- true
		case <-stopWatchChan:
		}
	}()
	defer close(stopWrapChan)

	for {
		prevIndex, modifiedIndex, err := h.getLockIndex(keypath, index)
		if err != nil {
			return err
		}

		// If there is no previous index then we have the lock.
		if prevIndex == 0 {
			return nil
		}

		// Wait from the last modification of the node.
		waitIndex := modifiedIndex + 1

		_, err = h.client.Watch(path.Join(keypath, strconv.Itoa(prevIndex)), uint64(waitIndex), false, nil, stopWatchChan)
		if err == etcd.ErrWatchStoppedByUser {
			return fmt.Errorf("lock watch closed")
		} else if err != nil {
			return fmt.Errorf("lock watch error: %s", err.Error())
		}
		// The node ahead of us changed; loop and re-check our position
		// rather than assuming the lock is now ours.
	}
}
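
For reference, a minimal client sketch of how this handler might be exercised over HTTP. The mount point (/mod/v2/lock), the address (127.0.0.1:4001), the lock name "mylock", and the use of POST for acquisition are assumptions about how the module is wired up elsewhere in the repository; the "ttl" and "timeout" parameters and the index written back on success come from acquireHandler above.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Assumed mount point for the lock module; "mylock" becomes the mux "key" variable.
	lockURL := "http://127.0.0.1:4001/mod/v2/lock/mylock"

	// Acquire: hold the lock for 60 seconds and wait up to 10 seconds for it.
	// acquireHandler reads "ttl" and "timeout" via req.FormValue.
	resp, err := http.PostForm(lockURL, url.Values{
		"ttl":     {"60"},
		"timeout": {"10"},
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the handler writes the lock's index as the response body.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("acquired lock index: %s\n", body)
}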