# -------------------------------------------------------------
# A transaction writes an intent. The intent is pushed to a
# higher timestamp by a second transaction. The transaction then
# returns to re-acquire the intent at a new sequence number but
# still at the original timestamp. This is permitted but the
# lock's timestamp should not regress.
#
# Setup: txn1 acquires lock k
#        txn2 reads k and waits
#        txn2 pushes txn1
#
# Test: txn2 succeeds in pushing txn1's ts forward
#       txn2 proceeds
#       txn1 re-acquires lock k at new seq num, lower ts
# -------------------------------------------------------------

new-txn name=txn1 ts=10,1 epoch=0
----

new-txn name=txn2 ts=12,1 epoch=0
----

new-request name=req1 txn=txn1 ts=10,1
  put key=k value=v
----

new-request name=req2 txn=txn2 ts=12,1
  get key=k
----

sequence req=req1
----
[1] sequence req1: sequencing request
[1] sequence req1: acquiring latches
[1] sequence req1: scanning lock table for conflicting locks
[1] sequence req1: sequencing complete, returned guard

on-lock-acquired req=req1 key=k
----
[-] acquire lock: txn 00000001 @ k

finish req=req1
----
[-] finish req1: finishing request

sequence req=req2
----
[2] sequence req2: sequencing request
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: waiting in lock wait-queues
[2] sequence req2: pushing timestamp of txn 00000001 above 0.000000012,1
[2] sequence req2: blocked on select in concurrency_test.(*cluster).PushTransaction

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000010,1, info: unrepl epoch: 0, seqs: [0]
   waiting readers:
    req: 2, txn: 00000002-0000-0000-0000-000000000000
   distinguished req: 2
local: num=0

# --------------------------------
# Setup complete, test starts here
# --------------------------------

on-txn-updated txn=txn1 status=pending ts=12,2
----
[-] update txn: increasing timestamp of txn1
[2] sequence req2: resolving intent "k" for txn 00000001 with PENDING status
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: sequencing complete, returned guard

finish req=req2
----
[-] finish req2: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000012,2, info: unrepl epoch: 0, seqs: [0]
local: num=0

# Issue another write to the same key for txn1 at its initial
# timestamp. The timestamp in the lock table does not regress.
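
# A minimal Go sketch of the invariant exercised above, kept inside a
# comment so this data-driven file stays valid. The Timestamp type and
# forward helper are simplified stand-ins, not the actual lockTable
# code:
#
#   // Timestamp is a toy stand-in for an HLC timestamp.
#   type Timestamp struct {
#       Wall    int64
#       Logical int32
#   }
#
#   func (t Timestamp) Less(o Timestamp) bool {
#       return t.Wall < o.Wall || (t.Wall == o.Wall && t.Logical < o.Logical)
#   }
#
#   // forward returns the later of the held and newly acquired
#   // timestamps, so a re-acquisition can never move the lock back.
#   func forward(held, acquired Timestamp) Timestamp {
#       if held.Less(acquired) {
#           return acquired
#       }
#       return held
#   }
#
# Here the lock sits at 12,2 after the push and txn1 re-acquires at
# 10,1, so forward keeps 0.000000012,2 and only the new sequence
# number is appended.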

new-request name=req3 txn=txn1 ts=10,1
  put key=k value=v2 seq=1
----

sequence req=req3
----
[3] sequence req3: sequencing request
[3] sequence req3: acquiring latches
[3] sequence req3: scanning lock table for conflicting locks
[3] sequence req3: sequencing complete, returned guard

on-lock-acquired req=req3 key=k seq=1
----
[-] acquire lock: txn 00000001 @ k

finish req=req3
----
[-] finish req3: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000012,2, info: unrepl epoch: 0, seqs: [0, 1]
local: num=0

reset namespace
----

# -------------------------------------------------------------
# A transaction writes an intent. The intent is pushed to a
# higher timestamp by a second transaction. The transaction then
# returns to re-acquire the intent at a new epoch but still at
# the original timestamp. This is permitted but the lock's
# timestamp should not regress.
#
# Setup: txn1 acquires lock k
#        txn2 reads k and waits
#
# Test: txn2 pushes txn1's timestamp forward
#       txn2 proceeds
#       txn1 re-acquires lock k at new epoch, lower ts
# -------------------------------------------------------------

new-txn name=txn1 ts=10,1 epoch=0
----

new-txn name=txn2 ts=12,1 epoch=0
----

new-request name=req1 txn=txn1 ts=10,1
  put key=k value=v
----

new-request name=req2 txn=txn2 ts=12,1
  get key=k
----

sequence req=req1
----
[1] sequence req1: sequencing request
[1] sequence req1: acquiring latches
[1] sequence req1: scanning lock table for conflicting locks
[1] sequence req1: sequencing complete, returned guard

on-lock-acquired req=req1 key=k
----
[-] acquire lock: txn 00000001 @ k

finish req=req1
----
[-] finish req1: finishing request

sequence req=req2
----
[2] sequence req2: sequencing request
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: waiting in lock wait-queues
[2] sequence req2: pushing timestamp of txn 00000001 above 0.000000012,1
[2] sequence req2: blocked on select in concurrency_test.(*cluster).PushTransaction

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000010,1, info: unrepl epoch: 0, seqs: [0]
   waiting readers:
    req: 5, txn: 00000002-0000-0000-0000-000000000000
   distinguished req: 5
local: num=0

# --------------------------------
# Setup complete, test starts here
# --------------------------------

on-txn-updated txn=txn1 status=pending ts=12,2
----
[-] update txn: increasing timestamp of txn1
[2] sequence req2: resolving intent "k" for txn 00000001 with PENDING status
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: sequencing complete, returned guard

finish req=req2
----
[-] finish req2: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000012,2, info: unrepl epoch: 0, seqs: [0]
local: num=0

# The txn restarts at a new timestamp, but below the pushed
# timestamp. It re-issues the same write at the new epoch. The
# timestamp in the lock table does not regress.
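
# Continuing the sketch above (same hypothetical Timestamp and forward
# helpers, not the real lockTable types): a re-acquisition at a higher
# epoch restarts the sequence history, but the timestamp is still only
# forwarded, never rolled back:
#
#   // lockState is a toy model of a single lock's unreplicated state.
#   type lockState struct {
#       epoch int
#       seqs  []int
#       ts    Timestamp
#   }
#
#   func (l *lockState) reacquire(epoch, seq int, ts Timestamp) {
#       if epoch > l.epoch {
#           l.epoch, l.seqs = epoch, nil // a new epoch wipes the old seqs
#       }
#       l.seqs = append(l.seqs, seq)
#       l.ts = forward(l.ts, ts) // 12,2 stays even though txn1 is now at 11,1
#   }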

new-txn name=txn1 ts=11,1 epoch=1
----

new-request name=req3 txn=txn1 ts=11,1
  put key=k value=v2
----

sequence req=req3
----
[3] sequence req3: sequencing request
[3] sequence req3: acquiring latches
[3] sequence req3: scanning lock table for conflicting locks
[3] sequence req3: sequencing complete, returned guard

on-lock-acquired req=req3 key=k
----
[-] acquire lock: txn 00000001 @ k

finish req=req3
----
[-] finish req3: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000012,2, info: unrepl epoch: 1, seqs: [0]
local: num=0

reset namespace
----

# -------------------------------------------------------------
# A transaction acquires an unreplicated lock. The lock is
# pushed to a higher timestamp by a second transaction. The
# transaction then returns to upgrade the unreplicated lock to a
# replicated intent at a new sequence number but still at the
# original timestamp. This is permitted and the lock's timestamp
# regresses back down to the intent's timestamp. In practice, if
# the pusher wanted to prevent its push from being reverted, it
# should have also bumped the timestamp cache to ensure that the
# intent couldn't be laid down at the original timestamp.
#
# Setup: txn1 acquires unreplicated lock k
#        txn2 reads k and waits
#        txn2 pushes txn1
#
# Test: txn2 succeeds in pushing txn1's ts forward
#       txn2 proceeds
#       txn1 re-acquires replicated lock k at lower ts
# -------------------------------------------------------------

new-txn name=txn1 ts=10,1 epoch=0
----

new-txn name=txn2 ts=12,1 epoch=0
----

new-request name=req1 txn=txn1 ts=10,1
  put key=k value=v
----

new-request name=req2 txn=txn2 ts=12,1
  get key=k
----

sequence req=req1
----
[1] sequence req1: sequencing request
[1] sequence req1: acquiring latches
[1] sequence req1: scanning lock table for conflicting locks
[1] sequence req1: sequencing complete, returned guard

on-lock-acquired req=req1 key=k dur=u
----
[-] acquire lock: txn 00000001 @ k

finish req=req1
----
[-] finish req1: finishing request

sequence req=req2
----
[2] sequence req2: sequencing request
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: waiting in lock wait-queues
[2] sequence req2: pushing timestamp of txn 00000001 above 0.000000012,1
[2] sequence req2: blocked on select in concurrency_test.(*cluster).PushTransaction

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000010,1, info: unrepl epoch: 0, seqs: [0]
   waiting readers:
    req: 8, txn: 00000002-0000-0000-0000-000000000000
   distinguished req: 8
local: num=0

# --------------------------------
# Setup complete, test starts here
# --------------------------------
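
# As the scenario header above notes, nothing in this test bumps the
# timestamp cache, which is why req3 below can lay the intent back down
# at 10,1. A toy sketch of the check a pusher would rely on, reusing the
# hypothetical Timestamp helpers from the first sketch (the real
# structure lives in pkg/kv/kvserver/tscache and is more involved):
#
#   // tsCache records the maximum timestamp at which each key was read.
#   type tsCache map[string]Timestamp
#
#   func (c tsCache) bump(key string, readTS Timestamp) {
#       c[key] = forward(c[key], readTS)
#   }
#
#   // writeTS pushes a writer above any recorded read of the key.
#   func (c tsCache) writeTS(key string, txnTS Timestamp) Timestamp {
#       if cached, ok := c[key]; ok && !cached.Less(txnTS) {
#           return Timestamp{cached.Wall, cached.Logical + 1}
#       }
#       return txnTS
#   }
#
# In a real server, a read of k at 12,1 recorded this way would force a
# later write on k above 12,1 instead of letting it land at 10,1.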

on-txn-updated txn=txn1 status=pending ts=12,2
----
[-] update txn: increasing timestamp of txn1
[2] sequence req2: resolving intent "k" for txn 00000001 with PENDING status
[2] sequence req2: acquiring latches
[2] sequence req2: scanning lock table for conflicting locks
[2] sequence req2: sequencing complete, returned guard

finish req=req2
----
[-] finish req2: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000012,2, info: unrepl epoch: 0, seqs: [0]
local: num=0

# Issue another write to the same key for txn1 at its initial timestamp,
# this time with a replicated durability. The timestamp in the lock
# table should regress back down to reflect the replicated lock state.

new-request name=req3 txn=txn1 ts=10,1
  put key=k value=v2 seq=1
----

sequence req=req3
----
[3] sequence req3: sequencing request
[3] sequence req3: acquiring latches
[3] sequence req3: scanning lock table for conflicting locks
[3] sequence req3: sequencing complete, returned guard

on-lock-acquired req=req3 key=k seq=1 dur=r
----
[-] acquire lock: txn 00000001 @ k

finish req=req3
----
[-] finish req3: finishing request

debug-lock-table
----
global: num=1
 lock: "k"
  holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000010,1, info: repl epoch: 0, seqs: [1], unrepl epoch: 0, seqs: [0]
local: num=0

reset namespace
----
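
# A last addition to the toy model above (same hypothetical Timestamp
# helpers, not the real lockTable code), reading the final dump: the
# lock now carries both replicated and unreplicated holder info, and the
# dump suggests the reported timestamp is the earliest of the two, so it
# regresses to the intent's 10,1:
#
#   type durability int
#
#   const (
#       unreplicated durability = iota
#       replicated
#   )
#
#   // heldTS tracks the holder's timestamp per durability.
#   type heldTS map[durability]Timestamp
#
#   func (h heldTS) effective() (min Timestamp, held bool) {
#       for _, ts := range h {
#           if !held || ts.Less(min) {
#               min, held = ts, true
#           }
#       }
#       return min, held
#   }
#
# With heldTS{unreplicated: {12, 2}, replicated: {10, 1}}, effective()
# returns 10,1, matching the 0.000000010,1 shown in the dump above.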