go.temporal.io/server@v1.23.0/common/persistence/sql/history_store.go

// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package sql

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"math"

	commonpb "go.temporal.io/api/common/v1"
	"go.temporal.io/api/serviceerror"

	p "go.temporal.io/server/common/persistence"
	"go.temporal.io/server/common/persistence/sql/sqlplugin"
	"go.temporal.io/server/common/primitives"
)

const (
	// NOTE: transaction ID is *= -1 in DB
	MinTxnID int64 = math.MaxInt64
	MaxTxnID int64 = math.MinInt64 + 1 // int overflow
)
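// Editor's note (not part of the original file): the NOTE above refers to the
// SQL layer persisting the transaction ID negated, so a larger logical
// transaction ID sorts as a smaller value in the database. A minimal sketch of
// the mapping, assuming the negation is applied at the plugin boundary:
//
//	storedTxnID := -logicalTxnID // larger logical ID => smaller stored value
//
// With the ordering flipped, the logical "minimum" sentinel is math.MaxInt64
// and the logical "maximum" is math.MinInt64+1; math.MinInt64 itself is
// excluded because negating it would overflow int64.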
// AppendHistoryNodes adds (or overrides) a node in a history branch
func (m *sqlExecutionStore) AppendHistoryNodes(
	ctx context.Context,
	request *p.InternalAppendHistoryNodesRequest,
) error {
	branchInfo := request.BranchInfo
	node := request.Node

	treeIDBytes, err := primitives.ParseUUID(branchInfo.GetTreeId())
	if err != nil {
		return err
	}
	branchIDBytes, err := primitives.ParseUUID(branchInfo.GetBranchId())
	if err != nil {
		return err
	}

	nodeRow := &sqlplugin.HistoryNodeRow{
		TreeID:       treeIDBytes,
		BranchID:     branchIDBytes,
		NodeID:       node.NodeID,
		PrevTxnID:    node.PrevTransactionID,
		TxnID:        node.TransactionID,
		Data:         node.Events.Data,
		DataEncoding: node.Events.EncodingType.String(),
		ShardID:      request.ShardID,
	}

	if !request.IsNewBranch {
		_, err = m.Db.InsertIntoHistoryNode(ctx, nodeRow)
		switch err {
		case nil:
			return nil
		case context.DeadlineExceeded, context.Canceled:
			return &p.AppendHistoryTimeoutError{
				Msg: err.Error(),
			}
		default:
			if m.Db.IsDupEntryError(err) {
				return &p.ConditionFailedError{Msg: fmt.Sprintf("AppendHistoryNodes: row already exists: %v", err)}
			}
			return serviceerror.NewUnavailable(fmt.Sprintf("AppendHistoryNodes: %v", err))
		}
	}

	treeInfoBlob := request.TreeInfo
	treeRow := &sqlplugin.HistoryTreeRow{
		ShardID:      request.ShardID,
		TreeID:       treeIDBytes,
		BranchID:     branchIDBytes,
		Data:         treeInfoBlob.Data,
		DataEncoding: treeInfoBlob.EncodingType.String(),
	}

	return m.txExecute(ctx, "AppendHistoryNodes", func(tx sqlplugin.Tx) error {
		result, err := tx.InsertIntoHistoryNode(ctx, nodeRow)
		if err != nil {
			return err
		}
		rowsAffected, err := result.RowsAffected()
		if err != nil {
			return err
		}
		if !(rowsAffected == 1 || rowsAffected == 2) {
			return fmt.Errorf("expected 1 or 2 rows to be affected for node table, got %v", rowsAffected)
		}

		result, err = tx.InsertIntoHistoryTree(ctx, treeRow)
		switch err {
		case nil:
			rowsAffected, err = result.RowsAffected()
			if err != nil {
				return err
			}
			if !(rowsAffected == 1 || rowsAffected == 2) {
				return fmt.Errorf("expected 1 or 2 rows to be affected for tree table as we allow upserts, got %v", rowsAffected)
			}
			return nil
		case context.DeadlineExceeded, context.Canceled:
			return &p.AppendHistoryTimeoutError{
				Msg: err.Error(),
			}
		default:
			return serviceerror.NewUnavailable(fmt.Sprintf("AppendHistoryNodes: %v", err))
		}
	})
}

// DeleteHistoryNodes removes a single node from a history branch
func (m *sqlExecutionStore) DeleteHistoryNodes(
	ctx context.Context,
	request *p.InternalDeleteHistoryNodesRequest,
) error {
	branchInfo := request.BranchInfo
	nodeID := request.NodeID
	txnID := request.TransactionID
	shardID := request.ShardID

	if nodeID < p.GetBeginNodeID(branchInfo) {
		return &p.InvalidPersistenceRequestError{
			Msg: "cannot delete from ancestors' nodes",
		}
	}

	treeIDBytes, err := primitives.ParseUUID(branchInfo.GetTreeId())
	if err != nil {
		return err
	}
	branchIDBytes, err := primitives.ParseUUID(branchInfo.GetBranchId())
	if err != nil {
		return err
	}

	nodeRow := &sqlplugin.HistoryNodeRow{
		TreeID:   treeIDBytes,
		BranchID: branchIDBytes,
		NodeID:   nodeID,
		TxnID:    txnID,
		ShardID:  shardID,
	}

	_, err = m.Db.DeleteFromHistoryNode(ctx, nodeRow)
	if err != nil {
		return serviceerror.NewUnavailable(fmt.Sprintf("DeleteHistoryNodes: %v", err))
	}
	return nil
}
// ReadHistoryBranch returns history node data for a branch
func (m *sqlExecutionStore) ReadHistoryBranch(
	ctx context.Context,
	request *p.InternalReadHistoryBranchRequest,
) (*p.InternalReadHistoryBranchResponse, error) {
	branch, err := m.GetHistoryBranchUtil().ParseHistoryBranchInfo(request.BranchToken)
	if err != nil {
		return nil, err
	}
	branchIDBytes, err := primitives.ParseUUID(request.BranchID)
	if err != nil {
		return nil, err
	}
	treeIDBytes, err := primitives.ParseUUID(branch.TreeId)
	if err != nil {
		return nil, err
	}

	var token historyNodePaginationToken
	if len(request.NextPageToken) == 0 {
		if request.ReverseOrder {
			token = newHistoryNodePaginationToken(request.MaxNodeID, MaxTxnID)
		} else {
			token = newHistoryNodePaginationToken(request.MinNodeID, MinTxnID)
		}
	} else {
		token, err = deserializeHistoryNodePaginationToken(request.NextPageToken)
		if err != nil {
			return nil, err
		}
	}

	minNodeId, maxNodeId := request.MinNodeID, request.MaxNodeID
	minTxnId, maxTxnId := MinTxnID, MaxTxnID
	if request.ReverseOrder {
		maxNodeId = token.LastNodeID
		maxTxnId = token.LastTxnID
	} else {
		minNodeId = token.LastNodeID
		minTxnId = token.LastTxnID
	}

	rows, err := m.Db.RangeSelectFromHistoryNode(ctx, sqlplugin.HistoryNodeSelectFilter{
		ShardID:      request.ShardID,
		TreeID:       treeIDBytes,
		BranchID:     branchIDBytes,
		MinNodeID:    minNodeId,
		MinTxnID:     minTxnId,
		MaxNodeID:    maxNodeId,
		MaxTxnID:     maxTxnId,
		PageSize:     request.PageSize,
		MetadataOnly: request.MetadataOnly,
		ReverseOrder: request.ReverseOrder,
	})
	switch err {
	case nil:
		// noop
	case sql.ErrNoRows:
		// noop
	default:
		return nil, err
	}

	nodes := make([]p.InternalHistoryNode, 0, len(rows))
	for _, row := range rows {
		nodes = append(nodes, p.InternalHistoryNode{
			NodeID:            row.NodeID,
			PrevTransactionID: row.PrevTxnID,
			TransactionID:     row.TxnID,
			Events:            p.NewDataBlob(row.Data, row.DataEncoding),
		})
	}

	var pagingToken []byte
	if len(rows) < request.PageSize {
		pagingToken = nil
	} else {
		lastRow := rows[len(rows)-1]
		pagingToken, err = serializeHistoryNodePaginationToken(
			newHistoryNodePaginationToken(lastRow.NodeID, lastRow.TxnID),
		)
		if err != nil {
			return nil, err
		}
	}

	return &p.InternalReadHistoryBranchResponse{
		Nodes:         nodes,
		NextPageToken: pagingToken,
	}, nil
}
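// Editor's sketch (not part of the original file): how a caller might drain an
// entire branch page by page via NextPageToken. The helper name and setup are
// hypothetical; only request/response fields that appear in this file are used.
func readAllHistoryNodesExample(
	ctx context.Context,
	store *sqlExecutionStore,
	req *p.InternalReadHistoryBranchRequest,
) ([]p.InternalHistoryNode, error) {
	var all []p.InternalHistoryNode
	for {
		resp, err := store.ReadHistoryBranch(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Nodes...)
		if len(resp.NextPageToken) == 0 {
			return all, nil
		}
		// feed the token back to resume from the last (NodeID, TxnID) read
		req.NextPageToken = resp.NextPageToken
	}
}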
// ForkHistoryBranch forks a new branch from an existing branch.
// Note that the application must provide a valid forking nodeID: it must be an existing nodeID in that branch.
// A valid forking nodeID can be an ancestor from the existing branch.
// For example, we have branch B1 with three nodes (1[1,2], 3[3,4,5] and 6[6,7,8]). 1, 3 and 6 are nodeIDs (first eventID of the batch).
// So B1 looks like this:
//
//	        1[1,2]
//	        /
//	      3[3,4,5]
//	     /
//	   6[6,7,8]
//
// Assume we have branch B2 which contains one ancestor B1 stopping at 6 (exclusive). So B2 inherits nodeIDs 1 and 3 from B1, and has its own nodeIDs 6 and 8.
// Branch B2 looks like this:
//
//	        1[1,2]
//	        /
//	      3[3,4,5]
//	       \
//	        6[6,7]
//	         \
//	          8[8]
//
// Now we want to fork a new branch B3 from B2.
// The only valid forking nodeIDs are 3, 6 or 8.
// 1 is not valid because we can't fork from the first node.
// 2/4/5 are NOT valid either because they are inside a batch.
//
// Case #1: If we fork from nodeID 6, then B3 will have an ancestor B1 which stops at 6 (exclusive).
// As we append a batch of events [6,7,8,9] to B3, it will look like:
//
//	        1[1,2]
//	        /
//	      3[3,4,5]
//	       \
//	      6[6,7,8,9]
//
// Case #2: If we fork from node 8, then B3 will have two ancestors: B1 stops at 6 (exclusive) and ancestor B2 stops at 8 (exclusive).
// As we append a batch of events [8,9] to B3, it will look like:
//
//	        1[1,2]
//	        /
//	      3[3,4,5]
//	     /
//	   6[6,7]
//	    \
//	     8[8,9]
func (m *sqlExecutionStore) ForkHistoryBranch(
	ctx context.Context,
	request *p.InternalForkHistoryBranchRequest,
) error {
	forkB := request.ForkBranchInfo
	treeInfoBlob := request.TreeInfo
	newBranchIdBytes, err := primitives.ParseUUID(request.NewBranchID)
	if err != nil {
		return err
	}
	treeIDBytes, err := primitives.ParseUUID(forkB.GetTreeId())
	if err != nil {
		return err
	}

	row := &sqlplugin.HistoryTreeRow{
		ShardID:      request.ShardID,
		TreeID:       treeIDBytes,
		BranchID:     newBranchIdBytes,
		Data:         treeInfoBlob.Data,
		DataEncoding: treeInfoBlob.EncodingType.String(),
	}

	result, err := m.Db.InsertIntoHistoryTree(ctx, row)
	if err != nil {
		return err
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if !(rowsAffected == 1 || rowsAffected == 2) {
		return fmt.Errorf("expected 1 or 2 rows to be affected for tree table, got %v", rowsAffected)
	}
	return nil
}
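// Editor's sketch (not part of the original file): the ancestor chain for
// Case #2 in the ForkHistoryBranch comment above, expressed with a hypothetical
// branch-range type. Each ancestor covers nodeIDs [BeginNodeID, EndNodeID) on an
// older branch; B3's own nodes then start at 8.
type illustrativeBranchRange struct {
	BranchID    string
	BeginNodeID int64 // inclusive
	EndNodeID   int64 // exclusive
}

// b3AncestorsExample mirrors Case #2: B3 inherits [1,6) from B1 and [6,8) from B2.
var b3AncestorsExample = []illustrativeBranchRange{
	{BranchID: "B1", BeginNodeID: 1, EndNodeID: 6},
	{BranchID: "B2", BeginNodeID: 6, EndNodeID: 8},
}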
// DeleteHistoryBranch removes a branch
func (m *sqlExecutionStore) DeleteHistoryBranch(
	ctx context.Context,
	request *p.InternalDeleteHistoryBranchRequest,
) error {
	branchIDBytes, err := primitives.ParseUUID(request.BranchInfo.BranchId)
	if err != nil {
		return err
	}
	treeIDBytes, err := primitives.ParseUUID(request.BranchInfo.TreeId)
	if err != nil {
		return err
	}

	return m.txExecute(ctx, "DeleteHistoryBranch", func(tx sqlplugin.Tx) error {
		_, err = tx.DeleteFromHistoryTree(ctx, sqlplugin.HistoryTreeDeleteFilter{
			TreeID:   treeIDBytes,
			BranchID: branchIDBytes,
			ShardID:  request.ShardID,
		})
		if err != nil {
			return err
		}

		// delete each branch range
		for _, br := range request.BranchRanges {
			branchIDBytes, err := primitives.ParseUUID(br.BranchId)
			if err != nil {
				return err
			}

			deleteFilter := sqlplugin.HistoryNodeDeleteFilter{
				ShardID:   request.ShardID,
				TreeID:    treeIDBytes,
				BranchID:  branchIDBytes,
				MinNodeID: br.BeginNodeId,
			}
			_, err = tx.RangeDeleteFromHistoryNode(ctx, deleteFilter)
			if err != nil {
				return err
			}
		}
		return nil
	})
}

// getAllHistoryTreeBranchesPaginationToken represents the primary key of the latest row in the history_tree table that
// we returned.
type getAllHistoryTreeBranchesPaginationToken struct {
	ShardID  int32
	TreeID   primitives.UUID
	BranchID primitives.UUID
}

func (m *sqlExecutionStore) GetAllHistoryTreeBranches(
	ctx context.Context,
	request *p.GetAllHistoryTreeBranchesRequest,
) (*p.InternalGetAllHistoryTreeBranchesResponse, error) {
	pageSize := request.PageSize
	if pageSize <= 0 {
		return nil, fmt.Errorf("PageSize must be greater than 0, but was %d", pageSize)
	}

	page := sqlplugin.HistoryTreeBranchPage{
		Limit: pageSize,
	}
	if len(request.NextPageToken) != 0 {
		var token getAllHistoryTreeBranchesPaginationToken
		if err := json.Unmarshal(request.NextPageToken, &token); err != nil {
			return nil, err
		}
		page.ShardID = token.ShardID
		page.TreeID = token.TreeID
		page.BranchID = token.BranchID
	}

	rows, err := m.Db.PaginateBranchesFromHistoryTree(ctx, page)
	if err != nil {
		return nil, err
	}
	branches := make([]p.InternalHistoryBranchDetail, 0, pageSize)
	for _, row := range rows {
		branch := p.InternalHistoryBranchDetail{
			TreeID:   row.TreeID.String(),
			BranchID: row.BranchID.String(),
			Data:     row.Data,
			Encoding: row.DataEncoding,
		}
		branches = append(branches, branch)
	}

	response := &p.InternalGetAllHistoryTreeBranchesResponse{
		Branches: branches,
	}
	if len(branches) < pageSize {
		// no next page token because there are no more results
		return response, nil
	}

	// if we filled the page with rows, then set the next page token
	lastRow := rows[len(rows)-1]
	token := getAllHistoryTreeBranchesPaginationToken{
		ShardID:  lastRow.ShardID,
		TreeID:   lastRow.TreeID,
		BranchID: lastRow.BranchID,
	}
	tokenBytes, err := json.Marshal(token)
	if err != nil {
		return nil, err
	}
	response.NextPageToken = tokenBytes
	return response, nil
}

// GetHistoryTree returns all branch information of a tree
func (m *sqlExecutionStore) GetHistoryTree(
	ctx context.Context,
	request *p.GetHistoryTreeRequest,
) (*p.InternalGetHistoryTreeResponse, error) {
	treeID, err := primitives.ParseUUID(request.TreeID)
	if err != nil {
		return nil, err
	}

	rows, err := m.Db.SelectFromHistoryTree(ctx, sqlplugin.HistoryTreeSelectFilter{
		TreeID:  treeID,
		ShardID: request.ShardID,
	})
	if err == sql.ErrNoRows || (err == nil && len(rows) == 0) {
		return &p.InternalGetHistoryTreeResponse{}, nil
	}
	treeInfos := make([]*commonpb.DataBlob, 0, len(rows))
	for _, row := range rows {
		treeInfos = append(treeInfos, p.NewDataBlob(row.Data, row.DataEncoding))
	}

	return &p.InternalGetHistoryTreeResponse{
		TreeInfos: treeInfos,
	}, nil
}
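// Editor's sketch (not part of the original file): the NextPageToken returned by
// GetAllHistoryTreeBranches is simply the JSON encoding of
// getAllHistoryTreeBranchesPaginationToken, i.e. the (ShardID, TreeID, BranchID)
// primary key of the last row returned. Callers should treat the token as
// opaque; decoding it here is purely for illustration.
func decodeTreeBranchesPageTokenExample(token []byte) (getAllHistoryTreeBranchesPaginationToken, error) {
	var t getAllHistoryTreeBranchesPaginationToken
	err := json.Unmarshal(token, &t)
	return t, err
}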