go.temporal.io/server@v1.23.0/common/persistence/sql/sqlplugin/tests/history_replication_task_dlq.go

// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc.  All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tests

import (
	"math/rand"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"go.temporal.io/server/common/persistence/sql/sqlplugin"
	"go.temporal.io/server/common/shuffle"
)

type (
	// historyHistoryReplicationDLQTaskSuite exercises a sqlplugin.HistoryReplicationDLQTask
	// implementation against the history replication DLQ task table.
	historyHistoryReplicationDLQTaskSuite struct {
		suite.Suite
		*require.Assertions

		store sqlplugin.HistoryReplicationDLQTask
	}
)

const (
	testHistoryReplicationTaskDLQSourceCluster = "random history replication task DLQ source cluster"

	testHistoryReplicationTaskDLQEncoding = "random encoding"
)

var (
	testHistoryReplicationTaskDLQData = []byte("random history replication task data")
)

// NewHistoryReplicationDLQTaskSuite returns a testify suite that runs the
// replication DLQ task tests against the given store.
func NewHistoryReplicationDLQTaskSuite(
	t *testing.T,
	store sqlplugin.HistoryReplicationDLQTask,
) *historyHistoryReplicationDLQTaskSuite {
	return &historyHistoryReplicationDLQTaskSuite{
		Assertions: require.New(t),
		store:      store,
	}
}

func (s *historyHistoryReplicationDLQTaskSuite) SetupSuite() {

}

func (s *historyHistoryReplicationDLQTaskSuite) TearDownSuite() {

}

func (s *historyHistoryReplicationDLQTaskSuite) SetupTest() {
	s.Assertions = require.New(s.T())
}

func (s *historyHistoryReplicationDLQTaskSuite) TearDownTest() {

}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsert_Single_Success() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(1, int(rowsAffected))
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsert_Multiple_Success() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task1 := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	taskID++
	task2 := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task1, task2})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(2, int(rowsAffected))
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsert_Single_Fail_Duplicate() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(1, int(rowsAffected))

	task = s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	_, err = s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task})
	s.Error(err) // TODO persistence layer should do proper error translation
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsert_Multiple_Fail_Duplicate() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task1 := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	taskID++
	task2 := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task1, task2})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(2, int(rowsAffected))

	task2 = s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	taskID++
	task3 := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	_, err = s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task2, task3})
	s.Error(err) // TODO persistence layer should do proper error translation
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsertSelect_Single() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(1, int(rowsAffected))

	rangeFilter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: taskID,
		ExclusiveMaxTaskID: taskID + 1,
		PageSize:           1,
	}
	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), rangeFilter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal([]sqlplugin.ReplicationDLQTasksRow{task}, rows)
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsertSelect_Multiple() {
	numTasks := 20
	pageSize := numTasks * 2

	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	minTaskID := int64(1)
	taskID := minTaskID
	maxTaskID := taskID + int64(numTasks)

	var tasks []sqlplugin.ReplicationDLQTasksRow
	for i := 0; i < numTasks; i++ {
		task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
		taskID++
		tasks = append(tasks, task)
	}
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), tasks)
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(numTasks, int(rowsAffected))

	filter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: minTaskID,
		ExclusiveMaxTaskID: maxTaskID,
		PageSize:           pageSize,
	}
	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal(tasks, rows)
}

func (s *historyHistoryReplicationDLQTaskSuite) TestDeleteSelect_Single() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	filter := sqlplugin.ReplicationDLQTasksFilter{
		ShardID:           shardID,
		SourceClusterName: sourceCluster,
		TaskID:            taskID,
	}
	result, err := s.store.DeleteFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(0, int(rowsAffected))

	rangeFilter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: taskID,
		ExclusiveMaxTaskID: taskID + 1,
		PageSize:           1,
	}
	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), rangeFilter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal([]sqlplugin.ReplicationDLQTasksRow(nil), rows)
}

func (s *historyHistoryReplicationDLQTaskSuite) TestDeleteSelect_Multiple() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	minTaskID := int64(1)
	maxTaskID := int64(101)

	filter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: minTaskID,
		ExclusiveMaxTaskID: maxTaskID,
		PageSize:           0,
	}
	result, err := s.store.RangeDeleteFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(0, int(rowsAffected))

	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal([]sqlplugin.ReplicationDLQTasksRow(nil), rows)
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsertDeleteSelect_Single() {
	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	taskID := int64(1)

	task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), []sqlplugin.ReplicationDLQTasksRow{task})
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(1, int(rowsAffected))

	filter := sqlplugin.ReplicationDLQTasksFilter{
		ShardID:           shardID,
		SourceClusterName: sourceCluster,
		TaskID:            taskID,
	}
	result, err = s.store.DeleteFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	rowsAffected, err = result.RowsAffected()
	s.NoError(err)
	s.Equal(1, int(rowsAffected))

	rangeFilter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: taskID,
		ExclusiveMaxTaskID: taskID + 1,
		PageSize:           1,
	}
	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), rangeFilter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal([]sqlplugin.ReplicationDLQTasksRow(nil), rows)
}

func (s *historyHistoryReplicationDLQTaskSuite) TestInsertDeleteSelect_Multiple() {
	numTasks := 20
	pageSize := numTasks * 2

	sourceCluster := shuffle.String(testHistoryReplicationTaskDLQSourceCluster)
	shardID := rand.Int31()
	minTaskID := int64(1)
	taskID := minTaskID
	maxTaskID := taskID + int64(numTasks)

	var tasks []sqlplugin.ReplicationDLQTasksRow
	for i := 0; i < numTasks; i++ {
		task := s.newRandomReplicationTasksDLQRow(sourceCluster, shardID, taskID)
		taskID++
		tasks = append(tasks, task)
	}
	result, err := s.store.InsertIntoReplicationDLQTasks(newExecutionContext(), tasks)
	s.NoError(err)
	rowsAffected, err := result.RowsAffected()
	s.NoError(err)
	s.Equal(numTasks, int(rowsAffected))

	filter := sqlplugin.ReplicationDLQTasksRangeFilter{
		ShardID:            shardID,
		SourceClusterName:  sourceCluster,
		InclusiveMinTaskID: minTaskID,
		ExclusiveMaxTaskID: maxTaskID,
		PageSize:           pageSize,
	}
	result, err = s.store.RangeDeleteFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	rowsAffected, err = result.RowsAffected()
	s.NoError(err)
	s.Equal(numTasks, int(rowsAffected))

	rows, err := s.store.RangeSelectFromReplicationDLQTasks(newExecutionContext(), filter)
	s.NoError(err)
	for index := range rows {
		rows[index].ShardID = shardID
		rows[index].SourceClusterName = sourceCluster
	}
	s.Equal([]sqlplugin.ReplicationDLQTasksRow(nil), rows)
}

// newRandomReplicationTasksDLQRow builds a DLQ row with random payload bytes
// for the given source cluster, shard ID, and task ID.
func (s *historyHistoryReplicationDLQTaskSuite) newRandomReplicationTasksDLQRow(
	sourceClusterName string,
	shardID int32,
	taskID int64,
) sqlplugin.ReplicationDLQTasksRow {
	return sqlplugin.ReplicationDLQTasksRow{
		SourceClusterName: sourceClusterName,
		ShardID:           shardID,
		TaskID:            taskID,
		Data:              shuffle.Bytes(testHistoryReplicationTaskDLQData),
		DataEncoding:      testHistoryReplicationTaskDLQEncoding,
	}
}
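
// Usage sketch: a minimal example of wiring this suite into a concrete SQL
// plugin's tests via testify's suite runner, assuming a hypothetical
// plugin-specific helper newTestHistoryReplicationDLQTaskStore that returns a
// sqlplugin.HistoryReplicationDLQTask backed by a test database. Only
// suite.Run and NewHistoryReplicationDLQTaskSuite are taken from testify and
// this package.
//
//	func TestHistoryReplicationDLQTaskSuite(t *testing.T) {
//		store := newTestHistoryReplicationDLQTaskStore(t) // hypothetical plugin-specific setup
//		s := NewHistoryReplicationDLQTaskSuite(t, store)
//		suite.Run(t, s)
//	}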