github.com/pingcap/tidb-lightning@v5.0.0-rc.0.20210428090220-84b649866577+incompatible/lightning/web/progress.go

package web

import (
	"encoding/json"
	"sync"

	"github.com/pingcap/errors"

	"github.com/pingcap/tidb-lightning/lightning/checkpoints"
	"github.com/pingcap/tidb-lightning/lightning/common"
	"github.com/pingcap/tidb-lightning/lightning/mydump"
)

// checkpointsMap is a concurrent map (table name → checkpoints).
//
// Implementation note: currently the checkpointsMap is only written from a
// single goroutine inside (*RestoreController).listenCheckpointUpdates(), so
// all writes are single-threaded, and writing checkpoints is not considered
// performance-critical. The map can be read from any HTTP connection
// goroutine. Therefore, we simply implement the concurrent map using a single
// RWMutex. We may switch to a more sophisticated data structure if contention
// is shown to be a problem.
//
// Do not implement this using a sync.Map: its internal locking protects only
// the map entries themselves, not the TableCheckpoint values behind the
// stored pointers.
type checkpointsMap struct {
	mu          sync.RWMutex
	checkpoints map[string]*checkpoints.TableCheckpoint
}

func makeCheckpointsMap() (res checkpointsMap) {
	res.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	return
}

func (cpm *checkpointsMap) clear() {
	cpm.mu.Lock()
	cpm.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	cpm.mu.Unlock()
}

func (cpm *checkpointsMap) insert(key string, cp *checkpoints.TableCheckpoint) {
	cpm.mu.Lock()
	cpm.checkpoints[key] = cp
	cpm.mu.Unlock()
}

// totalWritten pairs a table name with the number of bytes written so far.
type totalWritten struct {
	key          string
	totalWritten int64
}

// update applies the given diffs to the stored checkpoints and returns the
// recomputed written-bytes count of every affected table. Every key in diffs
// must have been registered via insert() first, otherwise cp below is nil and
// Apply panics.
func (cpm *checkpointsMap) update(diffs map[string]*checkpoints.TableCheckpointDiff) []totalWritten {
	totalWrittens := make([]totalWritten, 0, len(diffs))

	cpm.mu.Lock()
	defer cpm.mu.Unlock()

	for key, diff := range diffs {
		cp := cpm.checkpoints[key]
		cp.Apply(diff)

		tw := int64(0)
		for _, engine := range cp.Engines {
			for _, chunk := range engine.Chunks {
				if engine.Status >= checkpoints.CheckpointStatusAllWritten {
					// The engine is fully written; count the whole chunk.
					tw += chunk.Chunk.EndOffset - chunk.Key.Offset
				} else {
					// Still in progress; count up to the current offset.
					tw += chunk.Chunk.Offset - chunk.Key.Offset
				}
			}
		}
		totalWrittens = append(totalWrittens, totalWritten{key: key, totalWritten: tw})
	}
	return totalWrittens
}

func (cpm *checkpointsMap) marshal(key string) ([]byte, error) {
	cpm.mu.RLock()
	defer cpm.mu.RUnlock()

	if cp, ok := cpm.checkpoints[key]; ok {
		return json.Marshal(cp)
	}
	return nil, errors.NotFoundf("table %s", key)
}

type taskStatus uint8

const (
	taskStatusNotStarted taskStatus = 0
	taskStatusRunning    taskStatus = 1
	taskStatusCompleted  taskStatus = 2
)

type tableInfo struct {
	TotalWritten int64      `json:"w"`
	TotalSize    int64      `json:"z"`
	Status       taskStatus `json:"s"`
	Message      string     `json:"m,omitempty"`
}

type taskProgress struct {
	mu      sync.RWMutex
	Tables  map[string]*tableInfo `json:"t"`
	Status  taskStatus            `json:"s"`
	Message string                `json:"m,omitempty"`

	// checkpoints is protected by its own mutex, not by mu above.
	checkpoints checkpointsMap
}

var currentProgress = taskProgress{
	checkpoints: makeCheckpointsMap(),
}

// BroadcastStartTask marks the current task as running and clears the
// checkpoints collected from any previous task.
func BroadcastStartTask() {
	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusRunning
	currentProgress.mu.Unlock()

	currentProgress.checkpoints.clear()
}
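// The Broadcast* functions in this file form the write-side API of the
// package. An illustrative call sequence, assuming a driver shaped like the
// restore controller mentioned above (the exact call sites live in
// lightning/restore and may differ in detail):
//
//	BroadcastStartTask()                // the import task begins
//	BroadcastInitProgress(databases)    // register every table and its size
//	BroadcastTableCheckpoint(name, cp)  // a table starts importing
//	BroadcastCheckpointDiff(diffs)      // repeated progress updates
//	BroadcastError(name, tableErr)      // a table fails
//	BroadcastEndTask(taskErr)           // the import task ends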
// BroadcastEndTask marks the current task as completed. If err is not nil,
// its stack trace is recorded as the task message.
func BroadcastEndTask(err error) {
	errString := errors.ErrorStack(err)

	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusCompleted
	currentProgress.Message = errString
	currentProgress.mu.Unlock()
}

// BroadcastInitProgress initializes the progress maps from the data source
// metadata, recording the total size of every table to be imported.
func BroadcastInitProgress(databases []*mydump.MDDatabaseMeta) {
	tables := make(map[string]*tableInfo, len(databases))

	for _, db := range databases {
		for _, tbl := range db.Tables {
			name := common.UniqueTable(db.Name, tbl.Name)
			tables[name] = &tableInfo{TotalSize: tbl.TotalSize}
		}
	}

	currentProgress.mu.Lock()
	currentProgress.Tables = tables
	currentProgress.mu.Unlock()
}

// BroadcastTableCheckpoint marks a table as running and records its current
// checkpoint.
func BroadcastTableCheckpoint(tableName string, cp *checkpoints.TableCheckpoint) {
	currentProgress.mu.Lock()
	currentProgress.Tables[tableName].Status = taskStatusRunning
	currentProgress.mu.Unlock()

	// Insert a deep copy so the caller's checkpoint and ours do not share state.
	currentProgress.checkpoints.insert(tableName, cp.DeepCopy())
}

// BroadcastCheckpointDiff applies the checkpoint diffs and refreshes the
// written-bytes counter of every affected table.
func BroadcastCheckpointDiff(diffs map[string]*checkpoints.TableCheckpointDiff) {
	totalWrittens := currentProgress.checkpoints.update(diffs)

	currentProgress.mu.Lock()
	for _, tw := range totalWrittens {
		currentProgress.Tables[tw.key].TotalWritten = tw.totalWritten
	}
	currentProgress.mu.Unlock()
}

// BroadcastError marks a table as completed, recording the stack trace of err
// as the table message.
func BroadcastError(tableName string, err error) {
	errString := errors.ErrorStack(err)

	currentProgress.mu.Lock()
	if tbl := currentProgress.Tables[tableName]; tbl != nil {
		tbl.Status = taskStatusCompleted
		tbl.Message = errString
	}
	currentProgress.mu.Unlock()
}

// MarshalTaskProgress serializes the current task progress into JSON.
func MarshalTaskProgress() ([]byte, error) {
	currentProgress.mu.RLock()
	defer currentProgress.mu.RUnlock()
	return json.Marshal(&currentProgress)
}

// MarshalTableCheckpoints serializes the checkpoint of the given table into
// JSON, or returns a NotFound error if the table is unknown.
func MarshalTableCheckpoints(tableName string) ([]byte, error) {
	return currentProgress.checkpoints.marshal(tableName)
}
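// A minimal sketch of how an HTTP layer could expose the two Marshal*
// functions above; the route paths and the "t" query parameter here are
// hypothetical, and the real server wiring lives elsewhere in tidb-lightning:
//
//	http.HandleFunc("/progress/task", func(w http.ResponseWriter, req *http.Request) {
//		data, err := MarshalTaskProgress()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		w.Header().Set("Content-Type", "application/json")
//		w.Write(data)
//	})
//
//	http.HandleFunc("/progress/table", func(w http.ResponseWriter, req *http.Request) {
//		data, err := MarshalTableCheckpoints(req.URL.Query().Get("t"))
//		if err != nil {
//			// marshal() returns a NotFound error for unknown tables.
//			http.Error(w, err.Error(), http.StatusNotFound)
//			return
//		}
//		w.Header().Set("Content-Type", "application/json")
//		w.Write(data)
//	})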