github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/lightning/web/progress.go

package web

import (
	"encoding/json"
	"sync"

	"github.com/pingcap/errors"

	"github.com/pingcap/br/pkg/lightning/checkpoints"
	"github.com/pingcap/br/pkg/lightning/common"
	"github.com/pingcap/br/pkg/lightning/mydump"
)

// checkpointsMap is a concurrent map (table name → checkpoints).
//
// Implementation note: currently the checkpointsMap is written only from a
// single goroutine inside (*RestoreController).listenCheckpointUpdates(), so
// all writes are effectively single-threaded, and writing checkpoints is not
// performance critical. The map may be read from any HTTP connection
// goroutine, so the concurrent map is simply implemented with a single
// RWMutex. We may switch to a more sophisticated data structure if contention
// is shown to be a problem.
//
// Do not implement this with a sync.Map: its internal locking protects the
// map itself, not the contents behind the stored pointers.
type checkpointsMap struct {
	mu          sync.RWMutex
	checkpoints map[string]*checkpoints.TableCheckpoint
}

func makeCheckpointsMap() (res checkpointsMap) {
	res.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	return
}

func (cpm *checkpointsMap) clear() {
	cpm.mu.Lock()
	cpm.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	cpm.mu.Unlock()
}

func (cpm *checkpointsMap) insert(key string, cp *checkpoints.TableCheckpoint) {
	cpm.mu.Lock()
	cpm.checkpoints[key] = cp
	cpm.mu.Unlock()
}

type totalWritten struct {
	key          string
	totalWritten int64
}

// update applies the diffs to the stored checkpoints and returns the
// recomputed written-byte counts of the affected tables.
func (cpm *checkpointsMap) update(diffs map[string]*checkpoints.TableCheckpointDiff) []totalWritten {
	totalWrittens := make([]totalWritten, 0, len(diffs))

	cpm.mu.Lock()
	defer cpm.mu.Unlock()

	for key, diff := range diffs {
		cp := cpm.checkpoints[key]
		cp.Apply(diff)

		tw := int64(0)
		for _, engine := range cp.Engines {
			for _, chunk := range engine.Chunks {
				if engine.Status >= checkpoints.CheckpointStatusAllWritten {
					tw += chunk.Chunk.EndOffset - chunk.Key.Offset
				} else {
					tw += chunk.Chunk.Offset - chunk.Key.Offset
				}
			}
		}
		totalWrittens = append(totalWrittens, totalWritten{key: key, totalWritten: tw})
	}
	return totalWrittens
}

func (cpm *checkpointsMap) marshal(key string) ([]byte, error) {
	cpm.mu.RLock()
	defer cpm.mu.RUnlock()

	if cp, ok := cpm.checkpoints[key]; ok {
		return json.Marshal(cp)
	}
	return nil, errors.NotFoundf("table %s", key)
}

// taskStatus describes the lifecycle state of the whole task or of a single table.
type taskStatus uint8

const (
	taskStatusRunning   taskStatus = 1
	taskStatusCompleted taskStatus = 2
)

type tableInfo struct {
	TotalWritten int64      `json:"w"`
	TotalSize    int64      `json:"z"`
	Status       taskStatus `json:"s"`
	Message      string     `json:"m,omitempty"`
}

type taskProgress struct {
	mu      sync.RWMutex
	Tables  map[string]*tableInfo `json:"t"`
	Status  taskStatus            `json:"s"`
	Message string                `json:"m,omitempty"`

	// checkpoints has its own mutex for protection; it is not guarded by mu.
	checkpoints checkpointsMap
}

var currentProgress = taskProgress{
	checkpoints: makeCheckpointsMap(),
}

// BroadcastStartTask marks the current task as running and clears any
// checkpoints recorded from a previous run.
func BroadcastStartTask() {
	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusRunning
	currentProgress.mu.Unlock()

	currentProgress.checkpoints.clear()
}

// BroadcastEndTask marks the current task as completed, recording the error
// stack (if any) as the task message.
func BroadcastEndTask(err error) {
	errString := errors.ErrorStack(err)

	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusCompleted
	currentProgress.Message = errString
	currentProgress.mu.Unlock()
}

// BroadcastInitProgress resets the per-table progress from the given database
// metadata, recording each table's total size.
func BroadcastInitProgress(databases []*mydump.MDDatabaseMeta) {
	tables := make(map[string]*tableInfo, len(databases))

	for _, db := range databases {
		for _, tbl := range db.Tables {
			name := common.UniqueTable(db.Name, tbl.Name)
			tables[name] = &tableInfo{TotalSize: tbl.TotalSize}
		}
	}

	currentProgress.mu.Lock()
	currentProgress.Tables = tables
	currentProgress.mu.Unlock()
}

// BroadcastTableCheckpoint marks a table as running and records a copy of its
// checkpoint for serving to HTTP readers.
func BroadcastTableCheckpoint(tableName string, cp *checkpoints.TableCheckpoint) {
	currentProgress.mu.Lock()
	currentProgress.Tables[tableName].Status = taskStatusRunning
	currentProgress.mu.Unlock()

	// Store a deep copy so that later mutations of cp by the importer do not
	// race with readers of this map.
	currentProgress.checkpoints.insert(tableName, cp.DeepCopy())
}

// BroadcastCheckpointDiff applies the checkpoint diffs and refreshes the
// written-bytes counter of every affected table.
func BroadcastCheckpointDiff(diffs map[string]*checkpoints.TableCheckpointDiff) {
	totalWrittens := currentProgress.checkpoints.update(diffs)

	currentProgress.mu.Lock()
	for _, tw := range totalWrittens {
		currentProgress.Tables[tw.key].TotalWritten = tw.totalWritten
	}
	currentProgress.mu.Unlock()
}

// BroadcastError marks a table as completed, recording the given error's
// stack as its message.
func BroadcastError(tableName string, err error) {
	errString := errors.ErrorStack(err)

	currentProgress.mu.Lock()
	if tbl := currentProgress.Tables[tableName]; tbl != nil {
		tbl.Status = taskStatusCompleted
		tbl.Message = errString
	}
	currentProgress.mu.Unlock()
}

// MarshalTaskProgress serializes the overall task progress as JSON.
func MarshalTaskProgress() ([]byte, error) {
	currentProgress.mu.RLock()
	defer currentProgress.mu.RUnlock()
	return json.Marshal(&currentProgress)
}

// MarshalTableCheckpoints serializes the recorded checkpoint of the named
// table as JSON, or returns a NotFound error if the table is unknown.
func MarshalTableCheckpoints(tableName string) ([]byte, error) {
	return currentProgress.checkpoints.marshal(tableName)
}
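
// Call-order sketch (an assumption inferred from the function contracts above,
// not a verbatim excerpt from the restore code): the import side is expected to
// drive the Broadcast* functions roughly as follows, with BroadcastInitProgress
// running before any per-table call so that currentProgress.Tables is populated.
//
//	BroadcastStartTask()               // task enters taskStatusRunning, stale checkpoints cleared
//	BroadcastInitProgress(databases)   // one tableInfo per table, TotalSize taken from the dump metadata
//	BroadcastTableCheckpoint(tbl, cp)  // table enters taskStatusRunning; deep copy of cp retained
//	BroadcastCheckpointDiff(diffs)     // repeatedly while chunks are written; refreshes TotalWritten
//	BroadcastError(tbl, err)           // only for tables that fail
//	BroadcastEndTask(err)              // task enters taskStatusCompleted, err stack kept as Message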
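
// Illustrative usage (not part of the original file): MarshalTaskProgress and
// MarshalTableCheckpoints are intended to be called from HTTP handler
// goroutines while the Broadcast* functions run on the import side. A minimal
// handler wiring might look like the sketch below; the handler name, the
// "table" query parameter, the import of net/http, and the use of 404 for all
// errors are assumptions for illustration, not the routes actually registered
// by Lightning's web server.
//
//	func handleProgress(w http.ResponseWriter, req *http.Request) {
//		var data []byte
//		var err error
//		if table := req.URL.Query().Get("table"); table != "" {
//			data, err = MarshalTableCheckpoints(table)
//		} else {
//			data, err = MarshalTaskProgress()
//		}
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusNotFound)
//			return
//		}
//		w.Header().Set("Content-Type", "application/json")
//		_, _ = w.Write(data)
//	}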