github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/dashboard/log.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:37</date>
//</624450087350702080>

package dashboard

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/mohae/deepcopy"
	"github.com/rjeczalik/notify"
)

var emptyChunk = json.RawMessage("[]")

// prepLogs creates a JSON array from the given log record buffer.
// It returns the prepared array and the position of the last '\n'
// character in the original buffer, or -1 if it doesn't contain any.
func prepLogs(buf []byte) (json.RawMessage, int) {
	b := make(json.RawMessage, 1, len(buf)+1)
	b[0] = '['
	b = append(b, buf...)
	last := -1
	for i := 1; i < len(b); i++ {
		if b[i] == '\n' {
			b[i] = ','
			last = i
		}
	}
	if last < 0 {
		return emptyChunk, -1
	}
	b[last] = ']'
	return b[:last+1], last - 1
}

// handleLogRequest searches for the log file specified by the timestamp of the
// request, creates a JSON array out of it and sends it to the requesting client.
func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	re := regexp.MustCompile(`\.log$`)
	fileNames := make([]string, 0, len(files))
	for _, f := range files {
		if f.Mode().IsRegular() && re.MatchString(f.Name()) {
			fileNames = append(fileNames, f.Name())
		}
	}
	if len(fileNames) < 1 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	idx := sort.Search(len(fileNames), func(idx int) bool {
		// Returns the smallest index such that fileNames[idx] >= r.Name;
		// if there is no such index, returns n.
		return fileNames[idx] >= r.Name
	})

	switch {
	case idx < 0:
		return
	case idx == 0 && r.Past:
		return
	case idx >= len(fileNames):
		return
	case r.Past:
		idx--
	case idx == len(fileNames)-1 && fileNames[idx] == r.Name:
		return
	case idx == len(fileNames)-1 || (idx == len(fileNames)-2 && fileNames[idx] == r.Name):
		// The last file is continuously updated, and its chunks are streamed,
		// so in order to avoid log record duplication on the client side, it is
		// handled differently. Its actual content is always saved in the history.
		db.lock.Lock()
		if db.history.Logs != nil {
			c.msg <- &Message{
				Logs: db.history.Logs,
			}
		}
		db.lock.Unlock()
		return
	case fileNames[idx] == r.Name:
		idx++
	}

	path := filepath.Join(db.logdir, fileNames[idx])
	var buf []byte
	if buf, err = ioutil.ReadFile(path); err != nil {
		log.Warn("Failed to read file", "path", path, "err", err)
		return
	}
	chunk, end := prepLogs(buf)
	if end < 0 {
		log.Warn("The file doesn't contain valid logs", "path", path)
		return
	}
	c.msg <- &Message{
		Logs: &LogsMessage{
			Source: &LogFile{
				Name: fileNames[idx],
				Last: r.Past && idx == 0,
			},
			Chunk: chunk,
		},
	}
}

// streamLogs watches the file system, and when the logger writes
// new log records into the files, picks them up, then makes
// JSON arrays out of them and sends them to the clients.
func (db *Dashboard) streamLogs() {
	defer db.wg.Done()
	var (
		err  error
		errc chan error
	)
	defer func() {
		if errc == nil {
			errc = <-db.quit
		}
		errc <- err
	}()

	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	var (
		opened *os.File // File descriptor of the opened active log file.
		buf    []byte   // Contains the recently written log chunks, which are not sent to the clients yet.
	)

	// The log records are always written into the last file in alphabetical order, because of the timestamp.
	re := regexp.MustCompile(`\.log$`)
	i := len(files) - 1
	for i >= 0 && (!files[i].Mode().IsRegular() || !re.MatchString(files[i].Name())) {
		i--
	}
	if i < 0 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	if opened, err = os.OpenFile(filepath.Join(db.logdir, files[i].Name()), os.O_RDONLY, 0600); err != nil {
		log.Warn("Failed to open file", "name", files[i].Name(), "err", err)
		return
	}
	defer opened.Close() // Close the lastly opened file.
	fi, err := opened.Stat()
	if err != nil {
		log.Warn("Problem with file", "name", opened.Name(), "err", err)
		return
	}
	db.lock.Lock()
	db.history.Logs = &LogsMessage{
		Source: &LogFile{
			Name: fi.Name(),
			Last: true,
		},
		Chunk: emptyChunk,
	}
	db.lock.Unlock()

	watcher := make(chan notify.EventInfo, 10)
	if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
		log.Warn("Failed to create file system watcher", "err", err)
		return
	}
	defer notify.Stop(watcher)

	ticker := time.NewTicker(db.config.Refresh)
	defer ticker.Stop()

loop:
	for err == nil || errc == nil {
		select {
		case event := <-watcher:
			// Make sure that a new log file was created.
			if !re.Match([]byte(event.Path())) {
				break
			}
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// The new log file's name is always greater,
			// because it is created using the time of the actual log record.
			if opened.Name() >= event.Path() {
				break
			}
			// Read the rest of the previously opened file.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			buf = append(buf, chunk...)
			opened.Close()

			if chunk, last := prepLogs(buf); last >= 0 {
				// Send the rest of the previously opened file.
				db.sendToAll(&Message{
					Logs: &LogsMessage{
						Chunk: chunk,
					},
				})
			}
			if opened, err = os.OpenFile(event.Path(), os.O_RDONLY, 0644); err != nil {
				log.Warn("Failed to open file", "name", event.Path(), "err", err)
				break loop
			}
			buf = buf[:0]

			// Change the last file in the history.
			fi, err := opened.Stat()
			if err != nil {
				log.Warn("Problem with file", "name", opened.Name(), "err", err)
				break loop
			}
			db.lock.Lock()
			db.history.Logs.Source.Name = fi.Name()
			db.history.Logs.Chunk = emptyChunk
			db.lock.Unlock()
		case <-ticker.C: // Send log updates to the clients.
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// Read the new logs created since the last read.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			b := append(buf, chunk...)

			chunk, last := prepLogs(b)
			if last < 0 {
				break
			}
			// Only keep the invalid part of the buffer, which can become valid after the next read.
			buf = b[last+1:]

			var l *LogsMessage
			// Update the history.
			db.lock.Lock()
			if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
				db.history.Logs.Chunk = chunk
				l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
			} else {
				b = make([]byte, len(db.history.Logs.Chunk)+len(chunk)-1)
				copy(b, db.history.Logs.Chunk)
				b[len(db.history.Logs.Chunk)-1] = ','
				copy(b[len(db.history.Logs.Chunk):], chunk[1:])
				db.history.Logs.Chunk = b
				l = &LogsMessage{Chunk: chunk}
			}
			db.lock.Unlock()

			db.sendToAll(&Message{Logs: l})
		case errc = <-db.quit:
			break loop
		}
	}
}
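
// The two sketches below are illustrative additions, not part of the original
// go-ethereum dashboard code. They restate, in isolation, the two buffer tricks
// used above: how prepLogs splits a read into a complete JSON array plus an
// unfinished tail, and how the ticker branch splices a new chunk onto the
// history chunk. The function names (prepLogsSketch, mergeChunksSketch) and the
// sample records are made up for the example.

// prepLogsSketch shows the contract streamLogs relies on: everything up to the
// last '\n' becomes a JSON array, and the returned index lets the caller keep
// the partial record for the next read.
func prepLogsSketch() (json.RawMessage, []byte) {
	buf := []byte("{\"msg\":\"a\"}\n{\"msg\":\"b\"}\n{\"msg\":\"c\"")
	chunk, end := prepLogs(buf) // chunk is [{"msg":"a"},{"msg":"b"}]; end points at the last '\n' in buf.
	return chunk, buf[end+1:]   // the unfinished record {"msg":"c" is carried over.
}

// mergeChunksSketch mirrors the history-update step in the ticker branch: two
// JSON arrays are joined by overwriting the closing ']' of the first with ','
// and dropping the opening '[' of the second. Both arguments are assumed to be
// non-empty arrays, as guaranteed there by the emptyChunk check.
func mergeChunksSketch(a, b json.RawMessage) json.RawMessage {
	merged := make(json.RawMessage, len(a)+len(b)-1)
	copy(merged, a)
	merged[len(a)-1] = ','
	copy(merged[len(a):], b[1:])
	return merged // e.g. ["x"] and ["y"] become ["x","y"].
}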