github.com/chainopen/ethchaincode@v0.0.0-20190924072703-d975acdaa1c6/dashboard/log.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package dashboard

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/mohae/deepcopy"
	"github.com/rjeczalik/notify"
)

var emptyChunk = json.RawMessage("[]")

// prepLogs creates a JSON array from the given log record buffer.
// Returns the prepared array and the position of the last '\n'
// character in the original buffer, or -1 if it doesn't contain any.
func prepLogs(buf []byte) (json.RawMessage, int) {
	b := make(json.RawMessage, 1, len(buf)+1)
	b[0] = '['
	b = append(b, buf...)
	last := -1
	for i := 1; i < len(b); i++ {
		if b[i] == '\n' {
			b[i] = ','
			last = i
		}
	}
	if last < 0 {
		return emptyChunk, -1
	}
	b[last] = ']'
	return b[:last+1], last - 1
}
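// prepLogsExample is a minimal, hypothetical sketch (not part of the original
// file) showing how prepLogs turns a buffer of newline-terminated JSON log
// records into a single JSON array and reports where the last complete record
// ends. The log lines used here are illustrative only.
func prepLogsExample() (json.RawMessage, []byte) {
	buf := []byte("{\"msg\":\"first\"}\n{\"msg\":\"second\"}\n{\"msg\":\"partial")
	chunk, last := prepLogs(buf)
	// chunk is now the array [{"msg":"first"},{"msg":"second"}]; last is the
	// index of the final '\n' in buf, so buf[last+1:] holds the incomplete
	// trailing record that should be carried over to the next read.
	return chunk, buf[last+1:]
}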
// handleLogRequest searches for the log file specified by the timestamp of the
// request, creates a JSON array out of it and sends it to the requesting client.
func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	re := regexp.MustCompile(`\.log$`)
	fileNames := make([]string, 0, len(files))
	for _, f := range files {
		if f.Mode().IsRegular() && re.MatchString(f.Name()) {
			fileNames = append(fileNames, f.Name())
		}
	}
	if len(fileNames) < 1 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	idx := sort.Search(len(fileNames), func(idx int) bool {
		// Returns the smallest index such that fileNames[idx] >= r.Name;
		// if there is no such index, returns n.
		return fileNames[idx] >= r.Name
	})

	switch {
	case idx < 0:
		return
	case idx == 0 && r.Past:
		return
	case idx >= len(fileNames):
		return
	case r.Past:
		idx--
	case idx == len(fileNames)-1 && fileNames[idx] == r.Name:
		return
	case idx == len(fileNames)-1 || (idx == len(fileNames)-2 && fileNames[idx] == r.Name):
		// The last file is continuously updated, and its chunks are streamed,
		// so in order to avoid log record duplication on the client side, it is
		// handled differently. Its actual content is always saved in the history.
		db.logLock.RLock()
		if db.history.Logs != nil {
			c.msg <- &Message{
				Logs: deepcopy.Copy(db.history.Logs).(*LogsMessage),
			}
		}
		db.logLock.RUnlock()
		return
	case fileNames[idx] == r.Name:
		idx++
	}

	path := filepath.Join(db.logdir, fileNames[idx])
	var buf []byte
	if buf, err = ioutil.ReadFile(path); err != nil {
		log.Warn("Failed to read file", "path", path, "err", err)
		return
	}
	chunk, end := prepLogs(buf)
	if end < 0 {
		log.Warn("The file doesn't contain valid logs", "path", path)
		return
	}
	c.msg <- &Message{
		Logs: &LogsMessage{
			Source: &LogFile{
				Name: fileNames[idx],
				Last: r.Past && idx == 0,
			},
			Chunk: chunk,
		},
	}
}
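// handleLogRequestExample is a hypothetical sketch (not part of the original
// file) of how a log request might be issued: the client asks for the file
// preceding the one named in the request, and the handler replies on the
// client's message channel with that file's prepared chunk. The file name
// used here is illustrative only.
func handleLogRequestExample(db *Dashboard, c *client) {
	db.handleLogRequest(&LogsRequest{
		Name: "node.2019.09.24-10.log", // Hypothetical log file name.
		Past: true,                      // Request the file preceding Name.
	}, c)
}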
// streamLogs watches the file system, and when the logger writes new log
// records into the files, picks them up, makes a JSON array out of them
// and sends it to the clients.
func (db *Dashboard) streamLogs() {
	defer db.wg.Done()
	var (
		err  error
		errc chan error
	)
	defer func() {
		if errc == nil {
			errc = <-db.quit
		}
		errc <- err
	}()

	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	var (
		opened *os.File // File descriptor for the opened active log file.
		buf    []byte   // Contains the recently written log chunks, which are not sent to the clients yet.
	)

	// The log records are always written into the last file in alphabetical order, because of the timestamp.
	re := regexp.MustCompile(`\.log$`)
	i := len(files) - 1
	for i >= 0 && (!files[i].Mode().IsRegular() || !re.MatchString(files[i].Name())) {
		i--
	}
	if i < 0 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	if opened, err = os.OpenFile(filepath.Join(db.logdir, files[i].Name()), os.O_RDONLY, 0600); err != nil {
		log.Warn("Failed to open file", "name", files[i].Name(), "err", err)
		return
	}
	defer opened.Close() // Close the last opened file.
	fi, err := opened.Stat()
	if err != nil {
		log.Warn("Problem with file", "name", opened.Name(), "err", err)
		return
	}
	db.logLock.Lock()
	db.history.Logs = &LogsMessage{
		Source: &LogFile{
			Name: fi.Name(),
			Last: true,
		},
		Chunk: emptyChunk,
	}
	db.logLock.Unlock()

	watcher := make(chan notify.EventInfo, 10)
	if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
		log.Warn("Failed to create file system watcher", "err", err)
		return
	}
	defer notify.Stop(watcher)

	ticker := time.NewTicker(db.config.Refresh)
	defer ticker.Stop()

loop:
	for err == nil || errc == nil {
		select {
		case event := <-watcher:
			// Make sure that a new log file was created.
			if !re.Match([]byte(event.Path())) {
				break
			}
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// The new log file's name is always greater,
			// because it is created using the actual log record's time.
			if opened.Name() >= event.Path() {
				break
			}
			// Read the rest of the previously opened file.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			buf = append(buf, chunk...)
			opened.Close()

			if chunk, last := prepLogs(buf); last >= 0 {
				// Send the rest of the previously opened file.
				db.sendToAll(&Message{
					Logs: &LogsMessage{
						Chunk: chunk,
					},
				})
			}
			if opened, err = os.OpenFile(event.Path(), os.O_RDONLY, 0644); err != nil {
				log.Warn("Failed to open file", "name", event.Path(), "err", err)
				break loop
			}
			buf = buf[:0]

			// Change the last file in the history.
			fi, err := opened.Stat()
			if err != nil {
				log.Warn("Problem with file", "name", opened.Name(), "err", err)
				break loop
			}
			db.logLock.Lock()
			db.history.Logs.Source.Name = fi.Name()
			db.history.Logs.Chunk = emptyChunk
			db.logLock.Unlock()
		case <-ticker.C: // Send log updates to the client.
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// Read the new logs created since the last read.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			b := append(buf, chunk...)

			chunk, last := prepLogs(b)
			if last < 0 {
				break
			}
			// Only keep the incomplete tail of the buffer, which may become valid after the next read.
			buf = b[last+1:]

			var l *LogsMessage
			// Update the history.
			db.logLock.Lock()
			if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
				db.history.Logs.Chunk = chunk
				l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
			} else {
				b = make([]byte, len(db.history.Logs.Chunk)+len(chunk)-1)
				copy(b, db.history.Logs.Chunk)
				b[len(db.history.Logs.Chunk)-1] = ','
				copy(b[len(db.history.Logs.Chunk):], chunk[1:])
				db.history.Logs.Chunk = b
				l = &LogsMessage{Chunk: chunk}
			}
			db.logLock.Unlock()

			db.sendToAll(&Message{Logs: l})
		case errc = <-db.quit:
			break loop
		}
	}
}
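// mergeChunksExample is a hypothetical sketch (not part of the original file)
// of the concatenation streamLogs performs when it appends a freshly prepared
// chunk to a non-empty history: the closing ']' of the old array is replaced
// by ',' and the opening '[' of the new chunk is skipped, producing a single
// JSON array. The helper name and its inputs are illustrative only.
func mergeChunksExample(old, chunk json.RawMessage) json.RawMessage {
	merged := make([]byte, len(old)+len(chunk)-1)
	copy(merged, old)
	merged[len(old)-1] = ','           // Replace the old array's closing ']'.
	copy(merged[len(old):], chunk[1:]) // Append the new records without their leading '['.
	return merged
}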