github.com/cockroachdb/cockroachdb-parser@v0.23.3-0.20240213214944-911057d40c9a/pkg/util/log/logpb/log.proto

// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

syntax = "proto3";
package cockroach.util.log;
option go_package = "github.com/cockroachdb/cockroach/pkg/util/log/logpb";

import "gogoproto/gogo.proto";

// Severity is the severity level of individual log events.
//
// Note: do not forget to run gen.sh (go generate) when
// changing this list or the explanatory comments.
enum Severity {
  // UNKNOWN is populated into decoded log entries when the
  // severity could not be determined.
  UNKNOWN = 0;
  // INFO is used for informational messages that do not
  // require action.
  INFO = 1;
  // WARNING is used for situations which may require special handling,
  // where normal operation is expected to resume automatically.
  WARNING = 2;
  // ERROR is used for situations that require special handling,
  // where normal operation could not proceed as expected.
  // Other operations can continue mostly unaffected.
  ERROR = 3;
  // FATAL is used for situations that require an immediate, hard
  // server shutdown. A report is also sent to telemetry if telemetry
  // is enabled.
  FATAL = 4;
  // NONE can be used in filters to specify that no messages
  // should be emitted.
  NONE = 5;
  // DEFAULT is the end sentinel. It is used during command-line
  // handling to indicate that another value should be replaced instead
  // (depending on which command is being run); see cli/flags.go for
  // details.
  DEFAULT = 6;
}
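
// The severity values above are ordered: a sink filter set to a given
// severity admits entries at that severity and above. As an illustration
// only (the YAML fragment below is an assumption about the logging
// configuration described in configure-logs.html, not part of this schema),
// a stderr sink restricted to WARNING and above might be configured as:
//
//   sinks:
//     stderr:
//       filter: WARNING
//
// With such a filter, WARNING, ERROR, and FATAL entries are emitted and
// INFO entries are dropped.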

// Channel is the logical logging channel on which a message is sent.
// Different channels can be redirected to different sinks. All
// messages from the same channel are sent to the same sink(s).
//
// Note: do not forget to run gen.sh (go generate) when
// changing this list or the explanatory comments.
enum Channel {
  // DEV is used during development to collect log
  // details useful for troubleshooting that fall outside the
  // scope of other channels. It is also the default logging
  // channel for events not associated with a channel.
  //
  // This channel is special in that there are no constraints as to
  // what may or may not be logged on it. Conversely, users in
  // production deployments are invited to not collect `DEV` logs in
  // centralized logging facilities, because they likely contain
  // sensitive operational data.
  // See [Configure logs](configure-logs.html#dev-channel).
  DEV = 0;

  // OPS is used to report "point" operational events,
  // initiated by user operators or automation:
  //
  // - Operator or system actions on server processes: process starts,
  //   stops, shutdowns, crashes (if they can be logged),
  //   including each time: command-line parameters, current version being run
  // - Actions that impact the topology of a cluster: node additions,
  //   removals, decommissions, etc.
  // - Job-related initiation or termination
  // - [Cluster setting](cluster-settings.html) changes
  // - [Zone configuration](configure-replication-zones.html) changes
  OPS = 1;

  // HEALTH is used to report "background" operational
  // events, initiated by CockroachDB or reporting on automatic processes:
  //
  // - Current resource usage, including critical resource usage
  // - Node-node connection events, including connection errors and
  //   gossip details
  // - Range and table leasing events
  // - Up- and down-replication, range unavailability
  HEALTH = 2;

  // STORAGE is used to report low-level storage
  // layer events (RocksDB/Pebble).
  STORAGE = 3;

  // SESSIONS is used to report client network activity when enabled via
  // the `server.auth_log.sql_connections.enabled` and/or
  // `server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html):
  //
  // - Connections opened/closed
  // - Authentication events: logins, failed attempts
  // - Session and query cancellation
  //
  // This is typically configured in "audit" mode, with event
  // numbering and synchronous writes.
  SESSIONS = 4;

  // SQL_SCHEMA is used to report changes to the
  // SQL logical schema, excluding privilege and ownership changes
  // (which are reported separately on the `PRIVILEGES` channel) and
  // zone configuration changes (which go to the `OPS` channel).
  //
  // This includes:
  //
  // - Database/schema/table/sequence/view/type creation
  // - Adding/removing/changing table columns
  // - Changing sequence parameters
  //
  // `SQL_SCHEMA` events generally comprise changes to the schema that affect the
  // functional behavior of client apps using stored objects.
  SQL_SCHEMA = 5;

  // USER_ADMIN is used to report changes
  // in users and roles, including:
  //
  // - Users added/dropped
  // - Changes to authentication credentials (e.g., passwords, validity, etc.)
  // - Role grants/revocations
  // - Role option grants/revocations
  //
  // This is typically configured in "audit" mode, with event
  // numbering and synchronous writes.
  USER_ADMIN = 6;

  // PRIVILEGES is used to report data
  // authorization changes, including:
  //
  // - Privilege grants/revocations on database, objects, etc.
  // - Object ownership changes
  //
  // This is typically configured in "audit" mode, with event
  // numbering and synchronous writes.
  PRIVILEGES = 7;

  // SENSITIVE_ACCESS is used to report SQL
  // data access to sensitive data:
  //
  // - Data access audit events (when table audit is enabled via
  //   [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit))
  // - Data access audit events (when role-based audit is enabled via
  //   [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings))
  // - SQL statements executed by users with the admin role
  // - Operations that write to system tables
  //
  // This is typically configured in "audit" mode, with event
  // numbering and synchronous writes.
  SENSITIVE_ACCESS = 8;
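
  // The four channels above that are described as typically configured in
  // "audit" mode can, purely as an illustration (the YAML fragment below is
  // an assumption about the logging configuration described in
  // configure-logs.html, not part of this schema), be routed to a dedicated
  // auditable file sink:
  //
  //   sinks:
  //     file-groups:
  //       security:
  //         channels: [SESSIONS, USER_ADMIN, PRIVILEGES, SENSITIVE_ACCESS]
  //         auditable: true
  //
  // The auditable flag requests the synchronous writes and strict event
  // numbering mentioned in the comments above.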

  // SQL_EXEC is used to report SQL execution on
  // behalf of client connections:
  //
  // - Logical SQL statement executions (when enabled via the
  //   `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html))
  // - Uncaught Go panic errors during the execution of a SQL statement.
  SQL_EXEC = 9;

  // SQL_PERF is used to report SQL executions
  // that are marked as "out of the ordinary"
  // to facilitate performance investigations.
  // This includes the SQL "slow query log".
  //
  // Arguably, this channel overlaps with `SQL_EXEC`.
  // However, we keep both channels separate for backward compatibility
  // with versions prior to v21.1, where the corresponding events
  // were redirected to separate files.
  SQL_PERF = 10;

  // SQL_INTERNAL_PERF is like the `SQL_PERF` channel, but is aimed at
  // helping developers of CockroachDB itself. It exists as a separate
  // channel so as to not pollute the `SQL_PERF` logging output with
  // internal troubleshooting details.
  SQL_INTERNAL_PERF = 11;

  // TELEMETRY reports telemetry events. Telemetry events describe
  // feature usage within CockroachDB and anonymize any application-
  // specific data.
  TELEMETRY = 12;

  // KV_DISTRIBUTION is used to report data distribution events, such as moving
  // replicas between stores in the cluster, or adding and removing replicas
  // from ranges.
  KV_DISTRIBUTION = 13;

  // CHANNEL_MAX is the maximum allocated channel number so far.
  // This should be increased every time a new channel is added.
  CHANNEL_MAX = 14;
}

// Entry represents a cockroach log entry in the following two cases:
// - when reading a log file using the crdb-v1 format, entries
//   are parsed into this struct.
// - when injecting an interceptor into the logging package, the
//   interceptor is fed entries using this structure.
message Entry {
  // Severity is the importance of the log entry. See the
  // documentation for the Severity enum for more details.
  Severity severity = 1;
  // Nanoseconds since the epoch.
  int64 time = 2;
  // Goroutine ID. This helps match logging events with goroutine
  // stack dumps.
  int64 goroutine = 6;
  // File name where the logging event was produced. Logging client
  // code can adjust this with the "depth" parameter.
  string file = 3;
  // Line number in the file where the logging event was produced.
  int64 line = 4;
  // Message contains the main text of the logging message.
  string message = 5;

  // Tags contains the context tags available in the context where the
  // entry was created.
  string tags = 7;

  // Counter is an entry counter, meant for use in audit logs as an
  // instrument against log repudiation.
  // See: https://en.wikipedia.org/wiki/Non-repudiation
  //
  // It is incremented for every use of the logger where the entry was
  // produced.
  uint64 counter = 8;

  // Redactable is true if the message and tags fields include markers
  // to delineate sensitive information. In that case, confidentiality
  // can be obtained by stripping away only the data within these
  // markers. If redactable is false or unknown, the entire message
  // should be considered sensitive and stripped away completely for
  // confidentiality.
  bool redactable = 9;

  // Channel is the channel on which the message was sent.
  Channel channel = 10;

  // StructuredEnd, if non-zero, indicates that the entry
  // is structured; it is also the index
  // inside the Message field where the JSON payload ends (exclusive).
  uint32 structured_end = 11;

  // StructuredStart, when StructuredEnd is non-zero, is the index
  // inside the Message field where the JSON payload starts (inclusive).
  uint32 structured_start = 12;

  // StackTraceStart is the index inside Message where a detailed
  // stack trace starts. If zero, no stack trace is present. Stack
  // traces are always separated from the message using a newline
  // character. If a stack trace is included, StackTraceStart is
  // the index of the character immediately after the newline
  // character.
  //
  // We use an index-in-string field in the protobuf, instead of two
  // separate string fields, because previous-version consumers of
  // Entry are still expecting the message and the stack trace in the
  // same field.
  uint32 stack_trace_start = 13;

  // TenantID is the tenant ID that the log entry originated from. NB: if a
  // log entry was not found to contain any tenant ID, we default to the system
  // tenant ID.
  string tenant_id = 14 [(gogoproto.customname) = "TenantID"];

  // TenantName is the tenant name that the log entry originated from. NB: if a
  // log entry was not found to contain any tenant name, we default to the empty
  // string.
  string tenant_name = 15 [(gogoproto.customname) = "TenantName"];
}
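
// For illustration only: the Message text, the indices, and the other
// values below are made up (assumptions, not actual output of the logging
// package), but they show how the structured_start, structured_end, and
// redactable fields relate to the Message field. In protobuf text format,
// a structured, redactable entry could look like:
//
//   severity: INFO
//   time: 1617290000000000000
//   goroutine: 123
//   file: "util/log/event_log.go"
//   line: 32
//   message: "example: {\"EventType\": \"node_restart\"}"
//   tags: "n1"
//   counter: 7
//   redactable: true
//   channel: OPS
//   structured_start: 9
//   structured_end: 38
//
// Here structured_start/structured_end delimit the JSON payload embedded
// in Message ("example: " is 9 characters long, and the JSON object spans
// indices 9 through 38, exclusive), and redactable indicates that
// sensitive values inside Message and Tags are enclosed in redaction
// markers.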

// A FileDetails holds all of the particulars that can be parsed from the
// name of a log file.
message FileDetails {
  // program contains the combination of program name and log file
  // group name, separated by a hyphen. The program name part is
  // guaranteed to not contain hyphens itself; if there had been any
  // in the executable file name, they would be escaped to
  // underscores. The first hyphen separates the program name from the
  // file group name. The file group itself can contain hyphens.
  //
  // For example, if the field is set to "mybinary-my-log-group",
  // the program name is "mybinary" and the file group is "my-log-group".
  //
  // The field is also guaranteed not to contain periods. If there had
  // been periods in the executable file name, they would be replaced
  // by underscores.
  string program = 1;
  // host is the hostname part of the file name.
  // The field is guaranteed not to contain periods. If there had
  // been periods in the hostname, they would be replaced by underscores.
  string host = 2;
  // user_name is the unix username part of the file name.
  // The field is guaranteed not to contain periods. If there had
  // been periods in the username, they would be replaced by underscores.
  string user_name = 3;
  reserved 4;
  // time is the timestamp encoded in the file name.
  int64 time = 5;
  // pid is the process ID encoded in the file name.
  int64 pid = 6 [(gogoproto.customname) = "PID"];
}

// FileInfo describes a log file.
message FileInfo {
  // name is the name of the log file.
  string name = 1;
  // size_bytes is the size of the file, in bytes.
  int64 size_bytes = 2;
  // mod_time_nanos is the file's last modification time, in nanoseconds.
  int64 mod_time_nanos = 3;
  // details contains the particulars parsed from the file name.
  FileDetails details = 4 [(gogoproto.nullable) = false];
  // file_mode is the unix file mode (permission bits) of the log file.
  uint32 file_mode = 5;
}
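
// As an illustration (the file name below is hypothetical, and the naming
// convention is an assumption based on the conventional CockroachDB log
// file layout rather than something defined by this schema), a log file
// named
//
//   cockroach-sql-audit.myhost.myuser.2021-08-01T15_30_00Z.12345.log
//
// would yield a FileDetails with program = "cockroach-sql-audit" (program
// name "cockroach", file group "sql-audit"), host = "myhost",
// user_name = "myuser", the encoded timestamp in time, and pid = 12345.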