github.com/graywolf-at-work-2/terraform-vendor@v1.4.5/internal/command/views/json/diagnostic.go

package json

import (
	"bufio"
	"bytes"
	"fmt"
	"sort"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcled"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/terraform/internal/lang/marks"
	"github.com/hashicorp/terraform/internal/tfdiags"
	"github.com/zclconf/go-cty/cty"
)

// These severities map to the tfdiags.Severity values, plus an explicit
// unknown in case that enum grows without us noticing here.
const (
	DiagnosticSeverityUnknown = "unknown"
	DiagnosticSeverityError   = "error"
	DiagnosticSeverityWarning = "warning"
)

// Diagnostic represents any tfdiags.Diagnostic value. The simplest form has
// just a severity, single line summary, and optional detail. If there is more
// information about the source of the diagnostic, this is represented in the
// range field.
type Diagnostic struct {
	Severity string             `json:"severity"`
	Summary  string             `json:"summary"`
	Detail   string             `json:"detail"`
	Address  string             `json:"address,omitempty"`
	Range    *DiagnosticRange   `json:"range,omitempty"`
	Snippet  *DiagnosticSnippet `json:"snippet,omitempty"`
}
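
// As an illustration of the wire format implied by the struct tags above, a
// Diagnostic might marshal to JSON roughly like this (the concrete values are
// hypothetical, not taken from a real run):
//
//	{
//	  "severity": "error",
//	  "summary": "Unsupported argument",
//	  "detail": "An argument named \"foo\" is not expected here.",
//	  "range": {
//	    "filename": "main.tf",
//	    "start": {"line": 2, "column": 3, "byte": 20},
//	    "end": {"line": 2, "column": 6, "byte": 23}
//	  }
//	}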

// Pos represents a position in the source code.
type Pos struct {
	// Line is a one-based count for the line in the indicated file.
	Line int `json:"line"`

	// Column is a one-based count of Unicode characters from the start of the line.
	Column int `json:"column"`

	// Byte is a zero-based offset into the indicated file.
	Byte int `json:"byte"`
}

// DiagnosticRange represents the filename and position of the diagnostic
// subject. This defines the range of the source to be highlighted in the
// output. Note that the snippet may include additional surrounding source code
// if the diagnostic has a context range.
//
// The Start position is inclusive, and the End position is exclusive. Exact
// positions are intended for highlighting for human interpretation only and
// are subject to change.
type DiagnosticRange struct {
	Filename string `json:"filename"`
	Start    Pos    `json:"start"`
	End      Pos    `json:"end"`
}

// DiagnosticSnippet represents source code information about the diagnostic.
// It is possible for a diagnostic to have a source (and therefore a range) but
// no source code can be found. In this case, the range field will be present and
// the snippet field will not.
type DiagnosticSnippet struct {
	// Context is derived from HCL's hcled.ContextString output. This gives a
	// high-level summary of the root context of the diagnostic: for example,
	// the resource block in which an expression causes an error.
	Context *string `json:"context"`

	// Code is a possibly-multi-line string of Terraform configuration, which
	// includes both the diagnostic source and any relevant context as defined
	// by the diagnostic.
	Code string `json:"code"`

	// StartLine is the line number in the source file for the first line of
	// the snippet code block. This is not necessarily the same as the value of
	// Range.Start.Line, as it is possible to have zero or more lines of
	// context source code before the diagnostic range starts.
	StartLine int `json:"start_line"`

	// HighlightStartOffset is the character offset into Code at which the
	// diagnostic source range starts, which ought to be highlighted as such by
	// the consumer of this data.
	HighlightStartOffset int `json:"highlight_start_offset"`

	// HighlightEndOffset is the character offset into Code at which the
	// diagnostic source range ends.
	HighlightEndOffset int `json:"highlight_end_offset"`

	// Values is a sorted slice of expression values which may be useful in
	// understanding the source of an error in a complex expression.
	Values []DiagnosticExpressionValue `json:"values"`

	// FunctionCall is information about a function call whose failure is
	// being reported by this diagnostic, if any.
	FunctionCall *DiagnosticFunctionCall `json:"function_call,omitempty"`
}

// DiagnosticExpressionValue represents an HCL traversal string (e.g.
// "var.foo") and a statement about its value while the expression was
// evaluated (e.g. "is a string", "will be known only after apply"). These are
// intended to help the consumer diagnose why an expression caused a diagnostic
// to be emitted.
type DiagnosticExpressionValue struct {
	Traversal string `json:"traversal"`
	Statement string `json:"statement"`
}

// DiagnosticFunctionCall represents a function call whose information is
// being included as part of a diagnostic snippet.
type DiagnosticFunctionCall struct {
	// CalledAs is the full name that was used to call this function,
	// potentially including namespace prefixes if the function does not belong
	// to the default function namespace.
	CalledAs string `json:"called_as"`

	// Signature is a description of the signature of the function that was
	// called, if any. Might be omitted if we're reporting that a call failed
	// because the given function name isn't known, for example.
	Signature *Function `json:"signature,omitempty"`
}
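
// A minimal usage sketch for NewDiagnostic below (the variable names here are
// hypothetical; real callers live in the views packages):
//
//	// sources maps configuration filenames to their raw source bytes,
//	// e.g. as returned by the configuration loader.
//	for _, diag := range diags {
//		jsonDiags = append(jsonDiags, NewDiagnostic(diag, sources))
//	}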

// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources,
// and returns a Diagnostic struct.
func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string][]byte) *Diagnostic {
	var sev string
	switch diag.Severity() {
	case tfdiags.Error:
		sev = DiagnosticSeverityError
	case tfdiags.Warning:
		sev = DiagnosticSeverityWarning
	default:
		sev = DiagnosticSeverityUnknown
	}

	desc := diag.Description()

	diagnostic := &Diagnostic{
		Severity: sev,
		Summary:  desc.Summary,
		Detail:   desc.Detail,
		Address:  desc.Address,
	}

	sourceRefs := diag.Source()
	if sourceRefs.Subject != nil {
		// We'll borrow HCL's range implementation here, because it has some
		// handy features to help us produce a nice source code snippet.
		highlightRange := sourceRefs.Subject.ToHCL()

		// Some diagnostic sources fail to set the end of the subject range.
		if highlightRange.End == (hcl.Pos{}) {
			highlightRange.End = highlightRange.Start
		}

		snippetRange := highlightRange
		if sourceRefs.Context != nil {
			snippetRange = sourceRefs.Context.ToHCL()
		}

		// Make sure the snippet includes the highlight. This should be true
		// for any reasonable diagnostic, but we'll make sure.
		snippetRange = hcl.RangeOver(snippetRange, highlightRange)

		// Empty ranges result in odd diagnostic output, so extend the end to
		// ensure there's at least one byte in the snippet or highlight.
		if snippetRange.Empty() {
			snippetRange.End.Byte++
			snippetRange.End.Column++
		}
		if highlightRange.Empty() {
			highlightRange.End.Byte++
			highlightRange.End.Column++
		}

		diagnostic.Range = &DiagnosticRange{
			Filename: highlightRange.Filename,
			Start: Pos{
				Line:   highlightRange.Start.Line,
				Column: highlightRange.Start.Column,
				Byte:   highlightRange.Start.Byte,
			},
			End: Pos{
				Line:   highlightRange.End.Line,
				Column: highlightRange.End.Column,
				Byte:   highlightRange.End.Byte,
			},
		}

		var src []byte
		if sources != nil {
			src = sources[highlightRange.Filename]
		}

		// If we have a source file for the diagnostic, we can emit a code
		// snippet.
		if src != nil {
			diagnostic.Snippet = &DiagnosticSnippet{
				StartLine: snippetRange.Start.Line,

				// Ensure that the default Values struct is an empty array, as this
				// makes consuming the JSON structure easier in most languages.
				Values: []DiagnosticExpressionValue{},
			}

			file, offset := parseRange(src, highlightRange)

			// Some diagnostics may have a useful top-level context to add to
			// the code snippet output.
			contextStr := hcled.ContextString(file, offset-1)
			if contextStr != "" {
				diagnostic.Snippet.Context = &contextStr
			}

			// Build the string of the code snippet, tracking at which byte of
			// the file the snippet starts.
			var codeStartByte int
			sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
			var code strings.Builder
			for sc.Scan() {
				lineRange := sc.Range()
				if lineRange.Overlaps(snippetRange) {
					if codeStartByte == 0 && code.Len() == 0 {
						codeStartByte = lineRange.Start.Byte
					}
					code.Write(lineRange.SliceBytes(src))
					code.WriteRune('\n')
				}
			}
			codeStr := strings.TrimSuffix(code.String(), "\n")
			diagnostic.Snippet.Code = codeStr

			// Calculate the start and end byte of the highlight range relative
			// to the code snippet string.
			start := highlightRange.Start.Byte - codeStartByte
			end := start + (highlightRange.End.Byte - highlightRange.Start.Byte)

			// We can end up with some quirky results here in edge cases like
			// when a source range starts or ends at a newline character,
			// so we'll cap the results at the bounds of the highlight range
			// so that consumers of this data don't need to contend with
			// out-of-bounds errors themselves.
			if start < 0 {
				start = 0
			} else if start > len(codeStr) {
				start = len(codeStr)
			}
			if end < 0 {
				end = 0
			} else if end > len(codeStr) {
				end = len(codeStr)
			}

			diagnostic.Snippet.HighlightStartOffset = start
			diagnostic.Snippet.HighlightEndOffset = end

			if fromExpr := diag.FromExpr(); fromExpr != nil {
				// We may also be able to generate information about the dynamic
				// values of relevant variables at the point of evaluation, then.
				// This is particularly useful for expressions that get evaluated
				// multiple times with different values, such as blocks using
				// "count" and "for_each", or within "for" expressions.
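				// Each relevant variable reference becomes one
				// DiagnosticExpressionValue entry below, for example
				// {"traversal": "count.index", "statement": "is 0"}
				// (illustrative values).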
				expr := fromExpr.Expression
				ctx := fromExpr.EvalContext
				vars := expr.Variables()
				values := make([]DiagnosticExpressionValue, 0, len(vars))
				seen := make(map[string]struct{}, len(vars))
				includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag)
				includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag)
			Traversals:
				for _, traversal := range vars {
					for len(traversal) > 1 {
						val, diags := traversal.TraverseAbs(ctx)
						if diags.HasErrors() {
							// Skip anything that generates errors, since we probably
							// already have the same error in our diagnostics set
							// already.
							traversal = traversal[:len(traversal)-1]
							continue
						}

						traversalStr := traversalStr(traversal)
						if _, exists := seen[traversalStr]; exists {
							continue Traversals // don't show duplicates when the same variable is referenced multiple times
						}
						value := DiagnosticExpressionValue{
							Traversal: traversalStr,
						}
						switch {
						case val.HasMark(marks.Sensitive):
							// We only mention a sensitive value if the diagnostic
							// we're rendering is explicitly marked as being
							// caused by sensitive values, because otherwise
							// readers tend to be misled into thinking the error
							// is caused by the sensitive value even when it isn't.
							if !includeSensitive {
								continue Traversals
							}
							// Even when we do mention one, we keep it vague
							// in order to minimize the chance of giving away
							// whatever was sensitive about it.
							value.Statement = "has a sensitive value"
						case !val.IsKnown():
							// We'll avoid saying anything about unknown or
							// "known after apply" unless the diagnostic is
							// explicitly marked as being caused by unknown
							// values, because otherwise readers tend to be
							// misled into thinking the error is caused by the
							// unknown value even when it isn't.
							if ty := val.Type(); ty != cty.DynamicPseudoType {
								if includeUnknown {
									value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
								} else {
									value.Statement = fmt.Sprintf("is a %s", ty.FriendlyName())
								}
							} else {
								if !includeUnknown {
									continue Traversals
								}
								value.Statement = "will be known only after apply"
							}
						default:
							value.Statement = fmt.Sprintf("is %s", compactValueStr(val))
						}
						values = append(values, value)
						seen[traversalStr] = struct{}{}
					}
				}
				sort.Slice(values, func(i, j int) bool {
					return values[i].Traversal < values[j].Traversal
				})
				diagnostic.Snippet.Values = values

				if callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag); callInfo != nil && callInfo.CalledFunctionName() != "" {
					calledAs := callInfo.CalledFunctionName()
					baseName := calledAs
					if idx := strings.LastIndex(baseName, "::"); idx >= 0 {
						baseName = baseName[idx+2:]
					}
					callInfo := &DiagnosticFunctionCall{
						CalledAs: calledAs,
					}
					if f, ok := ctx.Functions[calledAs]; ok {
						callInfo.Signature = DescribeFunction(baseName, f)
					}
					diagnostic.Snippet.FunctionCall = callInfo
				}

			}

		}
	}

	return diagnostic
}
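
// As a hypothetical illustration of the snippet offsets computed above: if the
// snippet Code were `x = var.foo + 1` and the highlighted source range covered
// `var.foo`, then HighlightStartOffset would be 4 and HighlightEndOffset 11.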

func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) {
	filename := rng.Filename
	offset := rng.Start.Byte

	// We need to re-parse here to get a *hcl.File we can interrogate. This
	// is not awesome since we presumably already parsed the file earlier too,
	// but this re-parsing is architecturally simpler than retaining all of
	// the hcl.File objects and we only do this in the case of an error anyway
	// so the overhead here is not a big problem.
	parser := hclparse.NewParser()
	var file *hcl.File

	// Ignore diagnostics here as there is nothing we can do with them.
	if strings.HasSuffix(filename, ".json") {
		file, _ = parser.ParseJSON(src, filename)
	} else {
		file, _ = parser.ParseHCL(src, filename)
	}

	return file, offset
}

// compactValueStr produces a compact, single-line summary of a given value
// that is suitable for display in the UI.
//
// For primitives it returns a full representation, while for more complex
// types it instead summarizes the type, size, etc to produce something
// that is hopefully still somewhat useful but not as verbose as a rendering
// of the entire data structure.
func compactValueStr(val cty.Value) string {
	// This is a specialized subset of value rendering tailored to producing
	// helpful but concise messages in diagnostics. It is not comprehensive
	// nor intended to be used for other purposes.

	if val.HasMark(marks.Sensitive) {
		// We check this in here just to make sure, but note that the caller
		// of compactValueStr ought to have already checked this and skipped
		// calling into compactValueStr anyway, so this shouldn't actually
		// be reachable.
		return "(sensitive value)"
	}

	// WARNING: We've only checked that the value isn't sensitive _shallowly_
	// here, and so we must never show any element values from complex types
	// in here. However, it's fine to show map keys and attribute names because
	// those are never sensitive in isolation: the entire value would be
	// sensitive in that case.

	ty := val.Type()
	switch {
	case val.IsNull():
		return "null"
	case !val.IsKnown():
		// Should never happen here because we should filter before we get
		// in here, but we'll do something reasonable rather than panic.
		return "(not yet known)"
	case ty == cty.Bool:
		if val.True() {
			return "true"
		}
		return "false"
	case ty == cty.Number:
		bf := val.AsBigFloat()
		return bf.Text('g', 10)
	case ty == cty.String:
		// Go string syntax is not exactly the same as HCL native string syntax,
		// but we'll accept the minor edge-cases where this is different here
		// for now, just to get something reasonable here.
		return fmt.Sprintf("%q", val.AsString())
	case ty.IsCollectionType() || ty.IsTupleType():
		l := val.LengthInt()
		switch l {
		case 0:
			return "empty " + ty.FriendlyName()
		case 1:
			return ty.FriendlyName() + " with 1 element"
		default:
			return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
		}
	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		l := len(atys)
		switch l {
		case 0:
			return "object with no attributes"
		case 1:
			var name string
			for k := range atys {
				name = k
			}
			return fmt.Sprintf("object with 1 attribute %q", name)
		default:
			return fmt.Sprintf("object with %d attributes", l)
		}
	default:
		return ty.FriendlyName()
	}
}
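
// Some illustrative compactValueStr results, following the cases above (the
// input values are hypothetical):
//
//	cty.StringVal("ami-123")        -> "ami-123"
//	cty.NumberIntVal(5)             -> 5
//	cty.NullVal(cty.String)         -> null
//	a list of string with 3 entries -> list of string with 3 elements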

// traversalStr produces a representation of an HCL traversal that is compact,
// resembles HCL native syntax, and is suitable for display in the UI.
func traversalStr(traversal hcl.Traversal) string {
	// This is a specialized subset of traversal rendering tailored to
	// producing helpful contextual messages in diagnostics. It is not
	// comprehensive nor intended to be used for other purposes.

	var buf bytes.Buffer
	for _, step := range traversal {
		switch tStep := step.(type) {
		case hcl.TraverseRoot:
			buf.WriteString(tStep.Name)
		case hcl.TraverseAttr:
			buf.WriteByte('.')
			buf.WriteString(tStep.Name)
		case hcl.TraverseIndex:
			buf.WriteByte('[')
			if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
				buf.WriteString(compactValueStr(tStep.Key))
			} else {
				// We'll just use a placeholder for more complex values,
				// since otherwise our result could grow ridiculously long.
				buf.WriteString("...")
			}
			buf.WriteByte(']')
		}
	}
	return buf.String()
}
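
// Likewise, some illustrative traversalStr renderings (the traversals are
// hypothetical):
//
//	var.foo              -> var.foo
//	aws_instance.a[0].id -> aws_instance.a[0].id
//	local.map["key"]     -> local.map["key"]
//
// Index keys that are not primitive values are rendered as [...] to keep the
// result short.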