golang.org/toolchain@v0.0.1-go1.9rc2.windows-amd64/src/cmd/vendor/github.com/google/pprof/profile/merge.go

// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
	p, _ = Merge([]*Profile{p})
	return p
}

// Merge merges all the profiles in srcs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical sample and
// period types or the merge will fail. The Period of the resulting
// profile will be the maximum of all input periods, and its TimeNanos
// will be the earliest nonzero one.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}

func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID map[uint64]*Location
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
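	// Unlike the per-profile tables above, these tables persist across all
	// source profiles and key entities by content rather than by ID, so
	// identical entities from different profiles collapse into one.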
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

type mapInfo struct {
	m      *Mapping
	offset int64
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		vv := make([]int64, len(v))
		copy(vv, v)
		s.NumLabel[k] = vv
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping. Add current values to the
	// existing sample.
	k := s.key()
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
	ids := make([]string, len(sample.Location))
	for i, l := range sample.Location {
		ids[i] = strconv.FormatUint(l.ID, 16)
	}

	labels := make([]string, 0, len(sample.Label))
	for k, v := range sample.Label {
		labels = append(labels, fmt.Sprintf("%q%q", k, v))
	}
	sort.Strings(labels)

	numlabels := make([]string, 0, len(sample.NumLabel))
	for k, v := range sample.NumLabel {
		numlabels = append(numlabels, fmt.Sprintf("%q%x", k, v))
	}
	sort.Strings(numlabels)

	return sampleKey{
		strings.Join(ids, "|"),
		strings.Join(labels, ""),
		strings.Join(numlabels, ""),
	}
}

type sampleKey struct {
	locations string
	labels    string
	numlabels string
}

func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l, ok := pm.locationsByID[src.ID]; ok {
		pm.locationsByID[src.ID] = l
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:      uint64(len(pm.p.Location) + 1),
		Mapping: mi.m,
		Address: uint64(int64(src.Address) + mi.offset),
		Line:    make([]Line, len(src.Line)),
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID[src.ID] = ll
		return ll
	}
	pm.locationsByID[src.ID] = l
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}

// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
	key := locationKey{
		addr: l.Address,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
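		// Subtracting the mapping start turns the absolute address into an
		// offset within the mapping, so the same code loaded at different
		// base addresses in different profiles produces the same key.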
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}

type locationKey struct {
	addr, mappingID uint64
	lines           string
}

func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	bk, pk := src.key()
	if src.BuildID != "" {
		if m, ok := pm.mappings[bk]; ok {
			mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
			pm.mappingsByID[src.ID] = mi
			return mi
		}
	}
	if src.File != "" {
		if m, ok := pm.mappings[pk]; ok {
			mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
			pm.mappingsByID[src.ID] = mi
			return mi
		}
	}
	m := &Mapping{
		ID:              uint64(len(pm.p.Mapping) + 1),
		Start:           src.Start,
		Limit:           src.Limit,
		Offset:          src.Offset,
		File:            src.File,
		BuildID:         src.BuildID,
		HasFunctions:    src.HasFunctions,
		HasFilenames:    src.HasFilenames,
		HasLineNumbers:  src.HasLineNumbers,
		HasInlineFrames: src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	if m.BuildID != "" {
		pm.mappings[bk] = m
	}
	if m.File != "" {
		pm.mappings[pk] = m
	}
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}

// key generates encoded strings of Mapping to be used as a key for
// maps. The first key represents only the build id, while the second
// represents only the file path.
func (m *Mapping) key() (buildIDKey, pathKey mappingKey) {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000

	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)

	buildIDKey = mappingKey{
		size,
		m.Offset,
		m.BuildID,
	}

	pathKey = mappingKey{
		size,
		m.Offset,
		m.File,
	}
	return
}

type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}

func (pm *profileMerger) mapLine(src Line) Line {
	ln := Line{
		Function: pm.mapFunction(src.Function),
		Line:     src.Line,
	}
	return ln
}

func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}

// key generates a struct to be used as a key for maps.
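// Two functions are treated as the same entity when their start line,
// name, system name and file name all match.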
func (f *Function) key() functionKey {
	return functionKey{
		f.StartLine,
		f.Name,
		f.SystemName,
		f.Filename,
	}
}

type functionKey struct {
	startLine                  int64
	name, systemName, fileName string
}

// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		comments = append(comments, s.Comments...)
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}

// compatible determines if two profiles can be compared/merged.
// It returns nil if the profiles are compatible; otherwise it returns an
// error with details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}

	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}

	return nil
}

// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
}
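
// Usage sketch (illustrative only): a caller might combine several parsed
// profiles with Merge and rely on the compaction it performs. Parse is the
// profile reader exported by this package; the mergeFiles helper, the input
// paths, and the use of os.Open are assumptions made for the example.
//
//	func mergeFiles(paths []string) (*Profile, error) {
//		var srcs []*Profile
//		for _, path := range paths {
//			f, err := os.Open(path) // hypothetical input files
//			if err != nil {
//				return nil, err
//			}
//			p, err := Parse(f)
//			f.Close()
//			if err != nil {
//				return nil, err
//			}
//			srcs = append(srcs, p)
//		}
//		// combineHeaders rejects profiles whose sample or period types
//		// differ, so Merge fails cleanly on incompatible inputs.
//		return Merge(srcs)
//	}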