github.com/kubewharf/katalyst-core@v0.5.3/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/cpu.pb.go (about) 1 /* 2 Copyright 2022 The Katalyst Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 16 // source: cpu.proto 17 18 package cpuadvisor 19 20 import ( 21 context "context" 22 fmt "fmt" 23 _ "github.com/gogo/protobuf/gogoproto" 24 proto "github.com/gogo/protobuf/proto" 25 github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" 26 advisorsvc "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/advisorsvc" 27 grpc "google.golang.org/grpc" 28 codes "google.golang.org/grpc/codes" 29 status "google.golang.org/grpc/status" 30 io "io" 31 math "math" 32 math_bits "math/bits" 33 reflect "reflect" 34 strings "strings" 35 ) 36 37 // Reference imports to suppress errors if they are not otherwise used. 38 var _ = proto.Marshal 39 var _ = fmt.Errorf 40 var _ = math.Inf 41 42 // This is a compile-time assertion to ensure that this generated file 43 // is compatible with the proto package it is being compiled against. 44 // A compilation error at this line likely means your copy of the 45 // proto package needs to be updated. 
46 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package 47 48 type OverlapType int32 49 50 const ( 51 OverlapType_OverlapWithPod OverlapType = 0 52 OverlapType_OverlapWithPool OverlapType = 1 53 ) 54 55 var OverlapType_name = map[int32]string{ 56 0: "OverlapWithPod", 57 1: "OverlapWithPool", 58 } 59 60 var OverlapType_value = map[string]int32{ 61 "OverlapWithPod": 0, 62 "OverlapWithPool": 1, 63 } 64 65 func (x OverlapType) String() string { 66 return proto.EnumName(OverlapType_name, int32(x)) 67 } 68 69 func (OverlapType) EnumDescriptor() ([]byte, []int) { 70 return fileDescriptor_08fc9a87e8768c24, []int{0} 71 } 72 73 type ListAndWatchResponse struct { 74 Entries map[string]*CalculationEntries `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 75 AllowSharedCoresOverlapReclaimedCores bool `protobuf:"varint,2,opt,name=allow_shared_cores_overlap_reclaimed_cores,json=allowSharedCoresOverlapReclaimedCores,proto3" json:"allow_shared_cores_overlap_reclaimed_cores,omitempty"` 76 XXX_NoUnkeyedLiteral struct{} `json:"-"` 77 XXX_sizecache int32 `json:"-"` 78 } 79 80 func (m *ListAndWatchResponse) Reset() { *m = ListAndWatchResponse{} } 81 func (*ListAndWatchResponse) ProtoMessage() {} 82 func (*ListAndWatchResponse) Descriptor() ([]byte, []int) { 83 return fileDescriptor_08fc9a87e8768c24, []int{0} 84 } 85 func (m *ListAndWatchResponse) XXX_Unmarshal(b []byte) error { 86 return m.Unmarshal(b) 87 } 88 func (m *ListAndWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 89 if deterministic { 90 return xxx_messageInfo_ListAndWatchResponse.Marshal(b, m, deterministic) 91 } else { 92 b = b[:cap(b)] 93 n, err := m.MarshalToSizedBuffer(b) 94 if err != nil { 95 return nil, err 96 } 97 return b[:n], nil 98 } 99 } 100 func (m *ListAndWatchResponse) XXX_Merge(src proto.Message) { 101 xxx_messageInfo_ListAndWatchResponse.Merge(m, src) 
102 } 103 func (m *ListAndWatchResponse) XXX_Size() int { 104 return m.Size() 105 } 106 func (m *ListAndWatchResponse) XXX_DiscardUnknown() { 107 xxx_messageInfo_ListAndWatchResponse.DiscardUnknown(m) 108 } 109 110 var xxx_messageInfo_ListAndWatchResponse proto.InternalMessageInfo 111 112 func (m *ListAndWatchResponse) GetEntries() map[string]*CalculationEntries { 113 if m != nil { 114 return m.Entries 115 } 116 return nil 117 } 118 119 func (m *ListAndWatchResponse) GetAllowSharedCoresOverlapReclaimedCores() bool { 120 if m != nil { 121 return m.AllowSharedCoresOverlapReclaimedCores 122 } 123 return false 124 } 125 126 type CalculationEntries struct { 127 Entries map[string]*CalculationInfo `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 128 XXX_NoUnkeyedLiteral struct{} `json:"-"` 129 XXX_sizecache int32 `json:"-"` 130 } 131 132 func (m *CalculationEntries) Reset() { *m = CalculationEntries{} } 133 func (*CalculationEntries) ProtoMessage() {} 134 func (*CalculationEntries) Descriptor() ([]byte, []int) { 135 return fileDescriptor_08fc9a87e8768c24, []int{1} 136 } 137 func (m *CalculationEntries) XXX_Unmarshal(b []byte) error { 138 return m.Unmarshal(b) 139 } 140 func (m *CalculationEntries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 141 if deterministic { 142 return xxx_messageInfo_CalculationEntries.Marshal(b, m, deterministic) 143 } else { 144 b = b[:cap(b)] 145 n, err := m.MarshalToSizedBuffer(b) 146 if err != nil { 147 return nil, err 148 } 149 return b[:n], nil 150 } 151 } 152 func (m *CalculationEntries) XXX_Merge(src proto.Message) { 153 xxx_messageInfo_CalculationEntries.Merge(m, src) 154 } 155 func (m *CalculationEntries) XXX_Size() int { 156 return m.Size() 157 } 158 func (m *CalculationEntries) XXX_DiscardUnknown() { 159 xxx_messageInfo_CalculationEntries.DiscardUnknown(m) 160 } 161 162 var xxx_messageInfo_CalculationEntries 
proto.InternalMessageInfo 163 164 func (m *CalculationEntries) GetEntries() map[string]*CalculationInfo { 165 if m != nil { 166 return m.Entries 167 } 168 return nil 169 } 170 171 type CalculationInfo struct { 172 // valid values: 173 // 1. "dedicated" (dedicated_cores container with or without numa_biding) 174 // 2. real pool name (shared_cores container entries and pool entries),, including: 175 // - common pools (eg. share, reclaim, flink, batch, bmq) 176 // - pools generated by qos aware server containing isolated shared_cores containers (eg. isolation0, isolation1, ...) 177 OwnerPoolName string `protobuf:"bytes,1,opt,name=owner_pool_name,json=ownerPoolName,proto3" json:"owner_pool_name,omitempty"` 178 CalculationResultsByNumas map[int64]*NumaCalculationResult `protobuf:"bytes,2,rep,name=calculation_results_by_numas,json=calculationResultsByNumas,proto3" json:"calculation_results_by_numas,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 179 XXX_NoUnkeyedLiteral struct{} `json:"-"` 180 XXX_sizecache int32 `json:"-"` 181 } 182 183 func (m *CalculationInfo) Reset() { *m = CalculationInfo{} } 184 func (*CalculationInfo) ProtoMessage() {} 185 func (*CalculationInfo) Descriptor() ([]byte, []int) { 186 return fileDescriptor_08fc9a87e8768c24, []int{2} 187 } 188 func (m *CalculationInfo) XXX_Unmarshal(b []byte) error { 189 return m.Unmarshal(b) 190 } 191 func (m *CalculationInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 192 if deterministic { 193 return xxx_messageInfo_CalculationInfo.Marshal(b, m, deterministic) 194 } else { 195 b = b[:cap(b)] 196 n, err := m.MarshalToSizedBuffer(b) 197 if err != nil { 198 return nil, err 199 } 200 return b[:n], nil 201 } 202 } 203 func (m *CalculationInfo) XXX_Merge(src proto.Message) { 204 xxx_messageInfo_CalculationInfo.Merge(m, src) 205 } 206 func (m *CalculationInfo) XXX_Size() int { 207 return m.Size() 208 } 209 func (m *CalculationInfo) XXX_DiscardUnknown() { 
210 xxx_messageInfo_CalculationInfo.DiscardUnknown(m) 211 } 212 213 var xxx_messageInfo_CalculationInfo proto.InternalMessageInfo 214 215 func (m *CalculationInfo) GetOwnerPoolName() string { 216 if m != nil { 217 return m.OwnerPoolName 218 } 219 return "" 220 } 221 222 func (m *CalculationInfo) GetCalculationResultsByNumas() map[int64]*NumaCalculationResult { 223 if m != nil { 224 return m.CalculationResultsByNumas 225 } 226 return nil 227 } 228 229 type NumaCalculationResult struct { 230 // every block doesn't overlap with other blocks in same NumaCalculationResult 231 Blocks []*Block `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` 232 XXX_NoUnkeyedLiteral struct{} `json:"-"` 233 XXX_sizecache int32 `json:"-"` 234 } 235 236 func (m *NumaCalculationResult) Reset() { *m = NumaCalculationResult{} } 237 func (*NumaCalculationResult) ProtoMessage() {} 238 func (*NumaCalculationResult) Descriptor() ([]byte, []int) { 239 return fileDescriptor_08fc9a87e8768c24, []int{3} 240 } 241 func (m *NumaCalculationResult) XXX_Unmarshal(b []byte) error { 242 return m.Unmarshal(b) 243 } 244 func (m *NumaCalculationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 245 if deterministic { 246 return xxx_messageInfo_NumaCalculationResult.Marshal(b, m, deterministic) 247 } else { 248 b = b[:cap(b)] 249 n, err := m.MarshalToSizedBuffer(b) 250 if err != nil { 251 return nil, err 252 } 253 return b[:n], nil 254 } 255 } 256 func (m *NumaCalculationResult) XXX_Merge(src proto.Message) { 257 xxx_messageInfo_NumaCalculationResult.Merge(m, src) 258 } 259 func (m *NumaCalculationResult) XXX_Size() int { 260 return m.Size() 261 } 262 func (m *NumaCalculationResult) XXX_DiscardUnknown() { 263 xxx_messageInfo_NumaCalculationResult.DiscardUnknown(m) 264 } 265 266 var xxx_messageInfo_NumaCalculationResult proto.InternalMessageInfo 267 268 func (m *NumaCalculationResult) GetBlocks() []*Block { 269 if m != nil { 270 return m.Blocks 271 } 272 return nil 273 } 274 275 
type Block struct { 276 Result uint64 `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` 277 OverlapTargets []*OverlapTarget `protobuf:"bytes,2,rep,name=overlap_targets,json=overlapTargets,proto3" json:"overlap_targets,omitempty"` 278 BlockId string `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` 279 XXX_NoUnkeyedLiteral struct{} `json:"-"` 280 XXX_sizecache int32 `json:"-"` 281 } 282 283 func (m *Block) Reset() { *m = Block{} } 284 func (*Block) ProtoMessage() {} 285 func (*Block) Descriptor() ([]byte, []int) { 286 return fileDescriptor_08fc9a87e8768c24, []int{4} 287 } 288 func (m *Block) XXX_Unmarshal(b []byte) error { 289 return m.Unmarshal(b) 290 } 291 func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 292 if deterministic { 293 return xxx_messageInfo_Block.Marshal(b, m, deterministic) 294 } else { 295 b = b[:cap(b)] 296 n, err := m.MarshalToSizedBuffer(b) 297 if err != nil { 298 return nil, err 299 } 300 return b[:n], nil 301 } 302 } 303 func (m *Block) XXX_Merge(src proto.Message) { 304 xxx_messageInfo_Block.Merge(m, src) 305 } 306 func (m *Block) XXX_Size() int { 307 return m.Size() 308 } 309 func (m *Block) XXX_DiscardUnknown() { 310 xxx_messageInfo_Block.DiscardUnknown(m) 311 } 312 313 var xxx_messageInfo_Block proto.InternalMessageInfo 314 315 func (m *Block) GetResult() uint64 { 316 if m != nil { 317 return m.Result 318 } 319 return 0 320 } 321 322 func (m *Block) GetOverlapTargets() []*OverlapTarget { 323 if m != nil { 324 return m.OverlapTargets 325 } 326 return nil 327 } 328 329 func (m *Block) GetBlockId() string { 330 if m != nil { 331 return m.BlockId 332 } 333 return "" 334 } 335 336 type OverlapTarget struct { 337 OverlapTargetPoolName string `protobuf:"bytes,1,opt,name=overlap_target_pool_name,json=overlapTargetPoolName,proto3" json:"overlap_target_pool_name,omitempty"` 338 OverlapTargetPodUid string 
`protobuf:"bytes,2,opt,name=overlap_target_pod_uid,json=overlapTargetPodUid,proto3" json:"overlap_target_pod_uid,omitempty"` 339 OverlapTargetContainerName string `protobuf:"bytes,3,opt,name=overlap_target_container_name,json=overlapTargetContainerName,proto3" json:"overlap_target_container_name,omitempty"` 340 OverlapType OverlapType `protobuf:"varint,4,opt,name=overlap_type,json=overlapType,proto3,enum=cpuadvisor.OverlapType" json:"overlap_type,omitempty"` 341 XXX_NoUnkeyedLiteral struct{} `json:"-"` 342 XXX_sizecache int32 `json:"-"` 343 } 344 345 func (m *OverlapTarget) Reset() { *m = OverlapTarget{} } 346 func (*OverlapTarget) ProtoMessage() {} 347 func (*OverlapTarget) Descriptor() ([]byte, []int) { 348 return fileDescriptor_08fc9a87e8768c24, []int{5} 349 } 350 func (m *OverlapTarget) XXX_Unmarshal(b []byte) error { 351 return m.Unmarshal(b) 352 } 353 func (m *OverlapTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 354 if deterministic { 355 return xxx_messageInfo_OverlapTarget.Marshal(b, m, deterministic) 356 } else { 357 b = b[:cap(b)] 358 n, err := m.MarshalToSizedBuffer(b) 359 if err != nil { 360 return nil, err 361 } 362 return b[:n], nil 363 } 364 } 365 func (m *OverlapTarget) XXX_Merge(src proto.Message) { 366 xxx_messageInfo_OverlapTarget.Merge(m, src) 367 } 368 func (m *OverlapTarget) XXX_Size() int { 369 return m.Size() 370 } 371 func (m *OverlapTarget) XXX_DiscardUnknown() { 372 xxx_messageInfo_OverlapTarget.DiscardUnknown(m) 373 } 374 375 var xxx_messageInfo_OverlapTarget proto.InternalMessageInfo 376 377 func (m *OverlapTarget) GetOverlapTargetPoolName() string { 378 if m != nil { 379 return m.OverlapTargetPoolName 380 } 381 return "" 382 } 383 384 func (m *OverlapTarget) GetOverlapTargetPodUid() string { 385 if m != nil { 386 return m.OverlapTargetPodUid 387 } 388 return "" 389 } 390 391 func (m *OverlapTarget) GetOverlapTargetContainerName() string { 392 if m != nil { 393 return m.OverlapTargetContainerName 394 } 395 return 
"" 396 } 397 398 func (m *OverlapTarget) GetOverlapType() OverlapType { 399 if m != nil { 400 return m.OverlapType 401 } 402 return OverlapType_OverlapWithPod 403 } 404 405 type GetCheckpointRequest struct { 406 XXX_NoUnkeyedLiteral struct{} `json:"-"` 407 XXX_sizecache int32 `json:"-"` 408 } 409 410 func (m *GetCheckpointRequest) Reset() { *m = GetCheckpointRequest{} } 411 func (*GetCheckpointRequest) ProtoMessage() {} 412 func (*GetCheckpointRequest) Descriptor() ([]byte, []int) { 413 return fileDescriptor_08fc9a87e8768c24, []int{6} 414 } 415 func (m *GetCheckpointRequest) XXX_Unmarshal(b []byte) error { 416 return m.Unmarshal(b) 417 } 418 func (m *GetCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 419 if deterministic { 420 return xxx_messageInfo_GetCheckpointRequest.Marshal(b, m, deterministic) 421 } else { 422 b = b[:cap(b)] 423 n, err := m.MarshalToSizedBuffer(b) 424 if err != nil { 425 return nil, err 426 } 427 return b[:n], nil 428 } 429 } 430 func (m *GetCheckpointRequest) XXX_Merge(src proto.Message) { 431 xxx_messageInfo_GetCheckpointRequest.Merge(m, src) 432 } 433 func (m *GetCheckpointRequest) XXX_Size() int { 434 return m.Size() 435 } 436 func (m *GetCheckpointRequest) XXX_DiscardUnknown() { 437 xxx_messageInfo_GetCheckpointRequest.DiscardUnknown(m) 438 } 439 440 var xxx_messageInfo_GetCheckpointRequest proto.InternalMessageInfo 441 442 type GetCheckpointResponse struct { 443 Entries map[string]*AllocationEntries `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 444 XXX_NoUnkeyedLiteral struct{} `json:"-"` 445 XXX_sizecache int32 `json:"-"` 446 } 447 448 func (m *GetCheckpointResponse) Reset() { *m = GetCheckpointResponse{} } 449 func (*GetCheckpointResponse) ProtoMessage() {} 450 func (*GetCheckpointResponse) Descriptor() ([]byte, []int) { 451 return fileDescriptor_08fc9a87e8768c24, []int{7} 452 } 453 func (m 
*GetCheckpointResponse) XXX_Unmarshal(b []byte) error { 454 return m.Unmarshal(b) 455 } 456 func (m *GetCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 457 if deterministic { 458 return xxx_messageInfo_GetCheckpointResponse.Marshal(b, m, deterministic) 459 } else { 460 b = b[:cap(b)] 461 n, err := m.MarshalToSizedBuffer(b) 462 if err != nil { 463 return nil, err 464 } 465 return b[:n], nil 466 } 467 } 468 func (m *GetCheckpointResponse) XXX_Merge(src proto.Message) { 469 xxx_messageInfo_GetCheckpointResponse.Merge(m, src) 470 } 471 func (m *GetCheckpointResponse) XXX_Size() int { 472 return m.Size() 473 } 474 func (m *GetCheckpointResponse) XXX_DiscardUnknown() { 475 xxx_messageInfo_GetCheckpointResponse.DiscardUnknown(m) 476 } 477 478 var xxx_messageInfo_GetCheckpointResponse proto.InternalMessageInfo 479 480 func (m *GetCheckpointResponse) GetEntries() map[string]*AllocationEntries { 481 if m != nil { 482 return m.Entries 483 } 484 return nil 485 } 486 487 type AllocationEntries struct { 488 Entries map[string]*AllocationInfo `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 489 XXX_NoUnkeyedLiteral struct{} `json:"-"` 490 XXX_sizecache int32 `json:"-"` 491 } 492 493 func (m *AllocationEntries) Reset() { *m = AllocationEntries{} } 494 func (*AllocationEntries) ProtoMessage() {} 495 func (*AllocationEntries) Descriptor() ([]byte, []int) { 496 return fileDescriptor_08fc9a87e8768c24, []int{8} 497 } 498 func (m *AllocationEntries) XXX_Unmarshal(b []byte) error { 499 return m.Unmarshal(b) 500 } 501 func (m *AllocationEntries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 502 if deterministic { 503 return xxx_messageInfo_AllocationEntries.Marshal(b, m, deterministic) 504 } else { 505 b = b[:cap(b)] 506 n, err := m.MarshalToSizedBuffer(b) 507 if err != nil { 508 return nil, err 509 } 510 return b[:n], nil 511 } 512 } 
513 func (m *AllocationEntries) XXX_Merge(src proto.Message) { 514 xxx_messageInfo_AllocationEntries.Merge(m, src) 515 } 516 func (m *AllocationEntries) XXX_Size() int { 517 return m.Size() 518 } 519 func (m *AllocationEntries) XXX_DiscardUnknown() { 520 xxx_messageInfo_AllocationEntries.DiscardUnknown(m) 521 } 522 523 var xxx_messageInfo_AllocationEntries proto.InternalMessageInfo 524 525 func (m *AllocationEntries) GetEntries() map[string]*AllocationInfo { 526 if m != nil { 527 return m.Entries 528 } 529 return nil 530 } 531 532 type AllocationInfo struct { 533 RampUp bool `protobuf:"varint,1,opt,name=ramp_up,json=rampUp,proto3" json:"ramp_up,omitempty"` 534 // owner_pool_name indicates the real pool this entry belongs to, it may equal to 535 // 1. real pool name shows up explicitly in GetCheckpointResponse, including: 536 // - common pools (eg. share, reclaim, flink, batch, bmq) 537 // - pools generated by qos aware server containing isolated shared_cores containers (eg. isolation0, isolation1, ...) 538 // 2. "dedicated" (dedicated_cores container with or without numa_biding) 539 // 3. "fallback" (dedicated_cores without numa_binding will be put to this fake pool when it can't allocate isolated cpuset for them), there is no AllocationInfo for this fake pool 540 // 4. 
empty (the entry is ramping up) 541 OwnerPoolName string `protobuf:"bytes,2,opt,name=owner_pool_name,json=ownerPoolName,proto3" json:"owner_pool_name,omitempty"` 542 TopologyAwareAssignments map[uint64]string `protobuf:"bytes,3,rep,name=topology_aware_assignments,json=topologyAwareAssignments,proto3" json:"topology_aware_assignments,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 543 OriginalTopologyAwareAssignments map[uint64]string `protobuf:"bytes,4,rep,name=original_topology_aware_assignments,json=originalTopologyAwareAssignments,proto3" json:"original_topology_aware_assignments,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` 544 XXX_NoUnkeyedLiteral struct{} `json:"-"` 545 XXX_sizecache int32 `json:"-"` 546 } 547 548 func (m *AllocationInfo) Reset() { *m = AllocationInfo{} } 549 func (*AllocationInfo) ProtoMessage() {} 550 func (*AllocationInfo) Descriptor() ([]byte, []int) { 551 return fileDescriptor_08fc9a87e8768c24, []int{9} 552 } 553 func (m *AllocationInfo) XXX_Unmarshal(b []byte) error { 554 return m.Unmarshal(b) 555 } 556 func (m *AllocationInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 557 if deterministic { 558 return xxx_messageInfo_AllocationInfo.Marshal(b, m, deterministic) 559 } else { 560 b = b[:cap(b)] 561 n, err := m.MarshalToSizedBuffer(b) 562 if err != nil { 563 return nil, err 564 } 565 return b[:n], nil 566 } 567 } 568 func (m *AllocationInfo) XXX_Merge(src proto.Message) { 569 xxx_messageInfo_AllocationInfo.Merge(m, src) 570 } 571 func (m *AllocationInfo) XXX_Size() int { 572 return m.Size() 573 } 574 func (m *AllocationInfo) XXX_DiscardUnknown() { 575 xxx_messageInfo_AllocationInfo.DiscardUnknown(m) 576 } 577 578 var xxx_messageInfo_AllocationInfo proto.InternalMessageInfo 579 580 func (m *AllocationInfo) GetRampUp() bool { 581 if m != nil { 582 return m.RampUp 583 } 584 return false 585 } 586 587 func (m 
*AllocationInfo) GetOwnerPoolName() string { 588 if m != nil { 589 return m.OwnerPoolName 590 } 591 return "" 592 } 593 594 func (m *AllocationInfo) GetTopologyAwareAssignments() map[uint64]string { 595 if m != nil { 596 return m.TopologyAwareAssignments 597 } 598 return nil 599 } 600 601 func (m *AllocationInfo) GetOriginalTopologyAwareAssignments() map[uint64]string { 602 if m != nil { 603 return m.OriginalTopologyAwareAssignments 604 } 605 return nil 606 } 607 608 func init() { 609 proto.RegisterEnum("cpuadvisor.OverlapType", OverlapType_name, OverlapType_value) 610 proto.RegisterType((*ListAndWatchResponse)(nil), "cpuadvisor.ListAndWatchResponse") 611 proto.RegisterMapType((map[string]*CalculationEntries)(nil), "cpuadvisor.ListAndWatchResponse.EntriesEntry") 612 proto.RegisterType((*CalculationEntries)(nil), "cpuadvisor.CalculationEntries") 613 proto.RegisterMapType((map[string]*CalculationInfo)(nil), "cpuadvisor.CalculationEntries.EntriesEntry") 614 proto.RegisterType((*CalculationInfo)(nil), "cpuadvisor.CalculationInfo") 615 proto.RegisterMapType((map[int64]*NumaCalculationResult)(nil), "cpuadvisor.CalculationInfo.CalculationResultsByNumasEntry") 616 proto.RegisterType((*NumaCalculationResult)(nil), "cpuadvisor.NumaCalculationResult") 617 proto.RegisterType((*Block)(nil), "cpuadvisor.Block") 618 proto.RegisterType((*OverlapTarget)(nil), "cpuadvisor.OverlapTarget") 619 proto.RegisterType((*GetCheckpointRequest)(nil), "cpuadvisor.GetCheckpointRequest") 620 proto.RegisterType((*GetCheckpointResponse)(nil), "cpuadvisor.GetCheckpointResponse") 621 proto.RegisterMapType((map[string]*AllocationEntries)(nil), "cpuadvisor.GetCheckpointResponse.EntriesEntry") 622 proto.RegisterType((*AllocationEntries)(nil), "cpuadvisor.AllocationEntries") 623 proto.RegisterMapType((map[string]*AllocationInfo)(nil), "cpuadvisor.AllocationEntries.EntriesEntry") 624 proto.RegisterType((*AllocationInfo)(nil), "cpuadvisor.AllocationInfo") 625 proto.RegisterMapType((map[uint64]string)(nil), 
"cpuadvisor.AllocationInfo.OriginalTopologyAwareAssignmentsEntry") 626 proto.RegisterMapType((map[uint64]string)(nil), "cpuadvisor.AllocationInfo.TopologyAwareAssignmentsEntry") 627 } 628 629 func init() { proto.RegisterFile("cpu.proto", fileDescriptor_08fc9a87e8768c24) } 630 631 var fileDescriptor_08fc9a87e8768c24 = []byte{ 632 // 1031 bytes of a gzipped FileDescriptorProto 633 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcb, 0x6e, 0xdb, 0x46, 634 0x17, 0x16, 0x2d, 0xc7, 0xb6, 0x8e, 0xef, 0x13, 0xdb, 0x91, 0xf9, 0x47, 0x82, 0xa2, 0x1f, 0x29, 635 0x5c, 0x17, 0x96, 0x52, 0xb9, 0x68, 0x02, 0xaf, 0x2a, 0xa9, 0x86, 0x9b, 0x5e, 0x12, 0x95, 0x89, 636 0x62, 0x24, 0x1b, 0x62, 0x44, 0x8e, 0x29, 0x42, 0x24, 0x87, 0x21, 0x87, 0x32, 0x88, 0x02, 0x45, 637 0xdf, 0xa0, 0x5d, 0xf5, 0x15, 0xba, 0x2e, 0xd0, 0x65, 0x1f, 0x20, 0xcb, 0x2e, 0xbb, 0x6c, 0xdc, 638 0x57, 0xe8, 0xb2, 0x05, 0x0a, 0x0e, 0x29, 0x69, 0xa8, 0x6b, 0xdb, 0x95, 0x74, 0x2e, 0xdf, 0x77, 639 0x3e, 0x9e, 0x33, 0x73, 0x30, 0x90, 0xd3, 0xdc, 0xa0, 0xe2, 0x7a, 0x94, 0x51, 0x04, 0x9a, 0x1b, 640 0x60, 0xbd, 0x6f, 0xfa, 0xd4, 0x93, 0x4f, 0x0c, 0x93, 0x75, 0x83, 0x4e, 0x45, 0xa3, 0x76, 0xd5, 641 0xa0, 0x06, 0xad, 0xf2, 0x94, 0x4e, 0x70, 0xc5, 0x2d, 0x6e, 0xf0, 0x7f, 0x31, 0x54, 0x6e, 0x0b, 642 0xe9, 0xbd, 0xa0, 0x43, 0xae, 0xbb, 0xd8, 0xbb, 0xaa, 0xf6, 0x30, 0xc3, 0x56, 0xe8, 0xb3, 0x13, 643 0x8d, 0x7a, 0xa4, 0xea, 0xf6, 0x8c, 0x2a, 0x36, 0x88, 0xc3, 0xaa, 0xaf, 0x3d, 0xfb, 0xc4, 0xb5, 644 0x02, 0xc3, 0x74, 0xfc, 0x6a, 0x52, 0xd0, 0xef, 0x6b, 0x83, 0xbf, 0xaa, 0xdf, 0xd7, 0x62, 0xda, 645 0xf2, 0xf7, 0x4b, 0xb0, 0xf7, 0xb9, 0xe9, 0xb3, 0xba, 0xa3, 0x5f, 0x62, 0xa6, 0x75, 0x15, 0xe2, 646 0xbb, 0xd4, 0xf1, 0x09, 0xba, 0x80, 0x55, 0xe2, 0x30, 0xcf, 0x24, 0x7e, 0x5e, 0x2a, 0x65, 0x8f, 647 0xd6, 0x6b, 0x27, 0x95, 0x91, 0xf8, 0xca, 0x34, 0x48, 0xe5, 0x3c, 0xce, 0x8f, 0x7e, 0x42, 0x65, 648 0x80, 0x46, 0x2f, 0xe1, 0x18, 0x5b, 0x16, 0xbd, 0x56, 0xfd, 0x2e, 0xf6, 0x88, 0xae, 0x46, 0x4a, 649 0x7d, 0x95, 0xf6, 
0x89, 0x67, 0x61, 0x57, 0xf5, 0x88, 0x66, 0x61, 0xd3, 0x1e, 0xf8, 0xf3, 0x4b, 650 0x25, 0xe9, 0x68, 0x4d, 0xb9, 0xcf, 0x11, 0xcf, 0x38, 0xa0, 0x19, 0xf9, 0x9f, 0xc6, 0xe9, 0xca, 651 0x20, 0x9b, 0x3b, 0xe5, 0x57, 0xb0, 0x21, 0xd6, 0x44, 0x3b, 0x90, 0xed, 0x91, 0x30, 0x2f, 0x95, 652 0xa4, 0xa3, 0x9c, 0x12, 0xfd, 0x45, 0x1f, 0xc0, 0xad, 0x3e, 0xb6, 0x02, 0xc2, 0x79, 0xd7, 0x6b, 653 0x45, 0xf1, 0x1b, 0x9a, 0xd8, 0xd2, 0x02, 0x0b, 0x33, 0x93, 0x3a, 0x09, 0x8b, 0x12, 0x27, 0x9f, 654 0x2d, 0x3d, 0x92, 0xca, 0x3f, 0x49, 0x80, 0x26, 0x33, 0xd0, 0xf9, 0x78, 0x5b, 0xde, 0x9b, 0x4f, 655 0x39, 0xbd, 0x29, 0xf2, 0xe5, 0x42, 0xe5, 0xef, 0xa7, 0x95, 0xff, 0x6f, 0x46, 0x99, 0xc7, 0xce, 656 0x15, 0x15, 0x65, 0xff, 0xb0, 0x04, 0xdb, 0x63, 0x61, 0xf4, 0x0e, 0x6c, 0xd3, 0x6b, 0x87, 0x78, 657 0xaa, 0x4b, 0xa9, 0xa5, 0x3a, 0xd8, 0x26, 0x49, 0xa1, 0x4d, 0xee, 0x6e, 0x51, 0x6a, 0x3d, 0xc1, 658 0x36, 0x41, 0x5f, 0xc1, 0x5d, 0x6d, 0x04, 0x55, 0x3d, 0xe2, 0x07, 0x16, 0xf3, 0xd5, 0x4e, 0xa8, 659 0x3a, 0x81, 0x8d, 0xa3, 0xd9, 0x44, 0x1f, 0x7c, 0x36, 0x47, 0x89, 0x68, 0x2b, 0x31, 0xbc, 0x11, 660 0x3e, 0x89, 0xc0, 0xf1, 0xf7, 0x1f, 0x6a, 0xb3, 0xe2, 0x32, 0x85, 0xe2, 0x7c, 0xb0, 0xd8, 0xa3, 661 0x6c, 0xdc, 0xa3, 0x87, 0xe9, 0x1e, 0xdd, 0x13, 0x95, 0x45, 0xc0, 0x09, 0x42, 0xb1, 0x53, 0x0d, 662 0xd8, 0x9f, 0x9a, 0x83, 0xde, 0x85, 0x95, 0x8e, 0x45, 0xb5, 0xde, 0xe0, 0x83, 0x77, 0x45, 0xda, 663 0x46, 0x14, 0x51, 0x92, 0x84, 0xf2, 0xd7, 0x70, 0x8b, 0x3b, 0xd0, 0x01, 0xac, 0xc4, 0xed, 0xe2, 664 0xf2, 0x96, 0x95, 0xc4, 0x42, 0x0d, 0xd8, 0x1e, 0x9c, 0x74, 0x86, 0x3d, 0x83, 0xb0, 0x01, 0xe9, 665 0xa1, 0x48, 0x9a, 0x9c, 0xee, 0xe7, 0x3c, 0x43, 0xd9, 0xa2, 0xa2, 0xe9, 0xa3, 0x43, 0x58, 0xe3, 666 0xe5, 0x54, 0x53, 0xcf, 0x67, 0xf9, 0xdc, 0x56, 0xb9, 0xfd, 0x58, 0x2f, 0xff, 0x29, 0xc1, 0x66, 667 0x0a, 0x8c, 0x1e, 0x42, 0x3e, 0x5d, 0x70, 0x62, 0xe8, 0xfb, 0x29, 0xfa, 0xe1, 0xf0, 0x4f, 0xe1, 668 0x60, 0x02, 0xa8, 0xab, 0x81, 0xa9, 0xf3, 0xe6, 0xe6, 0x94, 0xdb, 0x63, 0x30, 0xbd, 0x6d, 0xea, 669 0xa8, 0x0e, 0x85, 
0x31, 0x90, 0x46, 0x1d, 0x86, 0xcd, 0xe8, 0xb0, 0xf1, 0x92, 0xb1, 0x5e, 0x39, 670 0x85, 0x6d, 0x0e, 0x52, 0x78, 0xdd, 0x33, 0xd8, 0x18, 0x52, 0x84, 0x2e, 0xc9, 0x2f, 0x97, 0xa4, 671 0xa3, 0xad, 0xda, 0x9d, 0x69, 0xed, 0x09, 0x5d, 0xa2, 0xac, 0xd3, 0x91, 0x51, 0x3e, 0x80, 0xbd, 672 0x0b, 0xc2, 0x9a, 0x5d, 0xa2, 0xf5, 0x5c, 0x6a, 0x3a, 0x4c, 0x21, 0xaf, 0x03, 0xe2, 0xb3, 0xf2, 673 0xcf, 0x12, 0xec, 0x8f, 0x05, 0x92, 0xad, 0xf6, 0xc9, 0xf8, 0xf5, 0xad, 0x88, 0x85, 0xa6, 0x62, 674 0x66, 0xdc, 0xe0, 0x97, 0x0b, 0x6f, 0xf0, 0x69, 0xfa, 0x74, 0x16, 0xc4, 0x4a, 0x75, 0xcb, 0xa2, 675 0xda, 0xac, 0xd5, 0xf3, 0xa3, 0x04, 0xbb, 0x13, 0x09, 0xe8, 0xe3, 0x71, 0xe9, 0xc7, 0x73, 0x09, 676 0x67, 0xc8, 0x7e, 0xb1, 0x50, 0xf6, 0x83, 0xb4, 0x6c, 0x79, 0x7a, 0x95, 0xf1, 0xbd, 0xf3, 0x57, 677 0x16, 0xb6, 0xd2, 0x51, 0x74, 0x07, 0x56, 0x3d, 0x6c, 0xbb, 0x6a, 0xe0, 0x72, 0xfa, 0x35, 0x65, 678 0x25, 0x32, 0xdb, 0xee, 0xb4, 0x7d, 0xb4, 0x34, 0x6d, 0x1f, 0xf5, 0x41, 0x66, 0xd4, 0xa5, 0x16, 679 0x35, 0x42, 0x15, 0x5f, 0x63, 0x8f, 0xa8, 0xd8, 0xf7, 0x4d, 0xc3, 0xb1, 0x89, 0xc3, 0xfc, 0x7c, 680 0x96, 0x37, 0xe1, 0xd1, 0x6c, 0x79, 0x95, 0xe7, 0x09, 0xb8, 0x1e, 0x61, 0xeb, 0x23, 0x68, 0xdc, 681 0x92, 0x3c, 0x9b, 0x11, 0x46, 0xdf, 0x4a, 0xf0, 0x7f, 0xea, 0x99, 0x86, 0xe9, 0x60, 0x4b, 0x9d, 682 0xa3, 0x60, 0x99, 0x2b, 0xf8, 0x68, 0x8e, 0x82, 0xa7, 0x09, 0xcb, 0x7c, 0x25, 0x25, 0xba, 0x20, 683 0x4d, 0xfe, 0x0c, 0x0a, 0x73, 0x29, 0xc4, 0x31, 0x2e, 0xc7, 0x63, 0xdc, 0x13, 0xc7, 0x98, 0x13, 684 0x46, 0x25, 0x3f, 0x83, 0xfb, 0xff, 0x48, 0xd7, 0xbf, 0x21, 0x3d, 0xfe, 0x10, 0xd6, 0x85, 0x6b, 685 0x8a, 0x10, 0x6c, 0x25, 0xe6, 0xa5, 0xc9, 0xba, 0x2d, 0xaa, 0xef, 0x64, 0xd0, 0x6d, 0xd8, 0x4e, 686 0xf9, 0xa8, 0xb5, 0x23, 0xd5, 0xfe, 0x90, 0x00, 0x9a, 0xad, 0x76, 0x3d, 0xee, 0x1f, 0xfa, 0x12, 687 0x36, 0xea, 0xba, 0x3e, 0xdc, 0x10, 0xa8, 0x50, 0x19, 0xbd, 0x5e, 0x2a, 0x43, 0xf7, 0x17, 0x84, 688 0x61, 0x1d, 0x33, 0x2c, 0x97, 0xc4, 0xb0, 0x08, 0x1c, 0x5c, 0xde, 0x72, 0x06, 0x7d, 0x0a, 0x39, 689 0x85, 0xd8, 0xb4, 
0x4f, 0x5a, 0x54, 0x47, 0x77, 0x45, 0xc0, 0xd0, 0x9d, 0xec, 0x0d, 0xb9, 0x30, 690 0x23, 0x3a, 0xe4, 0xba, 0x80, 0x0d, 0xf1, 0xe5, 0x83, 0x76, 0x45, 0xc0, 0xb9, 0xed, 0xb2, 0x50, 691 0x2e, 0x2d, 0x7a, 0x26, 0x95, 0x33, 0x0f, 0xa4, 0x9a, 0x06, 0xb9, 0x66, 0xab, 0xdd, 0xe2, 0x2f, 692 0x34, 0xf4, 0x02, 0x36, 0x53, 0x9b, 0x07, 0x95, 0xe6, 0x2c, 0xa5, 0x58, 0xe9, 0xbd, 0x85, 0x6b, 693 0xab, 0x9c, 0x69, 0xf8, 0x6f, 0xde, 0x16, 0xa5, 0x5f, 0xdf, 0x16, 0x33, 0xdf, 0xdc, 0x14, 0xa5, 694 0x37, 0x37, 0x45, 0xe9, 0x97, 0x9b, 0xa2, 0xf4, 0xdb, 0x4d, 0x51, 0xfa, 0xee, 0xf7, 0x62, 0xe6, 695 0xd5, 0x7f, 0x7f, 0x50, 0x6a, 0x6e, 0x50, 0xd5, 0x43, 0x07, 0xdb, 0xa6, 0xe6, 0x52, 0xcb, 0xd4, 696 0xc2, 0xea, 0x48, 0x4c, 0x67, 0x85, 0xbf, 0x2b, 0x4f, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x9e, 697 0xab, 0x59, 0x4c, 0xf6, 0x0a, 0x00, 0x00, 698 } 699 700 // Reference imports to suppress errors if they are not otherwise used. 701 var _ context.Context 702 var _ grpc.ClientConn 703 704 // This is a compile-time assertion to ensure that this generated file 705 // is compatible with the grpc package it is being compiled against. 706 const _ = grpc.SupportPackageIsVersion4 707 708 // CPUAdvisorClient is the client API for CPUAdvisor service. 709 // 710 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
711 type CPUAdvisorClient interface { 712 AddContainer(ctx context.Context, in *advisorsvc.ContainerMetadata, opts ...grpc.CallOption) (*advisorsvc.AddContainerResponse, error) 713 RemovePod(ctx context.Context, in *advisorsvc.RemovePodRequest, opts ...grpc.CallOption) (*advisorsvc.RemovePodResponse, error) 714 ListAndWatch(ctx context.Context, in *advisorsvc.Empty, opts ...grpc.CallOption) (CPUAdvisor_ListAndWatchClient, error) 715 } 716 717 type cPUAdvisorClient struct { 718 cc *grpc.ClientConn 719 } 720 721 func NewCPUAdvisorClient(cc *grpc.ClientConn) CPUAdvisorClient { 722 return &cPUAdvisorClient{cc} 723 } 724 725 func (c *cPUAdvisorClient) AddContainer(ctx context.Context, in *advisorsvc.ContainerMetadata, opts ...grpc.CallOption) (*advisorsvc.AddContainerResponse, error) { 726 out := new(advisorsvc.AddContainerResponse) 727 err := c.cc.Invoke(ctx, "/cpuadvisor.CPUAdvisor/AddContainer", in, out, opts...) 728 if err != nil { 729 return nil, err 730 } 731 return out, nil 732 } 733 734 func (c *cPUAdvisorClient) RemovePod(ctx context.Context, in *advisorsvc.RemovePodRequest, opts ...grpc.CallOption) (*advisorsvc.RemovePodResponse, error) { 735 out := new(advisorsvc.RemovePodResponse) 736 err := c.cc.Invoke(ctx, "/cpuadvisor.CPUAdvisor/RemovePod", in, out, opts...) 737 if err != nil { 738 return nil, err 739 } 740 return out, nil 741 } 742 743 func (c *cPUAdvisorClient) ListAndWatch(ctx context.Context, in *advisorsvc.Empty, opts ...grpc.CallOption) (CPUAdvisor_ListAndWatchClient, error) { 744 stream, err := c.cc.NewStream(ctx, &_CPUAdvisor_serviceDesc.Streams[0], "/cpuadvisor.CPUAdvisor/ListAndWatch", opts...) 
745 if err != nil { 746 return nil, err 747 } 748 x := &cPUAdvisorListAndWatchClient{stream} 749 if err := x.ClientStream.SendMsg(in); err != nil { 750 return nil, err 751 } 752 if err := x.ClientStream.CloseSend(); err != nil { 753 return nil, err 754 } 755 return x, nil 756 } 757 758 type CPUAdvisor_ListAndWatchClient interface { 759 Recv() (*ListAndWatchResponse, error) 760 grpc.ClientStream 761 } 762 763 type cPUAdvisorListAndWatchClient struct { 764 grpc.ClientStream 765 } 766 767 func (x *cPUAdvisorListAndWatchClient) Recv() (*ListAndWatchResponse, error) { 768 m := new(ListAndWatchResponse) 769 if err := x.ClientStream.RecvMsg(m); err != nil { 770 return nil, err 771 } 772 return m, nil 773 } 774 775 // CPUAdvisorServer is the server API for CPUAdvisor service. 776 type CPUAdvisorServer interface { 777 AddContainer(context.Context, *advisorsvc.ContainerMetadata) (*advisorsvc.AddContainerResponse, error) 778 RemovePod(context.Context, *advisorsvc.RemovePodRequest) (*advisorsvc.RemovePodResponse, error) 779 ListAndWatch(*advisorsvc.Empty, CPUAdvisor_ListAndWatchServer) error 780 } 781 782 // UnimplementedCPUAdvisorServer can be embedded to have forward compatible implementations. 
783 type UnimplementedCPUAdvisorServer struct { 784 } 785 786 func (*UnimplementedCPUAdvisorServer) AddContainer(ctx context.Context, req *advisorsvc.ContainerMetadata) (*advisorsvc.AddContainerResponse, error) { 787 return nil, status.Errorf(codes.Unimplemented, "method AddContainer not implemented") 788 } 789 func (*UnimplementedCPUAdvisorServer) RemovePod(ctx context.Context, req *advisorsvc.RemovePodRequest) (*advisorsvc.RemovePodResponse, error) { 790 return nil, status.Errorf(codes.Unimplemented, "method RemovePod not implemented") 791 } 792 func (*UnimplementedCPUAdvisorServer) ListAndWatch(req *advisorsvc.Empty, srv CPUAdvisor_ListAndWatchServer) error { 793 return status.Errorf(codes.Unimplemented, "method ListAndWatch not implemented") 794 } 795 796 func RegisterCPUAdvisorServer(s *grpc.Server, srv CPUAdvisorServer) { 797 s.RegisterService(&_CPUAdvisor_serviceDesc, srv) 798 } 799 800 func _CPUAdvisor_AddContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 801 in := new(advisorsvc.ContainerMetadata) 802 if err := dec(in); err != nil { 803 return nil, err 804 } 805 if interceptor == nil { 806 return srv.(CPUAdvisorServer).AddContainer(ctx, in) 807 } 808 info := &grpc.UnaryServerInfo{ 809 Server: srv, 810 FullMethod: "/cpuadvisor.CPUAdvisor/AddContainer", 811 } 812 handler := func(ctx context.Context, req interface{}) (interface{}, error) { 813 return srv.(CPUAdvisorServer).AddContainer(ctx, req.(*advisorsvc.ContainerMetadata)) 814 } 815 return interceptor(ctx, in, info, handler) 816 } 817 818 func _CPUAdvisor_RemovePod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 819 in := new(advisorsvc.RemovePodRequest) 820 if err := dec(in); err != nil { 821 return nil, err 822 } 823 if interceptor == nil { 824 return srv.(CPUAdvisorServer).RemovePod(ctx, in) 825 } 826 info := 
&grpc.UnaryServerInfo{ 827 Server: srv, 828 FullMethod: "/cpuadvisor.CPUAdvisor/RemovePod", 829 } 830 handler := func(ctx context.Context, req interface{}) (interface{}, error) { 831 return srv.(CPUAdvisorServer).RemovePod(ctx, req.(*advisorsvc.RemovePodRequest)) 832 } 833 return interceptor(ctx, in, info, handler) 834 } 835 836 func _CPUAdvisor_ListAndWatch_Handler(srv interface{}, stream grpc.ServerStream) error { 837 m := new(advisorsvc.Empty) 838 if err := stream.RecvMsg(m); err != nil { 839 return err 840 } 841 return srv.(CPUAdvisorServer).ListAndWatch(m, &cPUAdvisorListAndWatchServer{stream}) 842 } 843 844 type CPUAdvisor_ListAndWatchServer interface { 845 Send(*ListAndWatchResponse) error 846 grpc.ServerStream 847 } 848 849 type cPUAdvisorListAndWatchServer struct { 850 grpc.ServerStream 851 } 852 853 func (x *cPUAdvisorListAndWatchServer) Send(m *ListAndWatchResponse) error { 854 return x.ServerStream.SendMsg(m) 855 } 856 857 var _CPUAdvisor_serviceDesc = grpc.ServiceDesc{ 858 ServiceName: "cpuadvisor.CPUAdvisor", 859 HandlerType: (*CPUAdvisorServer)(nil), 860 Methods: []grpc.MethodDesc{ 861 { 862 MethodName: "AddContainer", 863 Handler: _CPUAdvisor_AddContainer_Handler, 864 }, 865 { 866 MethodName: "RemovePod", 867 Handler: _CPUAdvisor_RemovePod_Handler, 868 }, 869 }, 870 Streams: []grpc.StreamDesc{ 871 { 872 StreamName: "ListAndWatch", 873 Handler: _CPUAdvisor_ListAndWatch_Handler, 874 ServerStreams: true, 875 }, 876 }, 877 Metadata: "cpu.proto", 878 } 879 880 // CPUPluginClient is the client API for CPUPlugin service. 881 // 882 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
883 type CPUPluginClient interface { 884 GetCheckpoint(ctx context.Context, in *GetCheckpointRequest, opts ...grpc.CallOption) (*GetCheckpointResponse, error) 885 } 886 887 type cPUPluginClient struct { 888 cc *grpc.ClientConn 889 } 890 891 func NewCPUPluginClient(cc *grpc.ClientConn) CPUPluginClient { 892 return &cPUPluginClient{cc} 893 } 894 895 func (c *cPUPluginClient) GetCheckpoint(ctx context.Context, in *GetCheckpointRequest, opts ...grpc.CallOption) (*GetCheckpointResponse, error) { 896 out := new(GetCheckpointResponse) 897 err := c.cc.Invoke(ctx, "/cpuadvisor.CPUPlugin/GetCheckpoint", in, out, opts...) 898 if err != nil { 899 return nil, err 900 } 901 return out, nil 902 } 903 904 // CPUPluginServer is the server API for CPUPlugin service. 905 type CPUPluginServer interface { 906 GetCheckpoint(context.Context, *GetCheckpointRequest) (*GetCheckpointResponse, error) 907 } 908 909 // UnimplementedCPUPluginServer can be embedded to have forward compatible implementations. 910 type UnimplementedCPUPluginServer struct { 911 } 912 913 func (*UnimplementedCPUPluginServer) GetCheckpoint(ctx context.Context, req *GetCheckpointRequest) (*GetCheckpointResponse, error) { 914 return nil, status.Errorf(codes.Unimplemented, "method GetCheckpoint not implemented") 915 } 916 917 func RegisterCPUPluginServer(s *grpc.Server, srv CPUPluginServer) { 918 s.RegisterService(&_CPUPlugin_serviceDesc, srv) 919 } 920 921 func _CPUPlugin_GetCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 922 in := new(GetCheckpointRequest) 923 if err := dec(in); err != nil { 924 return nil, err 925 } 926 if interceptor == nil { 927 return srv.(CPUPluginServer).GetCheckpoint(ctx, in) 928 } 929 info := &grpc.UnaryServerInfo{ 930 Server: srv, 931 FullMethod: "/cpuadvisor.CPUPlugin/GetCheckpoint", 932 } 933 handler := func(ctx context.Context, req interface{}) (interface{}, error) { 934 return 
srv.(CPUPluginServer).GetCheckpoint(ctx, req.(*GetCheckpointRequest)) 935 } 936 return interceptor(ctx, in, info, handler) 937 } 938 939 var _CPUPlugin_serviceDesc = grpc.ServiceDesc{ 940 ServiceName: "cpuadvisor.CPUPlugin", 941 HandlerType: (*CPUPluginServer)(nil), 942 Methods: []grpc.MethodDesc{ 943 { 944 MethodName: "GetCheckpoint", 945 Handler: _CPUPlugin_GetCheckpoint_Handler, 946 }, 947 }, 948 Streams: []grpc.StreamDesc{}, 949 Metadata: "cpu.proto", 950 } 951 952 func (m *ListAndWatchResponse) Marshal() (dAtA []byte, err error) { 953 size := m.Size() 954 dAtA = make([]byte, size) 955 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 956 if err != nil { 957 return nil, err 958 } 959 return dAtA[:n], nil 960 } 961 962 func (m *ListAndWatchResponse) MarshalTo(dAtA []byte) (int, error) { 963 size := m.Size() 964 return m.MarshalToSizedBuffer(dAtA[:size]) 965 } 966 967 func (m *ListAndWatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { 968 i := len(dAtA) 969 _ = i 970 var l int 971 _ = l 972 if m.AllowSharedCoresOverlapReclaimedCores { 973 i-- 974 if m.AllowSharedCoresOverlapReclaimedCores { 975 dAtA[i] = 1 976 } else { 977 dAtA[i] = 0 978 } 979 i-- 980 dAtA[i] = 0x10 981 } 982 if len(m.Entries) > 0 { 983 for k := range m.Entries { 984 v := m.Entries[k] 985 baseI := i 986 if v != nil { 987 { 988 size, err := v.MarshalToSizedBuffer(dAtA[:i]) 989 if err != nil { 990 return 0, err 991 } 992 i -= size 993 i = encodeVarintCpu(dAtA, i, uint64(size)) 994 } 995 i-- 996 dAtA[i] = 0x12 997 } 998 i -= len(k) 999 copy(dAtA[i:], k) 1000 i = encodeVarintCpu(dAtA, i, uint64(len(k))) 1001 i-- 1002 dAtA[i] = 0xa 1003 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1004 i-- 1005 dAtA[i] = 0xa 1006 } 1007 } 1008 return len(dAtA) - i, nil 1009 } 1010 1011 func (m *CalculationEntries) Marshal() (dAtA []byte, err error) { 1012 size := m.Size() 1013 dAtA = make([]byte, size) 1014 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1015 if err != nil { 1016 return nil, err 1017 } 1018 
return dAtA[:n], nil 1019 } 1020 1021 func (m *CalculationEntries) MarshalTo(dAtA []byte) (int, error) { 1022 size := m.Size() 1023 return m.MarshalToSizedBuffer(dAtA[:size]) 1024 } 1025 1026 func (m *CalculationEntries) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1027 i := len(dAtA) 1028 _ = i 1029 var l int 1030 _ = l 1031 if len(m.Entries) > 0 { 1032 for k := range m.Entries { 1033 v := m.Entries[k] 1034 baseI := i 1035 if v != nil { 1036 { 1037 size, err := v.MarshalToSizedBuffer(dAtA[:i]) 1038 if err != nil { 1039 return 0, err 1040 } 1041 i -= size 1042 i = encodeVarintCpu(dAtA, i, uint64(size)) 1043 } 1044 i-- 1045 dAtA[i] = 0x12 1046 } 1047 i -= len(k) 1048 copy(dAtA[i:], k) 1049 i = encodeVarintCpu(dAtA, i, uint64(len(k))) 1050 i-- 1051 dAtA[i] = 0xa 1052 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1053 i-- 1054 dAtA[i] = 0xa 1055 } 1056 } 1057 return len(dAtA) - i, nil 1058 } 1059 1060 func (m *CalculationInfo) Marshal() (dAtA []byte, err error) { 1061 size := m.Size() 1062 dAtA = make([]byte, size) 1063 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1064 if err != nil { 1065 return nil, err 1066 } 1067 return dAtA[:n], nil 1068 } 1069 1070 func (m *CalculationInfo) MarshalTo(dAtA []byte) (int, error) { 1071 size := m.Size() 1072 return m.MarshalToSizedBuffer(dAtA[:size]) 1073 } 1074 1075 func (m *CalculationInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1076 i := len(dAtA) 1077 _ = i 1078 var l int 1079 _ = l 1080 if len(m.CalculationResultsByNumas) > 0 { 1081 for k := range m.CalculationResultsByNumas { 1082 v := m.CalculationResultsByNumas[k] 1083 baseI := i 1084 if v != nil { 1085 { 1086 size, err := v.MarshalToSizedBuffer(dAtA[:i]) 1087 if err != nil { 1088 return 0, err 1089 } 1090 i -= size 1091 i = encodeVarintCpu(dAtA, i, uint64(size)) 1092 } 1093 i-- 1094 dAtA[i] = 0x12 1095 } 1096 i = encodeVarintCpu(dAtA, i, uint64(k)) 1097 i-- 1098 dAtA[i] = 0x8 1099 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1100 i-- 1101 dAtA[i] = 0x12 1102 
} 1103 } 1104 if len(m.OwnerPoolName) > 0 { 1105 i -= len(m.OwnerPoolName) 1106 copy(dAtA[i:], m.OwnerPoolName) 1107 i = encodeVarintCpu(dAtA, i, uint64(len(m.OwnerPoolName))) 1108 i-- 1109 dAtA[i] = 0xa 1110 } 1111 return len(dAtA) - i, nil 1112 } 1113 1114 func (m *NumaCalculationResult) Marshal() (dAtA []byte, err error) { 1115 size := m.Size() 1116 dAtA = make([]byte, size) 1117 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1118 if err != nil { 1119 return nil, err 1120 } 1121 return dAtA[:n], nil 1122 } 1123 1124 func (m *NumaCalculationResult) MarshalTo(dAtA []byte) (int, error) { 1125 size := m.Size() 1126 return m.MarshalToSizedBuffer(dAtA[:size]) 1127 } 1128 1129 func (m *NumaCalculationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1130 i := len(dAtA) 1131 _ = i 1132 var l int 1133 _ = l 1134 if len(m.Blocks) > 0 { 1135 for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { 1136 { 1137 size, err := m.Blocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) 1138 if err != nil { 1139 return 0, err 1140 } 1141 i -= size 1142 i = encodeVarintCpu(dAtA, i, uint64(size)) 1143 } 1144 i-- 1145 dAtA[i] = 0x12 1146 } 1147 } 1148 return len(dAtA) - i, nil 1149 } 1150 1151 func (m *Block) Marshal() (dAtA []byte, err error) { 1152 size := m.Size() 1153 dAtA = make([]byte, size) 1154 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1155 if err != nil { 1156 return nil, err 1157 } 1158 return dAtA[:n], nil 1159 } 1160 1161 func (m *Block) MarshalTo(dAtA []byte) (int, error) { 1162 size := m.Size() 1163 return m.MarshalToSizedBuffer(dAtA[:size]) 1164 } 1165 1166 func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1167 i := len(dAtA) 1168 _ = i 1169 var l int 1170 _ = l 1171 if len(m.BlockId) > 0 { 1172 i -= len(m.BlockId) 1173 copy(dAtA[i:], m.BlockId) 1174 i = encodeVarintCpu(dAtA, i, uint64(len(m.BlockId))) 1175 i-- 1176 dAtA[i] = 0x1a 1177 } 1178 if len(m.OverlapTargets) > 0 { 1179 for iNdEx := len(m.OverlapTargets) - 1; iNdEx >= 0; iNdEx-- { 1180 { 1181 size, 
err := m.OverlapTargets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) 1182 if err != nil { 1183 return 0, err 1184 } 1185 i -= size 1186 i = encodeVarintCpu(dAtA, i, uint64(size)) 1187 } 1188 i-- 1189 dAtA[i] = 0x12 1190 } 1191 } 1192 if m.Result != 0 { 1193 i = encodeVarintCpu(dAtA, i, uint64(m.Result)) 1194 i-- 1195 dAtA[i] = 0x8 1196 } 1197 return len(dAtA) - i, nil 1198 } 1199 1200 func (m *OverlapTarget) Marshal() (dAtA []byte, err error) { 1201 size := m.Size() 1202 dAtA = make([]byte, size) 1203 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1204 if err != nil { 1205 return nil, err 1206 } 1207 return dAtA[:n], nil 1208 } 1209 1210 func (m *OverlapTarget) MarshalTo(dAtA []byte) (int, error) { 1211 size := m.Size() 1212 return m.MarshalToSizedBuffer(dAtA[:size]) 1213 } 1214 1215 func (m *OverlapTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1216 i := len(dAtA) 1217 _ = i 1218 var l int 1219 _ = l 1220 if m.OverlapType != 0 { 1221 i = encodeVarintCpu(dAtA, i, uint64(m.OverlapType)) 1222 i-- 1223 dAtA[i] = 0x20 1224 } 1225 if len(m.OverlapTargetContainerName) > 0 { 1226 i -= len(m.OverlapTargetContainerName) 1227 copy(dAtA[i:], m.OverlapTargetContainerName) 1228 i = encodeVarintCpu(dAtA, i, uint64(len(m.OverlapTargetContainerName))) 1229 i-- 1230 dAtA[i] = 0x1a 1231 } 1232 if len(m.OverlapTargetPodUid) > 0 { 1233 i -= len(m.OverlapTargetPodUid) 1234 copy(dAtA[i:], m.OverlapTargetPodUid) 1235 i = encodeVarintCpu(dAtA, i, uint64(len(m.OverlapTargetPodUid))) 1236 i-- 1237 dAtA[i] = 0x12 1238 } 1239 if len(m.OverlapTargetPoolName) > 0 { 1240 i -= len(m.OverlapTargetPoolName) 1241 copy(dAtA[i:], m.OverlapTargetPoolName) 1242 i = encodeVarintCpu(dAtA, i, uint64(len(m.OverlapTargetPoolName))) 1243 i-- 1244 dAtA[i] = 0xa 1245 } 1246 return len(dAtA) - i, nil 1247 } 1248 1249 func (m *GetCheckpointRequest) Marshal() (dAtA []byte, err error) { 1250 size := m.Size() 1251 dAtA = make([]byte, size) 1252 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1253 if err != nil { 
1254 return nil, err 1255 } 1256 return dAtA[:n], nil 1257 } 1258 1259 func (m *GetCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { 1260 size := m.Size() 1261 return m.MarshalToSizedBuffer(dAtA[:size]) 1262 } 1263 1264 func (m *GetCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1265 i := len(dAtA) 1266 _ = i 1267 var l int 1268 _ = l 1269 return len(dAtA) - i, nil 1270 } 1271 1272 func (m *GetCheckpointResponse) Marshal() (dAtA []byte, err error) { 1273 size := m.Size() 1274 dAtA = make([]byte, size) 1275 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1276 if err != nil { 1277 return nil, err 1278 } 1279 return dAtA[:n], nil 1280 } 1281 1282 func (m *GetCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { 1283 size := m.Size() 1284 return m.MarshalToSizedBuffer(dAtA[:size]) 1285 } 1286 1287 func (m *GetCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1288 i := len(dAtA) 1289 _ = i 1290 var l int 1291 _ = l 1292 if len(m.Entries) > 0 { 1293 for k := range m.Entries { 1294 v := m.Entries[k] 1295 baseI := i 1296 if v != nil { 1297 { 1298 size, err := v.MarshalToSizedBuffer(dAtA[:i]) 1299 if err != nil { 1300 return 0, err 1301 } 1302 i -= size 1303 i = encodeVarintCpu(dAtA, i, uint64(size)) 1304 } 1305 i-- 1306 dAtA[i] = 0x12 1307 } 1308 i -= len(k) 1309 copy(dAtA[i:], k) 1310 i = encodeVarintCpu(dAtA, i, uint64(len(k))) 1311 i-- 1312 dAtA[i] = 0xa 1313 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1314 i-- 1315 dAtA[i] = 0xa 1316 } 1317 } 1318 return len(dAtA) - i, nil 1319 } 1320 1321 func (m *AllocationEntries) Marshal() (dAtA []byte, err error) { 1322 size := m.Size() 1323 dAtA = make([]byte, size) 1324 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1325 if err != nil { 1326 return nil, err 1327 } 1328 return dAtA[:n], nil 1329 } 1330 1331 func (m *AllocationEntries) MarshalTo(dAtA []byte) (int, error) { 1332 size := m.Size() 1333 return m.MarshalToSizedBuffer(dAtA[:size]) 1334 } 1335 1336 func (m 
*AllocationEntries) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1337 i := len(dAtA) 1338 _ = i 1339 var l int 1340 _ = l 1341 if len(m.Entries) > 0 { 1342 for k := range m.Entries { 1343 v := m.Entries[k] 1344 baseI := i 1345 if v != nil { 1346 { 1347 size, err := v.MarshalToSizedBuffer(dAtA[:i]) 1348 if err != nil { 1349 return 0, err 1350 } 1351 i -= size 1352 i = encodeVarintCpu(dAtA, i, uint64(size)) 1353 } 1354 i-- 1355 dAtA[i] = 0x12 1356 } 1357 i -= len(k) 1358 copy(dAtA[i:], k) 1359 i = encodeVarintCpu(dAtA, i, uint64(len(k))) 1360 i-- 1361 dAtA[i] = 0xa 1362 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1363 i-- 1364 dAtA[i] = 0xa 1365 } 1366 } 1367 return len(dAtA) - i, nil 1368 } 1369 1370 func (m *AllocationInfo) Marshal() (dAtA []byte, err error) { 1371 size := m.Size() 1372 dAtA = make([]byte, size) 1373 n, err := m.MarshalToSizedBuffer(dAtA[:size]) 1374 if err != nil { 1375 return nil, err 1376 } 1377 return dAtA[:n], nil 1378 } 1379 1380 func (m *AllocationInfo) MarshalTo(dAtA []byte) (int, error) { 1381 size := m.Size() 1382 return m.MarshalToSizedBuffer(dAtA[:size]) 1383 } 1384 1385 func (m *AllocationInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { 1386 i := len(dAtA) 1387 _ = i 1388 var l int 1389 _ = l 1390 if len(m.OriginalTopologyAwareAssignments) > 0 { 1391 for k := range m.OriginalTopologyAwareAssignments { 1392 v := m.OriginalTopologyAwareAssignments[k] 1393 baseI := i 1394 i -= len(v) 1395 copy(dAtA[i:], v) 1396 i = encodeVarintCpu(dAtA, i, uint64(len(v))) 1397 i-- 1398 dAtA[i] = 0x12 1399 i = encodeVarintCpu(dAtA, i, uint64(k)) 1400 i-- 1401 dAtA[i] = 0x8 1402 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1403 i-- 1404 dAtA[i] = 0x22 1405 } 1406 } 1407 if len(m.TopologyAwareAssignments) > 0 { 1408 for k := range m.TopologyAwareAssignments { 1409 v := m.TopologyAwareAssignments[k] 1410 baseI := i 1411 i -= len(v) 1412 copy(dAtA[i:], v) 1413 i = encodeVarintCpu(dAtA, i, uint64(len(v))) 1414 i-- 1415 dAtA[i] = 0x12 1416 i = 
encodeVarintCpu(dAtA, i, uint64(k)) 1417 i-- 1418 dAtA[i] = 0x8 1419 i = encodeVarintCpu(dAtA, i, uint64(baseI-i)) 1420 i-- 1421 dAtA[i] = 0x1a 1422 } 1423 } 1424 if len(m.OwnerPoolName) > 0 { 1425 i -= len(m.OwnerPoolName) 1426 copy(dAtA[i:], m.OwnerPoolName) 1427 i = encodeVarintCpu(dAtA, i, uint64(len(m.OwnerPoolName))) 1428 i-- 1429 dAtA[i] = 0x12 1430 } 1431 if m.RampUp { 1432 i-- 1433 if m.RampUp { 1434 dAtA[i] = 1 1435 } else { 1436 dAtA[i] = 0 1437 } 1438 i-- 1439 dAtA[i] = 0x8 1440 } 1441 return len(dAtA) - i, nil 1442 } 1443 1444 func encodeVarintCpu(dAtA []byte, offset int, v uint64) int { 1445 offset -= sovCpu(v) 1446 base := offset 1447 for v >= 1<<7 { 1448 dAtA[offset] = uint8(v&0x7f | 0x80) 1449 v >>= 7 1450 offset++ 1451 } 1452 dAtA[offset] = uint8(v) 1453 return base 1454 } 1455 func (m *ListAndWatchResponse) Size() (n int) { 1456 if m == nil { 1457 return 0 1458 } 1459 var l int 1460 _ = l 1461 if len(m.Entries) > 0 { 1462 for k, v := range m.Entries { 1463 _ = k 1464 _ = v 1465 l = 0 1466 if v != nil { 1467 l = v.Size() 1468 l += 1 + sovCpu(uint64(l)) 1469 } 1470 mapEntrySize := 1 + len(k) + sovCpu(uint64(len(k))) + l 1471 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1472 } 1473 } 1474 if m.AllowSharedCoresOverlapReclaimedCores { 1475 n += 2 1476 } 1477 return n 1478 } 1479 1480 func (m *CalculationEntries) Size() (n int) { 1481 if m == nil { 1482 return 0 1483 } 1484 var l int 1485 _ = l 1486 if len(m.Entries) > 0 { 1487 for k, v := range m.Entries { 1488 _ = k 1489 _ = v 1490 l = 0 1491 if v != nil { 1492 l = v.Size() 1493 l += 1 + sovCpu(uint64(l)) 1494 } 1495 mapEntrySize := 1 + len(k) + sovCpu(uint64(len(k))) + l 1496 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1497 } 1498 } 1499 return n 1500 } 1501 1502 func (m *CalculationInfo) Size() (n int) { 1503 if m == nil { 1504 return 0 1505 } 1506 var l int 1507 _ = l 1508 l = len(m.OwnerPoolName) 1509 if l > 0 { 1510 n += 1 + l + sovCpu(uint64(l)) 1511 } 1512 if 
len(m.CalculationResultsByNumas) > 0 { 1513 for k, v := range m.CalculationResultsByNumas { 1514 _ = k 1515 _ = v 1516 l = 0 1517 if v != nil { 1518 l = v.Size() 1519 l += 1 + sovCpu(uint64(l)) 1520 } 1521 mapEntrySize := 1 + sovCpu(uint64(k)) + l 1522 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1523 } 1524 } 1525 return n 1526 } 1527 1528 func (m *NumaCalculationResult) Size() (n int) { 1529 if m == nil { 1530 return 0 1531 } 1532 var l int 1533 _ = l 1534 if len(m.Blocks) > 0 { 1535 for _, e := range m.Blocks { 1536 l = e.Size() 1537 n += 1 + l + sovCpu(uint64(l)) 1538 } 1539 } 1540 return n 1541 } 1542 1543 func (m *Block) Size() (n int) { 1544 if m == nil { 1545 return 0 1546 } 1547 var l int 1548 _ = l 1549 if m.Result != 0 { 1550 n += 1 + sovCpu(uint64(m.Result)) 1551 } 1552 if len(m.OverlapTargets) > 0 { 1553 for _, e := range m.OverlapTargets { 1554 l = e.Size() 1555 n += 1 + l + sovCpu(uint64(l)) 1556 } 1557 } 1558 l = len(m.BlockId) 1559 if l > 0 { 1560 n += 1 + l + sovCpu(uint64(l)) 1561 } 1562 return n 1563 } 1564 1565 func (m *OverlapTarget) Size() (n int) { 1566 if m == nil { 1567 return 0 1568 } 1569 var l int 1570 _ = l 1571 l = len(m.OverlapTargetPoolName) 1572 if l > 0 { 1573 n += 1 + l + sovCpu(uint64(l)) 1574 } 1575 l = len(m.OverlapTargetPodUid) 1576 if l > 0 { 1577 n += 1 + l + sovCpu(uint64(l)) 1578 } 1579 l = len(m.OverlapTargetContainerName) 1580 if l > 0 { 1581 n += 1 + l + sovCpu(uint64(l)) 1582 } 1583 if m.OverlapType != 0 { 1584 n += 1 + sovCpu(uint64(m.OverlapType)) 1585 } 1586 return n 1587 } 1588 1589 func (m *GetCheckpointRequest) Size() (n int) { 1590 if m == nil { 1591 return 0 1592 } 1593 var l int 1594 _ = l 1595 return n 1596 } 1597 1598 func (m *GetCheckpointResponse) Size() (n int) { 1599 if m == nil { 1600 return 0 1601 } 1602 var l int 1603 _ = l 1604 if len(m.Entries) > 0 { 1605 for k, v := range m.Entries { 1606 _ = k 1607 _ = v 1608 l = 0 1609 if v != nil { 1610 l = v.Size() 1611 l += 1 + sovCpu(uint64(l)) 1612 
} 1613 mapEntrySize := 1 + len(k) + sovCpu(uint64(len(k))) + l 1614 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1615 } 1616 } 1617 return n 1618 } 1619 1620 func (m *AllocationEntries) Size() (n int) { 1621 if m == nil { 1622 return 0 1623 } 1624 var l int 1625 _ = l 1626 if len(m.Entries) > 0 { 1627 for k, v := range m.Entries { 1628 _ = k 1629 _ = v 1630 l = 0 1631 if v != nil { 1632 l = v.Size() 1633 l += 1 + sovCpu(uint64(l)) 1634 } 1635 mapEntrySize := 1 + len(k) + sovCpu(uint64(len(k))) + l 1636 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1637 } 1638 } 1639 return n 1640 } 1641 1642 func (m *AllocationInfo) Size() (n int) { 1643 if m == nil { 1644 return 0 1645 } 1646 var l int 1647 _ = l 1648 if m.RampUp { 1649 n += 2 1650 } 1651 l = len(m.OwnerPoolName) 1652 if l > 0 { 1653 n += 1 + l + sovCpu(uint64(l)) 1654 } 1655 if len(m.TopologyAwareAssignments) > 0 { 1656 for k, v := range m.TopologyAwareAssignments { 1657 _ = k 1658 _ = v 1659 mapEntrySize := 1 + sovCpu(uint64(k)) + 1 + len(v) + sovCpu(uint64(len(v))) 1660 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1661 } 1662 } 1663 if len(m.OriginalTopologyAwareAssignments) > 0 { 1664 for k, v := range m.OriginalTopologyAwareAssignments { 1665 _ = k 1666 _ = v 1667 mapEntrySize := 1 + sovCpu(uint64(k)) + 1 + len(v) + sovCpu(uint64(len(v))) 1668 n += mapEntrySize + 1 + sovCpu(uint64(mapEntrySize)) 1669 } 1670 } 1671 return n 1672 } 1673 1674 func sovCpu(x uint64) (n int) { 1675 return (math_bits.Len64(x|1) + 6) / 7 1676 } 1677 func sozCpu(x uint64) (n int) { 1678 return sovCpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) 1679 } 1680 func (this *ListAndWatchResponse) String() string { 1681 if this == nil { 1682 return "nil" 1683 } 1684 keysForEntries := make([]string, 0, len(this.Entries)) 1685 for k, _ := range this.Entries { 1686 keysForEntries = append(keysForEntries, k) 1687 } 1688 github_com_gogo_protobuf_sortkeys.Strings(keysForEntries) 1689 mapStringForEntries := 
"map[string]*CalculationEntries{" 1690 for _, k := range keysForEntries { 1691 mapStringForEntries += fmt.Sprintf("%v: %v,", k, this.Entries[k]) 1692 } 1693 mapStringForEntries += "}" 1694 s := strings.Join([]string{`&ListAndWatchResponse{`, 1695 `Entries:` + mapStringForEntries + `,`, 1696 `AllowSharedCoresOverlapReclaimedCores:` + fmt.Sprintf("%v", this.AllowSharedCoresOverlapReclaimedCores) + `,`, 1697 `}`, 1698 }, "") 1699 return s 1700 } 1701 func (this *CalculationEntries) String() string { 1702 if this == nil { 1703 return "nil" 1704 } 1705 keysForEntries := make([]string, 0, len(this.Entries)) 1706 for k, _ := range this.Entries { 1707 keysForEntries = append(keysForEntries, k) 1708 } 1709 github_com_gogo_protobuf_sortkeys.Strings(keysForEntries) 1710 mapStringForEntries := "map[string]*CalculationInfo{" 1711 for _, k := range keysForEntries { 1712 mapStringForEntries += fmt.Sprintf("%v: %v,", k, this.Entries[k]) 1713 } 1714 mapStringForEntries += "}" 1715 s := strings.Join([]string{`&CalculationEntries{`, 1716 `Entries:` + mapStringForEntries + `,`, 1717 `}`, 1718 }, "") 1719 return s 1720 } 1721 func (this *CalculationInfo) String() string { 1722 if this == nil { 1723 return "nil" 1724 } 1725 keysForCalculationResultsByNumas := make([]int64, 0, len(this.CalculationResultsByNumas)) 1726 for k, _ := range this.CalculationResultsByNumas { 1727 keysForCalculationResultsByNumas = append(keysForCalculationResultsByNumas, k) 1728 } 1729 github_com_gogo_protobuf_sortkeys.Int64s(keysForCalculationResultsByNumas) 1730 mapStringForCalculationResultsByNumas := "map[int64]*NumaCalculationResult{" 1731 for _, k := range keysForCalculationResultsByNumas { 1732 mapStringForCalculationResultsByNumas += fmt.Sprintf("%v: %v,", k, this.CalculationResultsByNumas[k]) 1733 } 1734 mapStringForCalculationResultsByNumas += "}" 1735 s := strings.Join([]string{`&CalculationInfo{`, 1736 `OwnerPoolName:` + fmt.Sprintf("%v", this.OwnerPoolName) + `,`, 1737 `CalculationResultsByNumas:` 
+ mapStringForCalculationResultsByNumas + `,`, 1738 `}`, 1739 }, "") 1740 return s 1741 } 1742 func (this *NumaCalculationResult) String() string { 1743 if this == nil { 1744 return "nil" 1745 } 1746 repeatedStringForBlocks := "[]*Block{" 1747 for _, f := range this.Blocks { 1748 repeatedStringForBlocks += strings.Replace(f.String(), "Block", "Block", 1) + "," 1749 } 1750 repeatedStringForBlocks += "}" 1751 s := strings.Join([]string{`&NumaCalculationResult{`, 1752 `Blocks:` + repeatedStringForBlocks + `,`, 1753 `}`, 1754 }, "") 1755 return s 1756 } 1757 func (this *Block) String() string { 1758 if this == nil { 1759 return "nil" 1760 } 1761 repeatedStringForOverlapTargets := "[]*OverlapTarget{" 1762 for _, f := range this.OverlapTargets { 1763 repeatedStringForOverlapTargets += strings.Replace(f.String(), "OverlapTarget", "OverlapTarget", 1) + "," 1764 } 1765 repeatedStringForOverlapTargets += "}" 1766 s := strings.Join([]string{`&Block{`, 1767 `Result:` + fmt.Sprintf("%v", this.Result) + `,`, 1768 `OverlapTargets:` + repeatedStringForOverlapTargets + `,`, 1769 `BlockId:` + fmt.Sprintf("%v", this.BlockId) + `,`, 1770 `}`, 1771 }, "") 1772 return s 1773 } 1774 func (this *OverlapTarget) String() string { 1775 if this == nil { 1776 return "nil" 1777 } 1778 s := strings.Join([]string{`&OverlapTarget{`, 1779 `OverlapTargetPoolName:` + fmt.Sprintf("%v", this.OverlapTargetPoolName) + `,`, 1780 `OverlapTargetPodUid:` + fmt.Sprintf("%v", this.OverlapTargetPodUid) + `,`, 1781 `OverlapTargetContainerName:` + fmt.Sprintf("%v", this.OverlapTargetContainerName) + `,`, 1782 `OverlapType:` + fmt.Sprintf("%v", this.OverlapType) + `,`, 1783 `}`, 1784 }, "") 1785 return s 1786 } 1787 func (this *GetCheckpointRequest) String() string { 1788 if this == nil { 1789 return "nil" 1790 } 1791 s := strings.Join([]string{`&GetCheckpointRequest{`, 1792 `}`, 1793 }, "") 1794 return s 1795 } 1796 func (this *GetCheckpointResponse) String() string { 1797 if this == nil { 1798 return "nil" 1799 
} 1800 keysForEntries := make([]string, 0, len(this.Entries)) 1801 for k, _ := range this.Entries { 1802 keysForEntries = append(keysForEntries, k) 1803 } 1804 github_com_gogo_protobuf_sortkeys.Strings(keysForEntries) 1805 mapStringForEntries := "map[string]*AllocationEntries{" 1806 for _, k := range keysForEntries { 1807 mapStringForEntries += fmt.Sprintf("%v: %v,", k, this.Entries[k]) 1808 } 1809 mapStringForEntries += "}" 1810 s := strings.Join([]string{`&GetCheckpointResponse{`, 1811 `Entries:` + mapStringForEntries + `,`, 1812 `}`, 1813 }, "") 1814 return s 1815 } 1816 func (this *AllocationEntries) String() string { 1817 if this == nil { 1818 return "nil" 1819 } 1820 keysForEntries := make([]string, 0, len(this.Entries)) 1821 for k, _ := range this.Entries { 1822 keysForEntries = append(keysForEntries, k) 1823 } 1824 github_com_gogo_protobuf_sortkeys.Strings(keysForEntries) 1825 mapStringForEntries := "map[string]*AllocationInfo{" 1826 for _, k := range keysForEntries { 1827 mapStringForEntries += fmt.Sprintf("%v: %v,", k, this.Entries[k]) 1828 } 1829 mapStringForEntries += "}" 1830 s := strings.Join([]string{`&AllocationEntries{`, 1831 `Entries:` + mapStringForEntries + `,`, 1832 `}`, 1833 }, "") 1834 return s 1835 } 1836 func (this *AllocationInfo) String() string { 1837 if this == nil { 1838 return "nil" 1839 } 1840 keysForTopologyAwareAssignments := make([]uint64, 0, len(this.TopologyAwareAssignments)) 1841 for k, _ := range this.TopologyAwareAssignments { 1842 keysForTopologyAwareAssignments = append(keysForTopologyAwareAssignments, k) 1843 } 1844 github_com_gogo_protobuf_sortkeys.Uint64s(keysForTopologyAwareAssignments) 1845 mapStringForTopologyAwareAssignments := "map[uint64]string{" 1846 for _, k := range keysForTopologyAwareAssignments { 1847 mapStringForTopologyAwareAssignments += fmt.Sprintf("%v: %v,", k, this.TopologyAwareAssignments[k]) 1848 } 1849 mapStringForTopologyAwareAssignments += "}" 1850 keysForOriginalTopologyAwareAssignments := 
make([]uint64, 0, len(this.OriginalTopologyAwareAssignments)) 1851 for k, _ := range this.OriginalTopologyAwareAssignments { 1852 keysForOriginalTopologyAwareAssignments = append(keysForOriginalTopologyAwareAssignments, k) 1853 } 1854 github_com_gogo_protobuf_sortkeys.Uint64s(keysForOriginalTopologyAwareAssignments) 1855 mapStringForOriginalTopologyAwareAssignments := "map[uint64]string{" 1856 for _, k := range keysForOriginalTopologyAwareAssignments { 1857 mapStringForOriginalTopologyAwareAssignments += fmt.Sprintf("%v: %v,", k, this.OriginalTopologyAwareAssignments[k]) 1858 } 1859 mapStringForOriginalTopologyAwareAssignments += "}" 1860 s := strings.Join([]string{`&AllocationInfo{`, 1861 `RampUp:` + fmt.Sprintf("%v", this.RampUp) + `,`, 1862 `OwnerPoolName:` + fmt.Sprintf("%v", this.OwnerPoolName) + `,`, 1863 `TopologyAwareAssignments:` + mapStringForTopologyAwareAssignments + `,`, 1864 `OriginalTopologyAwareAssignments:` + mapStringForOriginalTopologyAwareAssignments + `,`, 1865 `}`, 1866 }, "") 1867 return s 1868 } 1869 func valueToStringCpu(v interface{}) string { 1870 rv := reflect.ValueOf(v) 1871 if rv.IsNil() { 1872 return "nil" 1873 } 1874 pv := reflect.Indirect(rv).Interface() 1875 return fmt.Sprintf("*%v", pv) 1876 } 1877 func (m *ListAndWatchResponse) Unmarshal(dAtA []byte) error { 1878 l := len(dAtA) 1879 iNdEx := 0 1880 for iNdEx < l { 1881 preIndex := iNdEx 1882 var wire uint64 1883 for shift := uint(0); ; shift += 7 { 1884 if shift >= 64 { 1885 return ErrIntOverflowCpu 1886 } 1887 if iNdEx >= l { 1888 return io.ErrUnexpectedEOF 1889 } 1890 b := dAtA[iNdEx] 1891 iNdEx++ 1892 wire |= uint64(b&0x7F) << shift 1893 if b < 0x80 { 1894 break 1895 } 1896 } 1897 fieldNum := int32(wire >> 3) 1898 wireType := int(wire & 0x7) 1899 if wireType == 4 { 1900 return fmt.Errorf("proto: ListAndWatchResponse: wiretype end group for non-group") 1901 } 1902 if fieldNum <= 0 { 1903 return fmt.Errorf("proto: ListAndWatchResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) 1904 } 1905 switch fieldNum { 1906 case 1: 1907 if wireType != 2 { 1908 return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) 1909 } 1910 var msglen int 1911 for shift := uint(0); ; shift += 7 { 1912 if shift >= 64 { 1913 return ErrIntOverflowCpu 1914 } 1915 if iNdEx >= l { 1916 return io.ErrUnexpectedEOF 1917 } 1918 b := dAtA[iNdEx] 1919 iNdEx++ 1920 msglen |= int(b&0x7F) << shift 1921 if b < 0x80 { 1922 break 1923 } 1924 } 1925 if msglen < 0 { 1926 return ErrInvalidLengthCpu 1927 } 1928 postIndex := iNdEx + msglen 1929 if postIndex < 0 { 1930 return ErrInvalidLengthCpu 1931 } 1932 if postIndex > l { 1933 return io.ErrUnexpectedEOF 1934 } 1935 if m.Entries == nil { 1936 m.Entries = make(map[string]*CalculationEntries) 1937 } 1938 var mapkey string 1939 var mapvalue *CalculationEntries 1940 for iNdEx < postIndex { 1941 entryPreIndex := iNdEx 1942 var wire uint64 1943 for shift := uint(0); ; shift += 7 { 1944 if shift >= 64 { 1945 return ErrIntOverflowCpu 1946 } 1947 if iNdEx >= l { 1948 return io.ErrUnexpectedEOF 1949 } 1950 b := dAtA[iNdEx] 1951 iNdEx++ 1952 wire |= uint64(b&0x7F) << shift 1953 if b < 0x80 { 1954 break 1955 } 1956 } 1957 fieldNum := int32(wire >> 3) 1958 if fieldNum == 1 { 1959 var stringLenmapkey uint64 1960 for shift := uint(0); ; shift += 7 { 1961 if shift >= 64 { 1962 return ErrIntOverflowCpu 1963 } 1964 if iNdEx >= l { 1965 return io.ErrUnexpectedEOF 1966 } 1967 b := dAtA[iNdEx] 1968 iNdEx++ 1969 stringLenmapkey |= uint64(b&0x7F) << shift 1970 if b < 0x80 { 1971 break 1972 } 1973 } 1974 intStringLenmapkey := int(stringLenmapkey) 1975 if intStringLenmapkey < 0 { 1976 return ErrInvalidLengthCpu 1977 } 1978 postStringIndexmapkey := iNdEx + intStringLenmapkey 1979 if postStringIndexmapkey < 0 { 1980 return ErrInvalidLengthCpu 1981 } 1982 if postStringIndexmapkey > l { 1983 return io.ErrUnexpectedEOF 1984 } 1985 mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 1986 iNdEx = postStringIndexmapkey 1987 } else if fieldNum 
== 2 { 1988 var mapmsglen int 1989 for shift := uint(0); ; shift += 7 { 1990 if shift >= 64 { 1991 return ErrIntOverflowCpu 1992 } 1993 if iNdEx >= l { 1994 return io.ErrUnexpectedEOF 1995 } 1996 b := dAtA[iNdEx] 1997 iNdEx++ 1998 mapmsglen |= int(b&0x7F) << shift 1999 if b < 0x80 { 2000 break 2001 } 2002 } 2003 if mapmsglen < 0 { 2004 return ErrInvalidLengthCpu 2005 } 2006 postmsgIndex := iNdEx + mapmsglen 2007 if postmsgIndex < 0 { 2008 return ErrInvalidLengthCpu 2009 } 2010 if postmsgIndex > l { 2011 return io.ErrUnexpectedEOF 2012 } 2013 mapvalue = &CalculationEntries{} 2014 if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { 2015 return err 2016 } 2017 iNdEx = postmsgIndex 2018 } else { 2019 iNdEx = entryPreIndex 2020 skippy, err := skipCpu(dAtA[iNdEx:]) 2021 if err != nil { 2022 return err 2023 } 2024 if skippy < 0 { 2025 return ErrInvalidLengthCpu 2026 } 2027 if (iNdEx + skippy) > postIndex { 2028 return io.ErrUnexpectedEOF 2029 } 2030 iNdEx += skippy 2031 } 2032 } 2033 m.Entries[mapkey] = mapvalue 2034 iNdEx = postIndex 2035 case 2: 2036 if wireType != 0 { 2037 return fmt.Errorf("proto: wrong wireType = %d for field AllowSharedCoresOverlapReclaimedCores", wireType) 2038 } 2039 var v int 2040 for shift := uint(0); ; shift += 7 { 2041 if shift >= 64 { 2042 return ErrIntOverflowCpu 2043 } 2044 if iNdEx >= l { 2045 return io.ErrUnexpectedEOF 2046 } 2047 b := dAtA[iNdEx] 2048 iNdEx++ 2049 v |= int(b&0x7F) << shift 2050 if b < 0x80 { 2051 break 2052 } 2053 } 2054 m.AllowSharedCoresOverlapReclaimedCores = bool(v != 0) 2055 default: 2056 iNdEx = preIndex 2057 skippy, err := skipCpu(dAtA[iNdEx:]) 2058 if err != nil { 2059 return err 2060 } 2061 if skippy < 0 { 2062 return ErrInvalidLengthCpu 2063 } 2064 if (iNdEx + skippy) < 0 { 2065 return ErrInvalidLengthCpu 2066 } 2067 if (iNdEx + skippy) > l { 2068 return io.ErrUnexpectedEOF 2069 } 2070 iNdEx += skippy 2071 } 2072 } 2073 2074 if iNdEx > l { 2075 return io.ErrUnexpectedEOF 2076 } 2077 return nil 
2078 } 2079 func (m *CalculationEntries) Unmarshal(dAtA []byte) error { 2080 l := len(dAtA) 2081 iNdEx := 0 2082 for iNdEx < l { 2083 preIndex := iNdEx 2084 var wire uint64 2085 for shift := uint(0); ; shift += 7 { 2086 if shift >= 64 { 2087 return ErrIntOverflowCpu 2088 } 2089 if iNdEx >= l { 2090 return io.ErrUnexpectedEOF 2091 } 2092 b := dAtA[iNdEx] 2093 iNdEx++ 2094 wire |= uint64(b&0x7F) << shift 2095 if b < 0x80 { 2096 break 2097 } 2098 } 2099 fieldNum := int32(wire >> 3) 2100 wireType := int(wire & 0x7) 2101 if wireType == 4 { 2102 return fmt.Errorf("proto: CalculationEntries: wiretype end group for non-group") 2103 } 2104 if fieldNum <= 0 { 2105 return fmt.Errorf("proto: CalculationEntries: illegal tag %d (wire type %d)", fieldNum, wire) 2106 } 2107 switch fieldNum { 2108 case 1: 2109 if wireType != 2 { 2110 return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) 2111 } 2112 var msglen int 2113 for shift := uint(0); ; shift += 7 { 2114 if shift >= 64 { 2115 return ErrIntOverflowCpu 2116 } 2117 if iNdEx >= l { 2118 return io.ErrUnexpectedEOF 2119 } 2120 b := dAtA[iNdEx] 2121 iNdEx++ 2122 msglen |= int(b&0x7F) << shift 2123 if b < 0x80 { 2124 break 2125 } 2126 } 2127 if msglen < 0 { 2128 return ErrInvalidLengthCpu 2129 } 2130 postIndex := iNdEx + msglen 2131 if postIndex < 0 { 2132 return ErrInvalidLengthCpu 2133 } 2134 if postIndex > l { 2135 return io.ErrUnexpectedEOF 2136 } 2137 if m.Entries == nil { 2138 m.Entries = make(map[string]*CalculationInfo) 2139 } 2140 var mapkey string 2141 var mapvalue *CalculationInfo 2142 for iNdEx < postIndex { 2143 entryPreIndex := iNdEx 2144 var wire uint64 2145 for shift := uint(0); ; shift += 7 { 2146 if shift >= 64 { 2147 return ErrIntOverflowCpu 2148 } 2149 if iNdEx >= l { 2150 return io.ErrUnexpectedEOF 2151 } 2152 b := dAtA[iNdEx] 2153 iNdEx++ 2154 wire |= uint64(b&0x7F) << shift 2155 if b < 0x80 { 2156 break 2157 } 2158 } 2159 fieldNum := int32(wire >> 3) 2160 if fieldNum == 1 { 2161 var 
stringLenmapkey uint64 2162 for shift := uint(0); ; shift += 7 { 2163 if shift >= 64 { 2164 return ErrIntOverflowCpu 2165 } 2166 if iNdEx >= l { 2167 return io.ErrUnexpectedEOF 2168 } 2169 b := dAtA[iNdEx] 2170 iNdEx++ 2171 stringLenmapkey |= uint64(b&0x7F) << shift 2172 if b < 0x80 { 2173 break 2174 } 2175 } 2176 intStringLenmapkey := int(stringLenmapkey) 2177 if intStringLenmapkey < 0 { 2178 return ErrInvalidLengthCpu 2179 } 2180 postStringIndexmapkey := iNdEx + intStringLenmapkey 2181 if postStringIndexmapkey < 0 { 2182 return ErrInvalidLengthCpu 2183 } 2184 if postStringIndexmapkey > l { 2185 return io.ErrUnexpectedEOF 2186 } 2187 mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 2188 iNdEx = postStringIndexmapkey 2189 } else if fieldNum == 2 { 2190 var mapmsglen int 2191 for shift := uint(0); ; shift += 7 { 2192 if shift >= 64 { 2193 return ErrIntOverflowCpu 2194 } 2195 if iNdEx >= l { 2196 return io.ErrUnexpectedEOF 2197 } 2198 b := dAtA[iNdEx] 2199 iNdEx++ 2200 mapmsglen |= int(b&0x7F) << shift 2201 if b < 0x80 { 2202 break 2203 } 2204 } 2205 if mapmsglen < 0 { 2206 return ErrInvalidLengthCpu 2207 } 2208 postmsgIndex := iNdEx + mapmsglen 2209 if postmsgIndex < 0 { 2210 return ErrInvalidLengthCpu 2211 } 2212 if postmsgIndex > l { 2213 return io.ErrUnexpectedEOF 2214 } 2215 mapvalue = &CalculationInfo{} 2216 if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { 2217 return err 2218 } 2219 iNdEx = postmsgIndex 2220 } else { 2221 iNdEx = entryPreIndex 2222 skippy, err := skipCpu(dAtA[iNdEx:]) 2223 if err != nil { 2224 return err 2225 } 2226 if skippy < 0 { 2227 return ErrInvalidLengthCpu 2228 } 2229 if (iNdEx + skippy) > postIndex { 2230 return io.ErrUnexpectedEOF 2231 } 2232 iNdEx += skippy 2233 } 2234 } 2235 m.Entries[mapkey] = mapvalue 2236 iNdEx = postIndex 2237 default: 2238 iNdEx = preIndex 2239 skippy, err := skipCpu(dAtA[iNdEx:]) 2240 if err != nil { 2241 return err 2242 } 2243 if skippy < 0 { 2244 return ErrInvalidLengthCpu 2245 } 2246 
if (iNdEx + skippy) < 0 { 2247 return ErrInvalidLengthCpu 2248 } 2249 if (iNdEx + skippy) > l { 2250 return io.ErrUnexpectedEOF 2251 } 2252 iNdEx += skippy 2253 } 2254 } 2255 2256 if iNdEx > l { 2257 return io.ErrUnexpectedEOF 2258 } 2259 return nil 2260 } 2261 func (m *CalculationInfo) Unmarshal(dAtA []byte) error { 2262 l := len(dAtA) 2263 iNdEx := 0 2264 for iNdEx < l { 2265 preIndex := iNdEx 2266 var wire uint64 2267 for shift := uint(0); ; shift += 7 { 2268 if shift >= 64 { 2269 return ErrIntOverflowCpu 2270 } 2271 if iNdEx >= l { 2272 return io.ErrUnexpectedEOF 2273 } 2274 b := dAtA[iNdEx] 2275 iNdEx++ 2276 wire |= uint64(b&0x7F) << shift 2277 if b < 0x80 { 2278 break 2279 } 2280 } 2281 fieldNum := int32(wire >> 3) 2282 wireType := int(wire & 0x7) 2283 if wireType == 4 { 2284 return fmt.Errorf("proto: CalculationInfo: wiretype end group for non-group") 2285 } 2286 if fieldNum <= 0 { 2287 return fmt.Errorf("proto: CalculationInfo: illegal tag %d (wire type %d)", fieldNum, wire) 2288 } 2289 switch fieldNum { 2290 case 1: 2291 if wireType != 2 { 2292 return fmt.Errorf("proto: wrong wireType = %d for field OwnerPoolName", wireType) 2293 } 2294 var stringLen uint64 2295 for shift := uint(0); ; shift += 7 { 2296 if shift >= 64 { 2297 return ErrIntOverflowCpu 2298 } 2299 if iNdEx >= l { 2300 return io.ErrUnexpectedEOF 2301 } 2302 b := dAtA[iNdEx] 2303 iNdEx++ 2304 stringLen |= uint64(b&0x7F) << shift 2305 if b < 0x80 { 2306 break 2307 } 2308 } 2309 intStringLen := int(stringLen) 2310 if intStringLen < 0 { 2311 return ErrInvalidLengthCpu 2312 } 2313 postIndex := iNdEx + intStringLen 2314 if postIndex < 0 { 2315 return ErrInvalidLengthCpu 2316 } 2317 if postIndex > l { 2318 return io.ErrUnexpectedEOF 2319 } 2320 m.OwnerPoolName = string(dAtA[iNdEx:postIndex]) 2321 iNdEx = postIndex 2322 case 2: 2323 if wireType != 2 { 2324 return fmt.Errorf("proto: wrong wireType = %d for field CalculationResultsByNumas", wireType) 2325 } 2326 var msglen int 2327 for shift := uint(0); 
; shift += 7 { 2328 if shift >= 64 { 2329 return ErrIntOverflowCpu 2330 } 2331 if iNdEx >= l { 2332 return io.ErrUnexpectedEOF 2333 } 2334 b := dAtA[iNdEx] 2335 iNdEx++ 2336 msglen |= int(b&0x7F) << shift 2337 if b < 0x80 { 2338 break 2339 } 2340 } 2341 if msglen < 0 { 2342 return ErrInvalidLengthCpu 2343 } 2344 postIndex := iNdEx + msglen 2345 if postIndex < 0 { 2346 return ErrInvalidLengthCpu 2347 } 2348 if postIndex > l { 2349 return io.ErrUnexpectedEOF 2350 } 2351 if m.CalculationResultsByNumas == nil { 2352 m.CalculationResultsByNumas = make(map[int64]*NumaCalculationResult) 2353 } 2354 var mapkey int64 2355 var mapvalue *NumaCalculationResult 2356 for iNdEx < postIndex { 2357 entryPreIndex := iNdEx 2358 var wire uint64 2359 for shift := uint(0); ; shift += 7 { 2360 if shift >= 64 { 2361 return ErrIntOverflowCpu 2362 } 2363 if iNdEx >= l { 2364 return io.ErrUnexpectedEOF 2365 } 2366 b := dAtA[iNdEx] 2367 iNdEx++ 2368 wire |= uint64(b&0x7F) << shift 2369 if b < 0x80 { 2370 break 2371 } 2372 } 2373 fieldNum := int32(wire >> 3) 2374 if fieldNum == 1 { 2375 for shift := uint(0); ; shift += 7 { 2376 if shift >= 64 { 2377 return ErrIntOverflowCpu 2378 } 2379 if iNdEx >= l { 2380 return io.ErrUnexpectedEOF 2381 } 2382 b := dAtA[iNdEx] 2383 iNdEx++ 2384 mapkey |= int64(b&0x7F) << shift 2385 if b < 0x80 { 2386 break 2387 } 2388 } 2389 } else if fieldNum == 2 { 2390 var mapmsglen int 2391 for shift := uint(0); ; shift += 7 { 2392 if shift >= 64 { 2393 return ErrIntOverflowCpu 2394 } 2395 if iNdEx >= l { 2396 return io.ErrUnexpectedEOF 2397 } 2398 b := dAtA[iNdEx] 2399 iNdEx++ 2400 mapmsglen |= int(b&0x7F) << shift 2401 if b < 0x80 { 2402 break 2403 } 2404 } 2405 if mapmsglen < 0 { 2406 return ErrInvalidLengthCpu 2407 } 2408 postmsgIndex := iNdEx + mapmsglen 2409 if postmsgIndex < 0 { 2410 return ErrInvalidLengthCpu 2411 } 2412 if postmsgIndex > l { 2413 return io.ErrUnexpectedEOF 2414 } 2415 mapvalue = &NumaCalculationResult{} 2416 if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { 2417 return err 2418 } 2419 iNdEx = postmsgIndex 2420 } else { 2421 iNdEx = entryPreIndex 2422 skippy, err := skipCpu(dAtA[iNdEx:]) 2423 if err != nil { 2424 return err 2425 } 2426 if skippy < 0 { 2427 return ErrInvalidLengthCpu 2428 } 2429 if (iNdEx + skippy) > postIndex { 2430 return io.ErrUnexpectedEOF 2431 } 2432 iNdEx += skippy 2433 } 2434 } 2435 m.CalculationResultsByNumas[mapkey] = mapvalue 2436 iNdEx = postIndex 2437 default: 2438 iNdEx = preIndex 2439 skippy, err := skipCpu(dAtA[iNdEx:]) 2440 if err != nil { 2441 return err 2442 } 2443 if skippy < 0 { 2444 return ErrInvalidLengthCpu 2445 } 2446 if (iNdEx + skippy) < 0 { 2447 return ErrInvalidLengthCpu 2448 } 2449 if (iNdEx + skippy) > l { 2450 return io.ErrUnexpectedEOF 2451 } 2452 iNdEx += skippy 2453 } 2454 } 2455 2456 if iNdEx > l { 2457 return io.ErrUnexpectedEOF 2458 } 2459 return nil 2460 } 2461 func (m *NumaCalculationResult) Unmarshal(dAtA []byte) error { 2462 l := len(dAtA) 2463 iNdEx := 0 2464 for iNdEx < l { 2465 preIndex := iNdEx 2466 var wire uint64 2467 for shift := uint(0); ; shift += 7 { 2468 if shift >= 64 { 2469 return ErrIntOverflowCpu 2470 } 2471 if iNdEx >= l { 2472 return io.ErrUnexpectedEOF 2473 } 2474 b := dAtA[iNdEx] 2475 iNdEx++ 2476 wire |= uint64(b&0x7F) << shift 2477 if b < 0x80 { 2478 break 2479 } 2480 } 2481 fieldNum := int32(wire >> 3) 2482 wireType := int(wire & 0x7) 2483 if wireType == 4 { 2484 return fmt.Errorf("proto: NumaCalculationResult: wiretype end group for non-group") 2485 } 2486 if fieldNum <= 0 { 2487 return fmt.Errorf("proto: NumaCalculationResult: illegal tag %d (wire type %d)", fieldNum, wire) 2488 } 2489 switch fieldNum { 2490 case 2: 2491 if wireType != 2 { 2492 return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) 2493 } 2494 var msglen int 2495 for shift := uint(0); ; shift += 7 { 2496 if shift >= 64 { 2497 return ErrIntOverflowCpu 2498 } 2499 if iNdEx >= l { 2500 return 
io.ErrUnexpectedEOF 2501 } 2502 b := dAtA[iNdEx] 2503 iNdEx++ 2504 msglen |= int(b&0x7F) << shift 2505 if b < 0x80 { 2506 break 2507 } 2508 } 2509 if msglen < 0 { 2510 return ErrInvalidLengthCpu 2511 } 2512 postIndex := iNdEx + msglen 2513 if postIndex < 0 { 2514 return ErrInvalidLengthCpu 2515 } 2516 if postIndex > l { 2517 return io.ErrUnexpectedEOF 2518 } 2519 m.Blocks = append(m.Blocks, &Block{}) 2520 if err := m.Blocks[len(m.Blocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 2521 return err 2522 } 2523 iNdEx = postIndex 2524 default: 2525 iNdEx = preIndex 2526 skippy, err := skipCpu(dAtA[iNdEx:]) 2527 if err != nil { 2528 return err 2529 } 2530 if skippy < 0 { 2531 return ErrInvalidLengthCpu 2532 } 2533 if (iNdEx + skippy) < 0 { 2534 return ErrInvalidLengthCpu 2535 } 2536 if (iNdEx + skippy) > l { 2537 return io.ErrUnexpectedEOF 2538 } 2539 iNdEx += skippy 2540 } 2541 } 2542 2543 if iNdEx > l { 2544 return io.ErrUnexpectedEOF 2545 } 2546 return nil 2547 } 2548 func (m *Block) Unmarshal(dAtA []byte) error { 2549 l := len(dAtA) 2550 iNdEx := 0 2551 for iNdEx < l { 2552 preIndex := iNdEx 2553 var wire uint64 2554 for shift := uint(0); ; shift += 7 { 2555 if shift >= 64 { 2556 return ErrIntOverflowCpu 2557 } 2558 if iNdEx >= l { 2559 return io.ErrUnexpectedEOF 2560 } 2561 b := dAtA[iNdEx] 2562 iNdEx++ 2563 wire |= uint64(b&0x7F) << shift 2564 if b < 0x80 { 2565 break 2566 } 2567 } 2568 fieldNum := int32(wire >> 3) 2569 wireType := int(wire & 0x7) 2570 if wireType == 4 { 2571 return fmt.Errorf("proto: Block: wiretype end group for non-group") 2572 } 2573 if fieldNum <= 0 { 2574 return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) 2575 } 2576 switch fieldNum { 2577 case 1: 2578 if wireType != 0 { 2579 return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) 2580 } 2581 m.Result = 0 2582 for shift := uint(0); ; shift += 7 { 2583 if shift >= 64 { 2584 return ErrIntOverflowCpu 2585 } 2586 if iNdEx >= l { 2587 return 
io.ErrUnexpectedEOF 2588 } 2589 b := dAtA[iNdEx] 2590 iNdEx++ 2591 m.Result |= uint64(b&0x7F) << shift 2592 if b < 0x80 { 2593 break 2594 } 2595 } 2596 case 2: 2597 if wireType != 2 { 2598 return fmt.Errorf("proto: wrong wireType = %d for field OverlapTargets", wireType) 2599 } 2600 var msglen int 2601 for shift := uint(0); ; shift += 7 { 2602 if shift >= 64 { 2603 return ErrIntOverflowCpu 2604 } 2605 if iNdEx >= l { 2606 return io.ErrUnexpectedEOF 2607 } 2608 b := dAtA[iNdEx] 2609 iNdEx++ 2610 msglen |= int(b&0x7F) << shift 2611 if b < 0x80 { 2612 break 2613 } 2614 } 2615 if msglen < 0 { 2616 return ErrInvalidLengthCpu 2617 } 2618 postIndex := iNdEx + msglen 2619 if postIndex < 0 { 2620 return ErrInvalidLengthCpu 2621 } 2622 if postIndex > l { 2623 return io.ErrUnexpectedEOF 2624 } 2625 m.OverlapTargets = append(m.OverlapTargets, &OverlapTarget{}) 2626 if err := m.OverlapTargets[len(m.OverlapTargets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 2627 return err 2628 } 2629 iNdEx = postIndex 2630 case 3: 2631 if wireType != 2 { 2632 return fmt.Errorf("proto: wrong wireType = %d for field BlockId", wireType) 2633 } 2634 var stringLen uint64 2635 for shift := uint(0); ; shift += 7 { 2636 if shift >= 64 { 2637 return ErrIntOverflowCpu 2638 } 2639 if iNdEx >= l { 2640 return io.ErrUnexpectedEOF 2641 } 2642 b := dAtA[iNdEx] 2643 iNdEx++ 2644 stringLen |= uint64(b&0x7F) << shift 2645 if b < 0x80 { 2646 break 2647 } 2648 } 2649 intStringLen := int(stringLen) 2650 if intStringLen < 0 { 2651 return ErrInvalidLengthCpu 2652 } 2653 postIndex := iNdEx + intStringLen 2654 if postIndex < 0 { 2655 return ErrInvalidLengthCpu 2656 } 2657 if postIndex > l { 2658 return io.ErrUnexpectedEOF 2659 } 2660 m.BlockId = string(dAtA[iNdEx:postIndex]) 2661 iNdEx = postIndex 2662 default: 2663 iNdEx = preIndex 2664 skippy, err := skipCpu(dAtA[iNdEx:]) 2665 if err != nil { 2666 return err 2667 } 2668 if skippy < 0 { 2669 return ErrInvalidLengthCpu 2670 } 2671 if (iNdEx + skippy) < 0 { 2672 
return ErrInvalidLengthCpu 2673 } 2674 if (iNdEx + skippy) > l { 2675 return io.ErrUnexpectedEOF 2676 } 2677 iNdEx += skippy 2678 } 2679 } 2680 2681 if iNdEx > l { 2682 return io.ErrUnexpectedEOF 2683 } 2684 return nil 2685 } 2686 func (m *OverlapTarget) Unmarshal(dAtA []byte) error { 2687 l := len(dAtA) 2688 iNdEx := 0 2689 for iNdEx < l { 2690 preIndex := iNdEx 2691 var wire uint64 2692 for shift := uint(0); ; shift += 7 { 2693 if shift >= 64 { 2694 return ErrIntOverflowCpu 2695 } 2696 if iNdEx >= l { 2697 return io.ErrUnexpectedEOF 2698 } 2699 b := dAtA[iNdEx] 2700 iNdEx++ 2701 wire |= uint64(b&0x7F) << shift 2702 if b < 0x80 { 2703 break 2704 } 2705 } 2706 fieldNum := int32(wire >> 3) 2707 wireType := int(wire & 0x7) 2708 if wireType == 4 { 2709 return fmt.Errorf("proto: OverlapTarget: wiretype end group for non-group") 2710 } 2711 if fieldNum <= 0 { 2712 return fmt.Errorf("proto: OverlapTarget: illegal tag %d (wire type %d)", fieldNum, wire) 2713 } 2714 switch fieldNum { 2715 case 1: 2716 if wireType != 2 { 2717 return fmt.Errorf("proto: wrong wireType = %d for field OverlapTargetPoolName", wireType) 2718 } 2719 var stringLen uint64 2720 for shift := uint(0); ; shift += 7 { 2721 if shift >= 64 { 2722 return ErrIntOverflowCpu 2723 } 2724 if iNdEx >= l { 2725 return io.ErrUnexpectedEOF 2726 } 2727 b := dAtA[iNdEx] 2728 iNdEx++ 2729 stringLen |= uint64(b&0x7F) << shift 2730 if b < 0x80 { 2731 break 2732 } 2733 } 2734 intStringLen := int(stringLen) 2735 if intStringLen < 0 { 2736 return ErrInvalidLengthCpu 2737 } 2738 postIndex := iNdEx + intStringLen 2739 if postIndex < 0 { 2740 return ErrInvalidLengthCpu 2741 } 2742 if postIndex > l { 2743 return io.ErrUnexpectedEOF 2744 } 2745 m.OverlapTargetPoolName = string(dAtA[iNdEx:postIndex]) 2746 iNdEx = postIndex 2747 case 2: 2748 if wireType != 2 { 2749 return fmt.Errorf("proto: wrong wireType = %d for field OverlapTargetPodUid", wireType) 2750 } 2751 var stringLen uint64 2752 for shift := uint(0); ; shift += 7 { 2753 
if shift >= 64 { 2754 return ErrIntOverflowCpu 2755 } 2756 if iNdEx >= l { 2757 return io.ErrUnexpectedEOF 2758 } 2759 b := dAtA[iNdEx] 2760 iNdEx++ 2761 stringLen |= uint64(b&0x7F) << shift 2762 if b < 0x80 { 2763 break 2764 } 2765 } 2766 intStringLen := int(stringLen) 2767 if intStringLen < 0 { 2768 return ErrInvalidLengthCpu 2769 } 2770 postIndex := iNdEx + intStringLen 2771 if postIndex < 0 { 2772 return ErrInvalidLengthCpu 2773 } 2774 if postIndex > l { 2775 return io.ErrUnexpectedEOF 2776 } 2777 m.OverlapTargetPodUid = string(dAtA[iNdEx:postIndex]) 2778 iNdEx = postIndex 2779 case 3: 2780 if wireType != 2 { 2781 return fmt.Errorf("proto: wrong wireType = %d for field OverlapTargetContainerName", wireType) 2782 } 2783 var stringLen uint64 2784 for shift := uint(0); ; shift += 7 { 2785 if shift >= 64 { 2786 return ErrIntOverflowCpu 2787 } 2788 if iNdEx >= l { 2789 return io.ErrUnexpectedEOF 2790 } 2791 b := dAtA[iNdEx] 2792 iNdEx++ 2793 stringLen |= uint64(b&0x7F) << shift 2794 if b < 0x80 { 2795 break 2796 } 2797 } 2798 intStringLen := int(stringLen) 2799 if intStringLen < 0 { 2800 return ErrInvalidLengthCpu 2801 } 2802 postIndex := iNdEx + intStringLen 2803 if postIndex < 0 { 2804 return ErrInvalidLengthCpu 2805 } 2806 if postIndex > l { 2807 return io.ErrUnexpectedEOF 2808 } 2809 m.OverlapTargetContainerName = string(dAtA[iNdEx:postIndex]) 2810 iNdEx = postIndex 2811 case 4: 2812 if wireType != 0 { 2813 return fmt.Errorf("proto: wrong wireType = %d for field OverlapType", wireType) 2814 } 2815 m.OverlapType = 0 2816 for shift := uint(0); ; shift += 7 { 2817 if shift >= 64 { 2818 return ErrIntOverflowCpu 2819 } 2820 if iNdEx >= l { 2821 return io.ErrUnexpectedEOF 2822 } 2823 b := dAtA[iNdEx] 2824 iNdEx++ 2825 m.OverlapType |= OverlapType(b&0x7F) << shift 2826 if b < 0x80 { 2827 break 2828 } 2829 } 2830 default: 2831 iNdEx = preIndex 2832 skippy, err := skipCpu(dAtA[iNdEx:]) 2833 if err != nil { 2834 return err 2835 } 2836 if skippy < 0 { 2837 return 
ErrInvalidLengthCpu 2838 } 2839 if (iNdEx + skippy) < 0 { 2840 return ErrInvalidLengthCpu 2841 } 2842 if (iNdEx + skippy) > l { 2843 return io.ErrUnexpectedEOF 2844 } 2845 iNdEx += skippy 2846 } 2847 } 2848 2849 if iNdEx > l { 2850 return io.ErrUnexpectedEOF 2851 } 2852 return nil 2853 } 2854 func (m *GetCheckpointRequest) Unmarshal(dAtA []byte) error { 2855 l := len(dAtA) 2856 iNdEx := 0 2857 for iNdEx < l { 2858 preIndex := iNdEx 2859 var wire uint64 2860 for shift := uint(0); ; shift += 7 { 2861 if shift >= 64 { 2862 return ErrIntOverflowCpu 2863 } 2864 if iNdEx >= l { 2865 return io.ErrUnexpectedEOF 2866 } 2867 b := dAtA[iNdEx] 2868 iNdEx++ 2869 wire |= uint64(b&0x7F) << shift 2870 if b < 0x80 { 2871 break 2872 } 2873 } 2874 fieldNum := int32(wire >> 3) 2875 wireType := int(wire & 0x7) 2876 if wireType == 4 { 2877 return fmt.Errorf("proto: GetCheckpointRequest: wiretype end group for non-group") 2878 } 2879 if fieldNum <= 0 { 2880 return fmt.Errorf("proto: GetCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) 2881 } 2882 switch fieldNum { 2883 default: 2884 iNdEx = preIndex 2885 skippy, err := skipCpu(dAtA[iNdEx:]) 2886 if err != nil { 2887 return err 2888 } 2889 if skippy < 0 { 2890 return ErrInvalidLengthCpu 2891 } 2892 if (iNdEx + skippy) < 0 { 2893 return ErrInvalidLengthCpu 2894 } 2895 if (iNdEx + skippy) > l { 2896 return io.ErrUnexpectedEOF 2897 } 2898 iNdEx += skippy 2899 } 2900 } 2901 2902 if iNdEx > l { 2903 return io.ErrUnexpectedEOF 2904 } 2905 return nil 2906 } 2907 func (m *GetCheckpointResponse) Unmarshal(dAtA []byte) error { 2908 l := len(dAtA) 2909 iNdEx := 0 2910 for iNdEx < l { 2911 preIndex := iNdEx 2912 var wire uint64 2913 for shift := uint(0); ; shift += 7 { 2914 if shift >= 64 { 2915 return ErrIntOverflowCpu 2916 } 2917 if iNdEx >= l { 2918 return io.ErrUnexpectedEOF 2919 } 2920 b := dAtA[iNdEx] 2921 iNdEx++ 2922 wire |= uint64(b&0x7F) << shift 2923 if b < 0x80 { 2924 break 2925 } 2926 } 2927 fieldNum := int32(wire >> 3) 
2928 wireType := int(wire & 0x7) 2929 if wireType == 4 { 2930 return fmt.Errorf("proto: GetCheckpointResponse: wiretype end group for non-group") 2931 } 2932 if fieldNum <= 0 { 2933 return fmt.Errorf("proto: GetCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) 2934 } 2935 switch fieldNum { 2936 case 1: 2937 if wireType != 2 { 2938 return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) 2939 } 2940 var msglen int 2941 for shift := uint(0); ; shift += 7 { 2942 if shift >= 64 { 2943 return ErrIntOverflowCpu 2944 } 2945 if iNdEx >= l { 2946 return io.ErrUnexpectedEOF 2947 } 2948 b := dAtA[iNdEx] 2949 iNdEx++ 2950 msglen |= int(b&0x7F) << shift 2951 if b < 0x80 { 2952 break 2953 } 2954 } 2955 if msglen < 0 { 2956 return ErrInvalidLengthCpu 2957 } 2958 postIndex := iNdEx + msglen 2959 if postIndex < 0 { 2960 return ErrInvalidLengthCpu 2961 } 2962 if postIndex > l { 2963 return io.ErrUnexpectedEOF 2964 } 2965 if m.Entries == nil { 2966 m.Entries = make(map[string]*AllocationEntries) 2967 } 2968 var mapkey string 2969 var mapvalue *AllocationEntries 2970 for iNdEx < postIndex { 2971 entryPreIndex := iNdEx 2972 var wire uint64 2973 for shift := uint(0); ; shift += 7 { 2974 if shift >= 64 { 2975 return ErrIntOverflowCpu 2976 } 2977 if iNdEx >= l { 2978 return io.ErrUnexpectedEOF 2979 } 2980 b := dAtA[iNdEx] 2981 iNdEx++ 2982 wire |= uint64(b&0x7F) << shift 2983 if b < 0x80 { 2984 break 2985 } 2986 } 2987 fieldNum := int32(wire >> 3) 2988 if fieldNum == 1 { 2989 var stringLenmapkey uint64 2990 for shift := uint(0); ; shift += 7 { 2991 if shift >= 64 { 2992 return ErrIntOverflowCpu 2993 } 2994 if iNdEx >= l { 2995 return io.ErrUnexpectedEOF 2996 } 2997 b := dAtA[iNdEx] 2998 iNdEx++ 2999 stringLenmapkey |= uint64(b&0x7F) << shift 3000 if b < 0x80 { 3001 break 3002 } 3003 } 3004 intStringLenmapkey := int(stringLenmapkey) 3005 if intStringLenmapkey < 0 { 3006 return ErrInvalidLengthCpu 3007 } 3008 postStringIndexmapkey := iNdEx + 
intStringLenmapkey 3009 if postStringIndexmapkey < 0 { 3010 return ErrInvalidLengthCpu 3011 } 3012 if postStringIndexmapkey > l { 3013 return io.ErrUnexpectedEOF 3014 } 3015 mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 3016 iNdEx = postStringIndexmapkey 3017 } else if fieldNum == 2 { 3018 var mapmsglen int 3019 for shift := uint(0); ; shift += 7 { 3020 if shift >= 64 { 3021 return ErrIntOverflowCpu 3022 } 3023 if iNdEx >= l { 3024 return io.ErrUnexpectedEOF 3025 } 3026 b := dAtA[iNdEx] 3027 iNdEx++ 3028 mapmsglen |= int(b&0x7F) << shift 3029 if b < 0x80 { 3030 break 3031 } 3032 } 3033 if mapmsglen < 0 { 3034 return ErrInvalidLengthCpu 3035 } 3036 postmsgIndex := iNdEx + mapmsglen 3037 if postmsgIndex < 0 { 3038 return ErrInvalidLengthCpu 3039 } 3040 if postmsgIndex > l { 3041 return io.ErrUnexpectedEOF 3042 } 3043 mapvalue = &AllocationEntries{} 3044 if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { 3045 return err 3046 } 3047 iNdEx = postmsgIndex 3048 } else { 3049 iNdEx = entryPreIndex 3050 skippy, err := skipCpu(dAtA[iNdEx:]) 3051 if err != nil { 3052 return err 3053 } 3054 if skippy < 0 { 3055 return ErrInvalidLengthCpu 3056 } 3057 if (iNdEx + skippy) > postIndex { 3058 return io.ErrUnexpectedEOF 3059 } 3060 iNdEx += skippy 3061 } 3062 } 3063 m.Entries[mapkey] = mapvalue 3064 iNdEx = postIndex 3065 default: 3066 iNdEx = preIndex 3067 skippy, err := skipCpu(dAtA[iNdEx:]) 3068 if err != nil { 3069 return err 3070 } 3071 if skippy < 0 { 3072 return ErrInvalidLengthCpu 3073 } 3074 if (iNdEx + skippy) < 0 { 3075 return ErrInvalidLengthCpu 3076 } 3077 if (iNdEx + skippy) > l { 3078 return io.ErrUnexpectedEOF 3079 } 3080 iNdEx += skippy 3081 } 3082 } 3083 3084 if iNdEx > l { 3085 return io.ErrUnexpectedEOF 3086 } 3087 return nil 3088 } 3089 func (m *AllocationEntries) Unmarshal(dAtA []byte) error { 3090 l := len(dAtA) 3091 iNdEx := 0 3092 for iNdEx < l { 3093 preIndex := iNdEx 3094 var wire uint64 3095 for shift := uint(0); ; shift += 7 { 3096 
if shift >= 64 { 3097 return ErrIntOverflowCpu 3098 } 3099 if iNdEx >= l { 3100 return io.ErrUnexpectedEOF 3101 } 3102 b := dAtA[iNdEx] 3103 iNdEx++ 3104 wire |= uint64(b&0x7F) << shift 3105 if b < 0x80 { 3106 break 3107 } 3108 } 3109 fieldNum := int32(wire >> 3) 3110 wireType := int(wire & 0x7) 3111 if wireType == 4 { 3112 return fmt.Errorf("proto: AllocationEntries: wiretype end group for non-group") 3113 } 3114 if fieldNum <= 0 { 3115 return fmt.Errorf("proto: AllocationEntries: illegal tag %d (wire type %d)", fieldNum, wire) 3116 } 3117 switch fieldNum { 3118 case 1: 3119 if wireType != 2 { 3120 return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) 3121 } 3122 var msglen int 3123 for shift := uint(0); ; shift += 7 { 3124 if shift >= 64 { 3125 return ErrIntOverflowCpu 3126 } 3127 if iNdEx >= l { 3128 return io.ErrUnexpectedEOF 3129 } 3130 b := dAtA[iNdEx] 3131 iNdEx++ 3132 msglen |= int(b&0x7F) << shift 3133 if b < 0x80 { 3134 break 3135 } 3136 } 3137 if msglen < 0 { 3138 return ErrInvalidLengthCpu 3139 } 3140 postIndex := iNdEx + msglen 3141 if postIndex < 0 { 3142 return ErrInvalidLengthCpu 3143 } 3144 if postIndex > l { 3145 return io.ErrUnexpectedEOF 3146 } 3147 if m.Entries == nil { 3148 m.Entries = make(map[string]*AllocationInfo) 3149 } 3150 var mapkey string 3151 var mapvalue *AllocationInfo 3152 for iNdEx < postIndex { 3153 entryPreIndex := iNdEx 3154 var wire uint64 3155 for shift := uint(0); ; shift += 7 { 3156 if shift >= 64 { 3157 return ErrIntOverflowCpu 3158 } 3159 if iNdEx >= l { 3160 return io.ErrUnexpectedEOF 3161 } 3162 b := dAtA[iNdEx] 3163 iNdEx++ 3164 wire |= uint64(b&0x7F) << shift 3165 if b < 0x80 { 3166 break 3167 } 3168 } 3169 fieldNum := int32(wire >> 3) 3170 if fieldNum == 1 { 3171 var stringLenmapkey uint64 3172 for shift := uint(0); ; shift += 7 { 3173 if shift >= 64 { 3174 return ErrIntOverflowCpu 3175 } 3176 if iNdEx >= l { 3177 return io.ErrUnexpectedEOF 3178 } 3179 b := dAtA[iNdEx] 3180 iNdEx++ 3181 
stringLenmapkey |= uint64(b&0x7F) << shift 3182 if b < 0x80 { 3183 break 3184 } 3185 } 3186 intStringLenmapkey := int(stringLenmapkey) 3187 if intStringLenmapkey < 0 { 3188 return ErrInvalidLengthCpu 3189 } 3190 postStringIndexmapkey := iNdEx + intStringLenmapkey 3191 if postStringIndexmapkey < 0 { 3192 return ErrInvalidLengthCpu 3193 } 3194 if postStringIndexmapkey > l { 3195 return io.ErrUnexpectedEOF 3196 } 3197 mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 3198 iNdEx = postStringIndexmapkey 3199 } else if fieldNum == 2 { 3200 var mapmsglen int 3201 for shift := uint(0); ; shift += 7 { 3202 if shift >= 64 { 3203 return ErrIntOverflowCpu 3204 } 3205 if iNdEx >= l { 3206 return io.ErrUnexpectedEOF 3207 } 3208 b := dAtA[iNdEx] 3209 iNdEx++ 3210 mapmsglen |= int(b&0x7F) << shift 3211 if b < 0x80 { 3212 break 3213 } 3214 } 3215 if mapmsglen < 0 { 3216 return ErrInvalidLengthCpu 3217 } 3218 postmsgIndex := iNdEx + mapmsglen 3219 if postmsgIndex < 0 { 3220 return ErrInvalidLengthCpu 3221 } 3222 if postmsgIndex > l { 3223 return io.ErrUnexpectedEOF 3224 } 3225 mapvalue = &AllocationInfo{} 3226 if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { 3227 return err 3228 } 3229 iNdEx = postmsgIndex 3230 } else { 3231 iNdEx = entryPreIndex 3232 skippy, err := skipCpu(dAtA[iNdEx:]) 3233 if err != nil { 3234 return err 3235 } 3236 if skippy < 0 { 3237 return ErrInvalidLengthCpu 3238 } 3239 if (iNdEx + skippy) > postIndex { 3240 return io.ErrUnexpectedEOF 3241 } 3242 iNdEx += skippy 3243 } 3244 } 3245 m.Entries[mapkey] = mapvalue 3246 iNdEx = postIndex 3247 default: 3248 iNdEx = preIndex 3249 skippy, err := skipCpu(dAtA[iNdEx:]) 3250 if err != nil { 3251 return err 3252 } 3253 if skippy < 0 { 3254 return ErrInvalidLengthCpu 3255 } 3256 if (iNdEx + skippy) < 0 { 3257 return ErrInvalidLengthCpu 3258 } 3259 if (iNdEx + skippy) > l { 3260 return io.ErrUnexpectedEOF 3261 } 3262 iNdEx += skippy 3263 } 3264 } 3265 3266 if iNdEx > l { 3267 return io.ErrUnexpectedEOF 
	}
	return nil
}

// Unmarshal decodes protobuf wire-format bytes in dAtA into m.
//
// NOTE(review): this is protoc-gen-gogo generated code (see file header:
// "DO NOT EDIT"); it is a reflection-free decoder that walks the buffer
// field by field. Regenerate from cpu.proto rather than editing by hand.
func (m *AllocationInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint-encoded tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCpu
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 (end-group) is never valid at message top level.
			return fmt.Errorf("proto: AllocationInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AllocationInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RampUp (bool, encoded as varint; nonzero => true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RampUp", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RampUp = bool(v != 0)
		case 2:
			// Field 2: OwnerPoolName (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OwnerPoolName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCpu
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				// Overflow of iNdEx + length.
				return ErrInvalidLengthCpu
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OwnerPoolName = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: TopologyAwareAssignments (map<uint64, string>).
			// Each map entry arrives as a nested message whose field 1 is the
			// key and field 2 is the value.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareAssignments", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCpu
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCpu
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TopologyAwareAssignments == nil {
				m.TopologyAwareAssignments = make(map[uint64]string)
			}
			var mapkey uint64
			var mapvalue string
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				// Tag of the entry-internal field (key or value).
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCpu
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					// Map key: uint64 varint.
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCpu
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						mapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
				} else if fieldNum == 2 {
					// Map value: length-delimited string.
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCpu
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthCpu
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthCpu
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					// Unknown field inside the map entry: skip it, bounded by
					// the entry's end (postIndex).
					iNdEx = entryPreIndex
					skippy, err := skipCpu(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if skippy < 0 {
						return ErrInvalidLengthCpu
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.TopologyAwareAssignments[mapkey] = mapvalue
			iNdEx = postIndex
		case 4:
			// Field 4: OriginalTopologyAwareAssignments (map<uint64, string>),
			// decoded exactly like field 3.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OriginalTopologyAwareAssignments", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCpu
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCpu
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.OriginalTopologyAwareAssignments == nil {
				m.OriginalTopologyAwareAssignments = make(map[uint64]string)
			}
			var mapkey uint64
			var mapvalue string
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCpu
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					// Map key: uint64 varint.
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCpu
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						mapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
				} else if fieldNum == 2 {
					// Map value: length-delimited string.
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCpu
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthCpu
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthCpu
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					// Unknown field inside the map entry: skip it.
					iNdEx = entryPreIndex
					skippy, err := skipCpu(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if skippy < 0 {
						return ErrInvalidLengthCpu
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.OriginalTopologyAwareAssignments[mapkey] = mapvalue
			iNdEx = postIndex
		default:
			// Unknown top-level field: rewind to the tag and skip the whole
			// field so forward-compatible messages still decode.
			iNdEx = preIndex
			skippy, err := skipCpu(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCpu
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCpu
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// skipCpu returns the number of bytes occupied by the wire-format field
// (tag included) starting at dAtA[0], so callers can step over unknown
// fields. It tracks nesting depth for the deprecated start/end-group wire
// types and rejects malformed input.
func skipCpu(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		// Decode the field tag to learn the wire type being skipped.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowCpu
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint: advance to the terminating byte (high bit clear).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
		case 2:
			// Length-delimited: read the length varint, then jump the payload.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowCpu
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthCpu
			}
			iNdEx += length
		case 3:
			// Start-group: everything until the matching end-group belongs to
			// the field being skipped.
			depth++
		case 4:
			// End-group: must match a prior start-group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupCpu
			}
			depth--
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			// Index overflowed; input length was malicious/corrupt.
			return 0, ErrInvalidLengthCpu
		}
		if depth == 0 {
			// One complete top-level field has been skipped.
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

// Sentinel errors shared by the generated Unmarshal/skip helpers in this file.
var (
	ErrInvalidLengthCpu        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowCpu          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupCpu = fmt.Errorf("proto: unexpected end of group")
)