github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/execinfra/readerbase.go

// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package execinfra

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// We ignore any limits that are higher than this value to avoid integer
// overflows. See LimitHint for how this bound is used.
const readerOverflowProtection = 1000000000000000 /* 10^15 */

// LimitHint returns the limit hint to set for a KVFetcher based on
// the spec's limit hint and the PostProcessSpec.
func LimitHint(specLimitHint int64, post *execinfrapb.PostProcessSpec) (limitHint int64) {
	// We prioritize the post-processing limit since ProcOutputHelper
	// will tell us to stop once we emit enough rows.
	if post.Limit != 0 && post.Limit <= readerOverflowProtection {
		limitHint = int64(post.Limit)
	} else if specLimitHint != 0 && specLimitHint <= readerOverflowProtection {
		// If it turns out that limitHint rows are sufficient for our consumer, we
		// want to avoid asking for another batch. Currently, the only way for us
		// to "stop" is if we block on sending rows and the consumer sets
		// ConsumerDone() on the RowChannel while we block. So we want to block
		// *after* sending all the rows in the limit hint; to do this, we request
		// RowChannelBufSize + 1 more rows:
		// - RowChannelBufSize rows guarantee that we will fill the row channel
		//   even after limitHint rows are consumed
		// - the extra row gives us a chance to call Push again after we unblock,
		//   which will notice that ConsumerDone() was called.
		//
		// This flimsy mechanism is only useful in the (optimistic) case that the
		// processor that needs only this many rows is our direct, local consumer.
		// If we have a chain of processors and RowChannels, or remote streams,
		// this reasoning goes out the window.
		//
		// TODO(radu, andrei): work on a real mechanism for limits.
		limitHint = specLimitHint + RowChannelBufSize + 1
	}

	if !post.Filter.Empty() {
		// We have a filter, so we will likely need to read more rows.
		limitHint *= 2
	}

	return limitHint
}

// MisplannedRanges returns the subset of rangeInfos describing ranges whose
// lease is not held by the given node, i.e. ranges that were "misplanned".
func MisplannedRanges(
	ctx context.Context, rangeInfos []roachpb.RangeInfo, nodeID roachpb.NodeID,
) (misplannedRanges []roachpb.RangeInfo) {
	for _, ri := range rangeInfos {
		if ri.Lease.Replica.NodeID != nodeID {
			misplannedRanges = append(misplannedRanges, ri)
		}
	}

	if len(misplannedRanges) != 0 {
		var msg string
		if len(misplannedRanges) < 3 {
			msg = fmt.Sprintf("%+v", misplannedRanges[0].Desc)
		} else {
			msg = fmt.Sprintf("%+v...", misplannedRanges[:3])
		}
		log.VEventf(ctx, 2, "tableReader pushing metadata about misplanned ranges: %s",
			msg)
	}

	return misplannedRanges
}
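
// Illustrative sketch of how LimitHint combines the two limits. The concrete
// values (10 and 500) are hypothetical; RowChannelBufSize is the package-level
// constant referenced above.
//
//	post := &execinfrapb.PostProcessSpec{Limit: 10}
//	_ = LimitHint(500, post) // 10: the post-processing limit takes priority.
//
//	post = &execinfrapb.PostProcessSpec{}
//	_ = LimitHint(500, post) // 500 + RowChannelBufSize + 1: padded so we only
//	                         // block after the consumer has seen enough rows.
//
//	// With a non-empty post.Filter the resulting hint is doubled, since some
//	// of the fetched rows may be filtered out before reaching the consumer.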
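
// Illustrative sketch of MisplannedRanges with hypothetical inputs, assuming
// the usual roachpb.Lease/ReplicaDescriptor layout and an in-scope ctx: only
// the RangeInfos whose leaseholder lives on a node other than the local one
// are returned, so they can be pushed as metadata to the gateway.
//
//	infos := []roachpb.RangeInfo{
//		{Lease: roachpb.Lease{Replica: roachpb.ReplicaDescriptor{NodeID: 1}}},
//		{Lease: roachpb.Lease{Replica: roachpb.ReplicaDescriptor{NodeID: 2}}},
//	}
//	misplanned := MisplannedRanges(ctx, infos, roachpb.NodeID(1))
//	// misplanned holds only the second entry (leaseholder on node 2).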