github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/clients/pkg/promtail/targets/cloudflare/target_test.go

package cloudflare

import (
	"context"
	"errors"
	"os"
	"sort"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/clients/pkg/promtail/client/fake"
	"github.com/grafana/loki/clients/pkg/promtail/positions"
	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
)

func Test_CloudflareTarget(t *testing.T) {
	var (
		w      = log.NewSyncWriter(os.Stderr)
		logger = log.NewLogfmtLogger(w)
		cfg    = &scrapeconfig.CloudflareConfig{
			APIToken:  "foo",
			ZoneID:    "bar",
			Labels:    model.LabelSet{"job": "cloudflare"},
			PullRange: model.Duration(time.Minute),
		}
		end      = time.Unix(0, time.Hour.Nanoseconds())
		start    = time.Unix(0, time.Hour.Nanoseconds()-int64(cfg.PullRange))
		client   = fake.New(func() {})
		cfClient = newFakeCloudflareClient()
	)
	ps, err := positions.New(logger, positions.Config{
		SyncPeriod:    10 * time.Second,
		PositionsFile: t.TempDir() + "/positions.yml",
	})
	require.NoError(t, err)
	// Set our end time to be the last position we have recorded.
	ps.Put(positions.CursorKey(cfg.ZoneID), end.UnixNano())

	// Set up the responses for the first one-minute pull, split into three
	// sub-ranges (one per worker).
	cfClient.On("LogpullReceived", mock.Anything, start, start.Add(time.Duration(cfg.PullRange/3))).Return(&fakeLogIterator{
		logs: []string{
			`{"EdgeStartTimestamp":1, "EdgeRequestHost":"foo.com"}`,
		},
	}, nil)
	cfClient.On("LogpullReceived", mock.Anything, start.Add(time.Duration(cfg.PullRange/3)), start.Add(time.Duration(2*cfg.PullRange/3))).Return(&fakeLogIterator{
		logs: []string{
			`{"EdgeStartTimestamp":2, "EdgeRequestHost":"bar.com"}`,
		},
	}, nil)
	cfClient.On("LogpullReceived", mock.Anything, start.Add(time.Duration(2*cfg.PullRange/3)), end).Return(&fakeLogIterator{
		logs: []string{
			`{"EdgeStartTimestamp":3, "EdgeRequestHost":"buzz.com"}`,
			`{"EdgeRequestHost":"fuzz.com"}`,
		},
	}, nil)
	// Set up an empty response for all remaining pulls.
	cfClient.On("LogpullReceived", mock.Anything, mock.Anything, mock.Anything).Return(&fakeLogIterator{
		logs: []string{},
	}, nil)
	// Replace the real Cloudflare client with the fake.
	getClient = func(apiKey, zoneID string, fields []string) (Client, error) {
		return cfClient, nil
	}

	ta, err := NewTarget(NewMetrics(prometheus.NewRegistry()), logger, client, ps, cfg)
	require.NoError(t, err)
	require.True(t, ta.Ready())

	require.Eventually(t, func() bool {
		return len(client.Received()) == 4
	}, 5*time.Second, 100*time.Millisecond)

	received := client.Received()
	sort.Slice(received, func(i, j int) bool {
		return received[i].Timestamp.After(received[j].Timestamp)
	})
	for _, e := range received {
		require.Equal(t, model.LabelValue("cloudflare"), e.Labels["job"])
	}
	require.WithinDuration(t, time.Now(), received[0].Timestamp, time.Minute) // no timestamp defaults to now.
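	// Entries were sorted newest first above, so the line without an
	// EdgeStartTimestamp (stamped with time.Now()) comes first, followed by
	// the lines with explicit nanosecond timestamps in descending order.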
	require.Equal(t, `{"EdgeRequestHost":"fuzz.com"}`, received[0].Line)

	require.Equal(t, `{"EdgeStartTimestamp":3, "EdgeRequestHost":"buzz.com"}`, received[1].Line)
	require.Equal(t, time.Unix(0, 3), received[1].Timestamp)
	require.Equal(t, `{"EdgeStartTimestamp":2, "EdgeRequestHost":"bar.com"}`, received[2].Line)
	require.Equal(t, time.Unix(0, 2), received[2].Timestamp)
	require.Equal(t, `{"EdgeStartTimestamp":1, "EdgeRequestHost":"foo.com"}`, received[3].Line)
	require.Equal(t, time.Unix(0, 1), received[3].Timestamp)
	cfClient.AssertExpectations(t)
	ta.Stop()
	ps.Stop()
	// Make sure the position advanced past the previous end.
	newPos, _ := ps.Get(positions.CursorKey(cfg.ZoneID))
	require.Greater(t, newPos, end.UnixNano())
}

func Test_RetryErrorIterating(t *testing.T) {
	var (
		w        = log.NewSyncWriter(os.Stderr)
		logger   = log.NewLogfmtLogger(w)
		end      = time.Unix(0, time.Hour.Nanoseconds())
		start    = time.Unix(0, end.Add(-30*time.Minute).UnixNano())
		client   = fake.New(func() {})
		cfClient = newFakeCloudflareClient()
	)
	// The first pull fails partway through iterating.
	cfClient.On("LogpullReceived", mock.Anything, start, end).Return(&fakeLogIterator{
		logs: []string{
			`{"EdgeStartTimestamp":1, "EdgeRequestHost":"foo.com"}`,
			`error`,
		},
	}, nil).Once()
	// The retry of the same range succeeds.
	cfClient.On("LogpullReceived", mock.Anything, start, end).Return(&fakeLogIterator{
		logs: []string{
			`{"EdgeStartTimestamp":1, "EdgeRequestHost":"foo.com"}`,
			`{"EdgeStartTimestamp":2, "EdgeRequestHost":"foo.com"}`,
			`{"EdgeStartTimestamp":3, "EdgeRequestHost":"foo.com"}`,
		},
	}, nil).Once()
	// Replace the real Cloudflare client with the fake.
	getClient = func(apiKey, zoneID string, fields []string) (Client, error) {
		return cfClient, nil
	}
	// Retry as fast as possible.
	defaultBackoff.MinBackoff = 0
	defaultBackoff.MaxBackoff = 0
	ta := &Target{
		logger:  logger,
		handler: client,
		client:  cfClient,
		config: &scrapeconfig.CloudflareConfig{
			Labels: make(model.LabelSet),
		},
		metrics: NewMetrics(prometheus.DefaultRegisterer),
	}

	require.NoError(t, ta.pull(context.Background(), start, end))
	require.Eventually(t, func() bool {
		return len(client.Received()) == 4
	}, 5*time.Second, 100*time.Millisecond)
}

func Test_CloudflareTargetError(t *testing.T) {
	var (
		w      = log.NewSyncWriter(os.Stderr)
		logger = log.NewLogfmtLogger(w)
		cfg    = &scrapeconfig.CloudflareConfig{
			APIToken:  "foo",
			ZoneID:    "bar",
			Labels:    model.LabelSet{"job": "cloudflare"},
			PullRange: model.Duration(time.Minute),
		}
		end      = time.Unix(0, time.Hour.Nanoseconds())
		client   = fake.New(func() {})
		cfClient = newFakeCloudflareClient()
	)
	ps, err := positions.New(logger, positions.Config{
		SyncPeriod:    10 * time.Second,
		PositionsFile: t.TempDir() + "/positions.yml",
	})
	require.NoError(t, err)
	// Retry as fast as possible.
	defaultBackoff.MinBackoff = 0
	defaultBackoff.MaxBackoff = 0

	// Set our end time to be the last position we have recorded.
	ps.Put(positions.CursorKey(cfg.ZoneID), end.UnixNano())

	// Return an error for every pull, exhausting all retries.
	cfClient.On("LogpullReceived", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("no logs"))
	// Replace the real Cloudflare client with the fake.
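	// (getClient is a package-level factory variable, which is what allows
	// the tests in this file to inject a fake implementation.)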
	getClient = func(apiKey, zoneID string, fields []string) (Client, error) {
		return cfClient, nil
	}

	ta, err := NewTarget(NewMetrics(prometheus.NewRegistry()), logger, client, ps, cfg)
	require.NoError(t, err)
	require.True(t, ta.Ready())

	// Wait for the target to stop itself.
	require.Eventually(t, func() bool {
		return !ta.Ready()
	}, 5*time.Second, 100*time.Millisecond)

	require.Len(t, client.Received(), 0)
	require.GreaterOrEqual(t, cfClient.CallCount(), 5)
	require.NotEmpty(t, ta.Details().(map[string]string)["error"])
	ta.Stop()
	ps.Stop()

	// Make sure the stored position did not advance.
	newEnd, _ := ps.Get(positions.CursorKey(cfg.ZoneID))
	require.Equal(t, newEnd, end.UnixNano())
}

func Test_CloudflareTargetError168h(t *testing.T) {
	var (
		w      = log.NewSyncWriter(os.Stderr)
		logger = log.NewLogfmtLogger(w)
		cfg    = &scrapeconfig.CloudflareConfig{
			APIToken:  "foo",
			ZoneID:    "bar",
			Labels:    model.LabelSet{"job": "cloudflare"},
			PullRange: model.Duration(time.Minute),
		}
		end      = time.Unix(0, time.Hour.Nanoseconds())
		client   = fake.New(func() {})
		cfClient = newFakeCloudflareClient()
	)
	ps, err := positions.New(logger, positions.Config{
		SyncPeriod:    10 * time.Second,
		PositionsFile: t.TempDir() + "/positions.yml",
	})
	require.NoError(t, err)
	// Retry as fast as possible.
	defaultBackoff.MinBackoff = 0
	defaultBackoff.MaxBackoff = 0

	// Set our end time to be the last position we have recorded.
	ps.Put(positions.CursorKey(cfg.ZoneID), end.UnixNano())

	// Return Cloudflare's "too early" error for every pull.
	cfClient.On("LogpullReceived", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("HTTP status 400: bad query: error parsing time: invalid time range: too early: logs older than 168h0m0s are not available"))
	// Replace the real Cloudflare client with the fake.
	getClient = func(apiKey, zoneID string, fields []string) (Client, error) {
		return cfClient, nil
	}

	ta, err := NewTarget(NewMetrics(prometheus.NewRegistry()), logger, client, ps, cfg)
	require.NoError(t, err)
	require.True(t, ta.Ready())

	// Wait until all retries have been exhausted.
	require.Eventually(t, func() bool {
		return cfClient.CallCount() >= 5
	}, 5*time.Second, 100*time.Millisecond)

	require.Len(t, client.Received(), 0)
	require.GreaterOrEqual(t, cfClient.CallCount(), 5)
	ta.Stop()
	ps.Stop()

	// Make sure we moved on past the last saved position.
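	// Cloudflare's Logpull API retains at most 168h (seven days) of logs, so
	// retrying this range can never succeed; the target advances the cursor
	// instead of blocking on it forever.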
	newEnd, _ := ps.Get(positions.CursorKey(cfg.ZoneID))
	require.Greater(t, newEnd, end.UnixNano())
}

func Test_validateConfig(t *testing.T) {
	tests := []struct {
		in      *scrapeconfig.CloudflareConfig
		out     *scrapeconfig.CloudflareConfig
		wantErr bool
	}{
		// A minimal valid config gets filled in with defaults.
		{
			&scrapeconfig.CloudflareConfig{
				APIToken: "foo",
				ZoneID:   "bar",
			},
			&scrapeconfig.CloudflareConfig{
				APIToken:   "foo",
				ZoneID:     "bar",
				Workers:    3,
				PullRange:  model.Duration(time.Minute),
				FieldsType: string(FieldsTypeDefault),
			},
			false,
		},
		// Missing ZoneID.
		{
			&scrapeconfig.CloudflareConfig{
				APIToken: "foo",
			},
			nil,
			true,
		},
		// Missing APIToken.
		{
			&scrapeconfig.CloudflareConfig{
				ZoneID: "foo",
			},
			nil,
			true,
		},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			err := validateConfig(tt.in)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, tt.out, tt.in)
		})
	}
}

func Test_splitRequests(t *testing.T) {
	tests := []struct {
		start time.Time
		end   time.Time
		want  []pullRequest
	}{
		// Perfectly divisible range.
		{
			time.Unix(0, 0),
			time.Unix(0, int64(time.Minute)),
			[]pullRequest{
				{start: time.Unix(0, 0), end: time.Unix(0, int64(time.Minute/3))},
				{start: time.Unix(0, int64(time.Minute/3)), end: time.Unix(0, int64(time.Minute*2/3))},
				{start: time.Unix(0, int64(time.Minute*2/3)), end: time.Unix(0, int64(time.Minute))},
			},
		},
		// Not divisible: the remainder goes to the last request.
		{
			time.Unix(0, 0),
			time.Unix(0, int64(time.Minute+1)),
			[]pullRequest{
				{start: time.Unix(0, 0), end: time.Unix(0, int64(time.Minute/3))},
				{start: time.Unix(0, int64(time.Minute/3)), end: time.Unix(0, int64(time.Minute*2/3))},
				{start: time.Unix(0, int64(time.Minute*2/3)), end: time.Unix(0, int64(time.Minute+1))},
			},
		},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			got := splitRequests(tt.start, tt.end, 3)
			if !assert.Equal(t, tt.want, got) {
				for i := range got {
					if !assert.Equal(t, tt.want[i].start, got[i].start) {
						t.Logf("expected i:%d start: %d, got: %d", i, tt.want[i].start.UnixNano(), got[i].start.UnixNano())
					}
					if !assert.Equal(t, tt.want[i].end, got[i].end) {
						t.Logf("expected i:%d end: %d, got: %d", i, tt.want[i].end.UnixNano(), got[i].end.UnixNano())
					}
				}
			}
		})
	}
}
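
// The tests above rely on newFakeCloudflareClient and fakeLogIterator, which
// are defined alongside this file in the package's test helpers. For
// reference, a minimal sketch of what the iterator side could look like is
// shown below; the exact interface it must satisfy is an assumption based on
// how it is used in these tests, where the literal log line `error` acts as a
// sentinel that makes iteration fail and triggers the retry path.
//
//	type fakeLogIterator struct {
//		logs    []string
//		current string
//		err     error
//	}
//
//	func (f *fakeLogIterator) Next() bool {
//		if len(f.logs) == 0 {
//			return false
//		}
//		f.current, f.logs = f.logs[0], f.logs[1:]
//		if f.current == `error` {
//			f.err = errors.New("failed to iterate")
//			return false
//		}
//		return true
//	}
//
//	func (f *fakeLogIterator) Err() error   { return f.err }
//	func (f *fakeLogIterator) Line() []byte { return []byte(f.current) }
//	func (f *fakeLogIterator) Close() error { return nil }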