/Users/deen/code/yugabyte-db/src/yb/util/metrics-test.cc
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // |
18 | | // The following only applies to changes made to this file as part of YugaByte development. |
19 | | // |
20 | | // Portions Copyright (c) YugaByte, Inc. |
21 | | // |
22 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
23 | | // in compliance with the License. You may obtain a copy of the License at |
24 | | // |
25 | | // http://www.apache.org/licenses/LICENSE-2.0 |
26 | | // |
27 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
28 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
29 | | // or implied. See the License for the specific language governing permissions and limitations |
30 | | // under the License. |
31 | | // |
32 | | |
33 | | #include <memory> |
34 | | #include <string> |
35 | | #include <unordered_set> |
36 | | #include <vector> |
37 | | |
38 | | #include <glog/logging.h> |
39 | | #include <gtest/gtest.h> |
40 | | #include <rapidjson/document.h> |
41 | | |
42 | | #include "yb/gutil/bind.h" |
43 | | #include "yb/gutil/map-util.h" |
44 | | |
45 | | #include "yb/util/hdr_histogram.h" |
46 | | #include "yb/util/histogram.pb.h" |
47 | | #include "yb/util/jsonreader.h" |
48 | | #include "yb/util/jsonwriter.h" |
49 | | #include "yb/util/metrics.h" |
50 | | #include "yb/util/test_macros.h" |
51 | | #include "yb/util/test_util.h" |
52 | | |
using std::string;
using std::unordered_set;
using std::vector;

// Flag defined in the metrics library: how long an unreferenced metric is
// kept around before it may be retired. Several tests below override it.
DECLARE_int32(metrics_retirement_age_ms);

namespace yb {

// Entity type that all metrics in this test are registered against.
METRIC_DEFINE_entity(test_entity);

// Table id used as the aggregation key in the Prometheus tests below.
static const string kTableId = "table_id";
64 | | |
65 | | class MetricsTest : public YBTest { |
66 | | public: |
67 | 19 | void SetUp() override { |
68 | 19 | YBTest::SetUp(); |
69 | | |
70 | 19 | entity_ = METRIC_ENTITY_test_entity.Instantiate(®istry_, "my-test"); |
71 | 19 | } |
72 | | |
73 | | protected: |
74 | | template <class LagType> |
75 | 2 | void DoLagTest(const MillisLagPrototype& metric) { |
76 | 2 | scoped_refptr<LagType> lag = new LagType(&metric); |
77 | 2 | ASSERT_EQ(metric.description(), lag->prototype()->description()); |
78 | 2 | SleepFor(MonoDelta::FromMilliseconds(500)); |
79 | | // Internal timestamp is set to the time when the metric was created. |
80 | | // So this lag is measure of the time elapsed since the metric was |
81 | | // created and the check time. |
82 | 2 | ASSERT_GE(lag->lag_ms(), 500); |
83 | 2 | auto now_ms = std::chrono::duration_cast<std::chrono::milliseconds>( |
84 | 2 | std::chrono::system_clock::now().time_since_epoch()).count(); |
85 | 2 | lag->UpdateTimestampInMilliseconds(now_ms); |
86 | | // Verify that the update happened correctly. The lag time should |
87 | | // be close to 0, but giving it extra time to account for slow |
88 | | // tests. |
89 | 2 | ASSERT_LT(lag->lag_ms(), 200); |
90 | | // Set the timestamp to some time in the future to verify that the |
91 | | // metric can correctly deal with this case. |
92 | 2 | lag->UpdateTimestampInMilliseconds(now_ms * 2); |
93 | 2 | ASSERT_EQ(0, lag->lag_ms()); |
94 | 2 | } _ZN2yb11MetricsTest9DoLagTestINS_9MillisLagEEEvRKNS_18MillisLagPrototypeE Line | Count | Source | 75 | 1 | void DoLagTest(const MillisLagPrototype& metric) { | 76 | 1 | scoped_refptr<LagType> lag = new LagType(&metric); | 77 | 1 | ASSERT_EQ(metric.description(), lag->prototype()->description()); | 78 | 1 | SleepFor(MonoDelta::FromMilliseconds(500)); | 79 | | // Internal timestamp is set to the time when the metric was created. | 80 | | // So this lag is measure of the time elapsed since the metric was | 81 | | // created and the check time. | 82 | 1 | ASSERT_GE(lag->lag_ms(), 500); | 83 | 1 | auto now_ms = std::chrono::duration_cast<std::chrono::milliseconds>( | 84 | 1 | std::chrono::system_clock::now().time_since_epoch()).count(); | 85 | 1 | lag->UpdateTimestampInMilliseconds(now_ms); | 86 | | // Verify that the update happened correctly. The lag time should | 87 | | // be close to 0, but giving it extra time to account for slow | 88 | | // tests. | 89 | 1 | ASSERT_LT(lag->lag_ms(), 200); | 90 | | // Set the timestamp to some time in the future to verify that the | 91 | | // metric can correctly deal with this case. | 92 | 1 | lag->UpdateTimestampInMilliseconds(now_ms * 2); | 93 | 1 | ASSERT_EQ(0, lag->lag_ms()); | 94 | 1 | } |
_ZN2yb11MetricsTest9DoLagTestINS_15AtomicMillisLagEEEvRKNS_18MillisLagPrototypeE Line | Count | Source | 75 | 1 | void DoLagTest(const MillisLagPrototype& metric) { | 76 | 1 | scoped_refptr<LagType> lag = new LagType(&metric); | 77 | 1 | ASSERT_EQ(metric.description(), lag->prototype()->description()); | 78 | 1 | SleepFor(MonoDelta::FromMilliseconds(500)); | 79 | | // Internal timestamp is set to the time when the metric was created. | 80 | | // So this lag is measure of the time elapsed since the metric was | 81 | | // created and the check time. | 82 | 1 | ASSERT_GE(lag->lag_ms(), 500); | 83 | 1 | auto now_ms = std::chrono::duration_cast<std::chrono::milliseconds>( | 84 | 1 | std::chrono::system_clock::now().time_since_epoch()).count(); | 85 | 1 | lag->UpdateTimestampInMilliseconds(now_ms); | 86 | | // Verify that the update happened correctly. The lag time should | 87 | | // be close to 0, but giving it extra time to account for slow | 88 | | // tests. | 89 | 1 | ASSERT_LT(lag->lag_ms(), 200); | 90 | | // Set the timestamp to some time in the future to verify that the | 91 | | // metric can correctly deal with this case. | 92 | 1 | lag->UpdateTimestampInMilliseconds(now_ms * 2); | 93 | 1 | ASSERT_EQ(0, lag->lag_ms()); | 94 | 1 | } |
|
95 | | |
96 | | template <class Gauge> |
97 | | void DoAggregationTest(const vector<int>& values, |
98 | | const vector<MetricEntity::AttributeMap>& attrs, |
99 | | const scoped_refptr<Gauge>& gauge, |
100 | | const string& name, |
101 | | int expected_aggregation, |
102 | 2 | const MetricEntity::AttributeMap& expected_attrs) { |
103 | 2 | std::stringstream output; |
104 | 2 | PrometheusWriter writer(&output); |
105 | 10 | for (size_t i = 0; i < values.size(); ++i) { |
106 | 8 | gauge->set_value(values[i]); |
107 | 8 | ASSERT_OK(gauge->WriteForPrometheus(&writer, attrs[i], MetricPrometheusOptions())); |
108 | 8 | } |
109 | 2 | ASSERT_EQ(writer.per_table_values_[kTableId][name], expected_aggregation); |
110 | 2 | ASSERT_EQ(writer.per_table_attributes_[kTableId], expected_attrs); |
111 | 2 | } |
112 | | |
113 | 1 | std::string dumpPrometheusWriterOutput(const PrometheusWriter& w) { return w.output_->str(); } |
114 | | |
115 | | MetricRegistry registry_; |
116 | | scoped_refptr<MetricEntity> entity_; |
117 | | }; |
118 | | |
119 | | METRIC_DEFINE_counter(test_entity, reqs_pending, "Requests Pending", MetricUnit::kRequests, |
120 | | "Number of requests pending"); |
121 | | |
122 | 1 | TEST_F(MetricsTest, SimpleCounterTest) { |
123 | 1 | scoped_refptr<Counter> requests = |
124 | 1 | new Counter(&METRIC_reqs_pending); |
125 | 1 | ASSERT_EQ("Number of requests pending", requests->prototype()->description()); |
126 | 1 | ASSERT_EQ(0, requests->value()); |
127 | 1 | requests->Increment(); |
128 | 1 | ASSERT_EQ(1, requests->value()); |
129 | 1 | requests->IncrementBy(2); |
130 | 1 | ASSERT_EQ(3, requests->value()); |
131 | 1 | } |
132 | | |
133 | | METRIC_DEFINE_lag(test_entity, lag_simple, "Test MillisLag", "Test MillisLag Description"); |
134 | 1 | TEST_F(MetricsTest, SimpleLagTest) { |
135 | 1 | ASSERT_NO_FATALS(DoLagTest<MillisLag>(METRIC_lag_simple)); |
136 | 1 | } |
137 | | |
138 | | METRIC_DEFINE_lag(test_entity, atomic_lag_simple, "Test Atomic MillisLag", |
139 | | "Test Atomic MillisLag Description"); |
140 | 1 | TEST_F(MetricsTest, SimpleAtomicLagTest) { |
141 | 1 | ASSERT_NO_FATALS(DoLagTest<AtomicMillisLag>(METRIC_atomic_lag_simple)); |
142 | 1 | } |
143 | | |
144 | | METRIC_DEFINE_gauge_uint64(test_entity, fake_memory_usage, "Memory Usage", |
145 | | MetricUnit::kBytes, "Test Gauge 1"); |
146 | | |
147 | 1 | TEST_F(MetricsTest, SimpleAtomicGaugeTest) { |
148 | 1 | scoped_refptr<AtomicGauge<uint64_t> > mem_usage = |
149 | 1 | METRIC_fake_memory_usage.Instantiate(entity_, 0); |
150 | 1 | ASSERT_EQ(METRIC_fake_memory_usage.description(), mem_usage->prototype()->description()); |
151 | 1 | ASSERT_EQ(0, mem_usage->value()); |
152 | 1 | mem_usage->IncrementBy(7); |
153 | 1 | ASSERT_EQ(7, mem_usage->value()); |
154 | 1 | mem_usage->set_value(5); |
155 | 1 | ASSERT_EQ(5, mem_usage->value()); |
156 | 1 | } |
157 | | |
// Prototype backing the function-gauge tests; its value is produced by the
// MyFunction callback defined below.
METRIC_DEFINE_gauge_int64(test_entity, test_func_gauge, "Test Gauge", MetricUnit::kBytes,
                          "Test Gauge 2");
160 | | |
// Returns the current value of *metric_val and then increments it, so each
// successive read observes a value one larger than the previous read. Used as
// the callback behind the function-gauge tests below.
static int64_t MyFunction(int* metric_val) {
  const int64_t previous = *metric_val;
  ++(*metric_val);
  return previous;
}
164 | | |
165 | 1 | TEST_F(MetricsTest, SimpleFunctionGaugeTest) { |
166 | 1 | int metric_val = 1000; |
167 | 1 | scoped_refptr<FunctionGauge<int64_t> > gauge = |
168 | 1 | METRIC_test_func_gauge.InstantiateFunctionGauge( |
169 | 1 | entity_, Bind(&MyFunction, Unretained(&metric_val))); |
170 | | |
171 | 1 | ASSERT_EQ(1000, gauge->value()); |
172 | 1 | ASSERT_EQ(1001, gauge->value()); |
173 | | |
174 | 1 | gauge->DetachToCurrentValue(); |
175 | | // After detaching, it should continue to return the same constant value. |
176 | 1 | ASSERT_EQ(1002, gauge->value()); |
177 | 1 | ASSERT_EQ(1002, gauge->value()); |
178 | | |
179 | | // Test resetting to a constant. |
180 | 1 | gauge->DetachToConstant(2); |
181 | 1 | ASSERT_EQ(2, gauge->value()); |
182 | 1 | } |
183 | | |
184 | 1 | TEST_F(MetricsTest, AutoDetachToLastValue) { |
185 | 1 | int metric_val = 1000; |
186 | 1 | scoped_refptr<FunctionGauge<int64_t> > gauge = |
187 | 1 | METRIC_test_func_gauge.InstantiateFunctionGauge( |
188 | 1 | entity_, Bind(&MyFunction, Unretained(&metric_val))); |
189 | | |
190 | 1 | ASSERT_EQ(1000, gauge->value()); |
191 | 1 | ASSERT_EQ(1001, gauge->value()); |
192 | 1 | { |
193 | 1 | std::shared_ptr<void> detacher; |
194 | 1 | gauge->AutoDetachToLastValue(&detacher); |
195 | 1 | ASSERT_EQ(1002, gauge->value()); |
196 | 1 | ASSERT_EQ(1003, gauge->value()); |
197 | 1 | } |
198 | | |
199 | 1 | ASSERT_EQ(1004, gauge->value()); |
200 | 1 | ASSERT_EQ(1004, gauge->value()); |
201 | 1 | } |
202 | | |
203 | 1 | TEST_F(MetricsTest, AutoDetachToConstant) { |
204 | 1 | int metric_val = 1000; |
205 | 1 | scoped_refptr<FunctionGauge<int64_t> > gauge = |
206 | 1 | METRIC_test_func_gauge.InstantiateFunctionGauge( |
207 | 1 | entity_, Bind(&MyFunction, Unretained(&metric_val))); |
208 | | |
209 | 1 | ASSERT_EQ(1000, gauge->value()); |
210 | 1 | ASSERT_EQ(1001, gauge->value()); |
211 | 1 | { |
212 | 1 | std::shared_ptr<void> detacher; |
213 | 1 | gauge->AutoDetach(&detacher, 12345); |
214 | 1 | ASSERT_EQ(1002, gauge->value()); |
215 | 1 | ASSERT_EQ(1003, gauge->value()); |
216 | 1 | } |
217 | | |
218 | 1 | ASSERT_EQ(12345, gauge->value()); |
219 | 1 | } |
220 | | |
221 | | METRIC_DEFINE_gauge_uint64(test_entity, counter_as_gauge, "Gauge exposed as Counter", |
222 | | MetricUnit::kBytes, "Gauge exposed as Counter", |
223 | | EXPOSE_AS_COUNTER); |
224 | 1 | TEST_F(MetricsTest, TEstExposeGaugeAsCounter) { |
225 | 1 | ASSERT_EQ(MetricType::kCounter, METRIC_counter_as_gauge.type()); |
226 | 1 | } |
227 | | |
// Histogram used by the histogram and never-retire tests. The trailing
// arguments (100000000L, 2) are presumably the max trackable value and the
// number of significant digits, per HdrHistogram convention — confirm against
// the macro definition.
METRIC_DEFINE_histogram_with_percentiles(test_entity, test_hist, "Test Histogram",
    MetricUnit::kMilliseconds, "A default histogram.", 100000000L, 2);

// Gauges for AggregationTest: default aggregation vs. explicit kMax.
METRIC_DEFINE_gauge_int32(test_entity, test_sum_gauge, "Test Sum Gauge", MetricUnit::kMilliseconds,
                          "Test Gauge with SUM aggregation.");
METRIC_DEFINE_gauge_int32(test_entity, test_max_gauge, "Test Max", MetricUnit::kMilliseconds,
                          "Test Gauge with MAX aggregation.",
                          {0, yb::AggregationFunction::kMax} /* optional_args */);
236 | | |
237 | 1 | TEST_F(MetricsTest, AggregationTest) { |
238 | 1 | vector<int> values{1, 2, 3, 4}; |
239 | 1 | vector<MetricEntity::AttributeMap> attrs; |
240 | 4 | for (const auto val : values) { |
241 | 4 | MetricEntity::AttributeMap attr; |
242 | 4 | attr["table_id"] = kTableId; |
243 | 4 | attr["val"] = val; |
244 | 4 | attrs.push_back(std::move(attr)); |
245 | 4 | } |
246 | | // Test SUM aggregation |
247 | 1 | auto sum_gauge = METRIC_test_sum_gauge.Instantiate(entity_, |
248 | 1 | 0 /* initial_value */); |
249 | 1 | ASSERT_NO_FATALS(DoAggregationTest(values, attrs, sum_gauge, "test_sum_gauge", 10, attrs[0])); |
250 | | // Test MAX aggregation |
251 | 1 | auto max_gauge = METRIC_test_max_gauge.Instantiate(entity_, |
252 | 1 | 0 /* initial_value */); |
253 | 1 | ASSERT_NO_FATALS(DoAggregationTest(values, attrs, max_gauge, "test_max_gauge", 4, attrs[3])); |
254 | 1 | } |
255 | | |
256 | 1 | TEST_F(MetricsTest, SimpleHistogramTest) { |
257 | 1 | scoped_refptr<Histogram> hist = METRIC_test_hist.Instantiate(entity_); |
258 | 1 | hist->Increment(2); |
259 | 1 | hist->IncrementBy(4, 1); |
260 | 1 | ASSERT_EQ(2, hist->histogram_->MinValue()); |
261 | 1 | ASSERT_EQ(3, hist->histogram_->MeanValue()); |
262 | 1 | ASSERT_EQ(4, hist->histogram_->MaxValue()); |
263 | 1 | ASSERT_EQ(2, hist->histogram_->TotalCount()); |
264 | 1 | ASSERT_EQ(6, hist->histogram_->TotalSum()); |
265 | | // TODO: Test coverage needs to be improved a lot. |
266 | 1 | } |
267 | | |
268 | 1 | TEST_F(MetricsTest, ResetHistogramTest) { |
269 | 1 | scoped_refptr<Histogram> hist = METRIC_test_hist.Instantiate(entity_); |
270 | 101 | for (int i = 1; i <= 100; i++) { |
271 | 100 | hist->Increment(i); |
272 | 100 | } |
273 | 1 | EXPECT_EQ(5050, hist->histogram_->TotalSum()); |
274 | 1 | EXPECT_EQ(100, hist->histogram_->TotalCount()); |
275 | 1 | EXPECT_EQ(5050, hist->histogram_->CurrentSum()); |
276 | 1 | EXPECT_EQ(100, hist->histogram_->CurrentCount()); |
277 | | |
278 | 1 | EXPECT_EQ(1, hist->histogram_->MinValue()); |
279 | 1 | EXPECT_EQ(50.5, hist->histogram_->MeanValue()); |
280 | 1 | EXPECT_EQ(100, hist->histogram_->MaxValue()); |
281 | 1 | EXPECT_EQ(10, hist->histogram_->ValueAtPercentile(10)); |
282 | 1 | EXPECT_EQ(25, hist->histogram_->ValueAtPercentile(25)); |
283 | 1 | EXPECT_EQ(50, hist->histogram_->ValueAtPercentile(50)); |
284 | 1 | EXPECT_EQ(75, hist->histogram_->ValueAtPercentile(75)); |
285 | 1 | EXPECT_EQ(99, hist->histogram_->ValueAtPercentile(99)); |
286 | 1 | EXPECT_EQ(100, hist->histogram_->ValueAtPercentile(99.9)); |
287 | 1 | EXPECT_EQ(100, hist->histogram_->ValueAtPercentile(100)); |
288 | | |
289 | 1 | hist->histogram_->DumpHumanReadable(&LOG(INFO)); |
290 | | // Test that the Histogram's percentiles are reset. |
291 | 1 | HistogramSnapshotPB snapshot_pb; |
292 | 1 | MetricJsonOptions options; |
293 | 1 | options.include_raw_histograms = true; |
294 | 1 | ASSERT_OK(hist->GetAndResetHistogramSnapshotPB(&snapshot_pb, options)); |
295 | 1 | hist->histogram_->DumpHumanReadable(&LOG(INFO)); |
296 | | |
297 | 1 | EXPECT_EQ(5050, hist->histogram_->TotalSum()); |
298 | 1 | EXPECT_EQ(100, hist->histogram_->TotalCount()); |
299 | 1 | EXPECT_EQ(0, hist->histogram_->CurrentSum()); |
300 | 1 | EXPECT_EQ(0, hist->histogram_->CurrentCount()); |
301 | | |
302 | 1 | EXPECT_EQ(0, hist->histogram_->MinValue()); |
303 | 1 | EXPECT_EQ(0, hist->histogram_->MeanValue()); |
304 | 1 | EXPECT_EQ(0, hist->histogram_->MaxValue()); |
305 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(10)); |
306 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(25)); |
307 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(50)); |
308 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(75)); |
309 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(99)); |
310 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(99.9)); |
311 | 1 | EXPECT_EQ(0, hist->histogram_->ValueAtPercentile(100)); |
312 | 1 | } |
313 | | |
314 | 1 | TEST_F(MetricsTest, JsonPrintTest) { |
315 | 1 | scoped_refptr<Counter> bytes_seen = METRIC_reqs_pending.Instantiate(entity_); |
316 | 1 | bytes_seen->Increment(); |
317 | 1 | entity_->SetAttribute("test_attr", "attr_val"); |
318 | | |
319 | | // Generate the JSON. |
320 | 1 | std::stringstream out; |
321 | 1 | JsonWriter writer(&out, JsonWriter::PRETTY); |
322 | 1 | ASSERT_OK(entity_->WriteAsJson(&writer, { "*" }, MetricJsonOptions())); |
323 | | |
324 | | // Now parse it back out. |
325 | 1 | JsonReader reader(out.str()); |
326 | 1 | ASSERT_OK(reader.Init()); |
327 | | |
328 | 1 | vector<const rapidjson::Value*> metrics; |
329 | 1 | ASSERT_OK(reader.ExtractObjectArray(reader.root(), "metrics", &metrics)); |
330 | 1 | ASSERT_EQ(1, metrics.size()); |
331 | 1 | string metric_name; |
332 | 1 | ASSERT_OK(reader.ExtractString(metrics[0], "name", &metric_name)); |
333 | 1 | ASSERT_EQ("reqs_pending", metric_name); |
334 | 1 | int64_t metric_value; |
335 | 1 | ASSERT_OK(reader.ExtractInt64(metrics[0], "value", &metric_value)); |
336 | 1 | ASSERT_EQ(1L, metric_value); |
337 | | |
338 | 1 | const rapidjson::Value* attributes; |
339 | 1 | ASSERT_OK(reader.ExtractObject(reader.root(), "attributes", &attributes)); |
340 | 1 | string attr_value; |
341 | 1 | ASSERT_OK(reader.ExtractString(attributes, "test_attr", &attr_value)); |
342 | 1 | ASSERT_EQ("attr_val", attr_value); |
343 | | |
344 | | // Verify that, if we filter for a metric that isn't in this entity, we get no result. |
345 | 1 | out.str(""); |
346 | 1 | ASSERT_OK(entity_->WriteAsJson(&writer, { "not_a_matching_metric" }, MetricJsonOptions())); |
347 | 1 | ASSERT_EQ("", out.str()); |
348 | 1 | } |
349 | | |
350 | | // Test that metrics are retired when they are no longer referenced. |
351 | 1 | TEST_F(MetricsTest, RetirementTest) { |
352 | 1 | FLAGS_metrics_retirement_age_ms = 100; |
353 | | |
354 | 1 | const string kMetricName = "foo"; |
355 | 1 | scoped_refptr<Counter> counter = METRIC_reqs_pending.Instantiate(entity_); |
356 | 1 | ASSERT_EQ(1, entity_->UnsafeMetricsMapForTests().size()); |
357 | | |
358 | | // Since we hold a reference to the counter, it should not get retired. |
359 | 1 | entity_->RetireOldMetrics(); |
360 | 1 | ASSERT_EQ(1, entity_->UnsafeMetricsMapForTests().size()); |
361 | | |
362 | | // When we de-ref it, it should not get immediately retired, either, because |
363 | | // we keep retirable metrics around for some amount of time. We try retiring |
364 | | // a number of times to hit all the cases. |
365 | 1 | counter = nullptr; |
366 | 4 | for (int i = 0; i < 3; i++) { |
367 | 3 | entity_->RetireOldMetrics(); |
368 | 3 | ASSERT_EQ(1, entity_->UnsafeMetricsMapForTests().size()); |
369 | 3 | } |
370 | | |
371 | | // If we wait for longer than the retirement time, and call retire again, we'll |
372 | | // actually retire it. |
373 | 1 | SleepFor(MonoDelta::FromMilliseconds(FLAGS_metrics_retirement_age_ms * 1.5)); |
374 | 1 | entity_->RetireOldMetrics(); |
375 | 1 | ASSERT_EQ(0, entity_->UnsafeMetricsMapForTests().size()); |
376 | 1 | } |
377 | | |
378 | 1 | TEST_F(MetricsTest, TestRetiringEntities) { |
379 | 1 | ASSERT_EQ(1, registry_.num_entities()); |
380 | | |
381 | | // Drop the reference to our entity. |
382 | 1 | entity_.reset(); |
383 | | |
384 | | // Retire metrics. Since there is nothing inside our entity, it should |
385 | | // retire immediately (no need to loop). |
386 | 1 | registry_.RetireOldMetrics(); |
387 | | |
388 | 1 | ASSERT_EQ(0, registry_.num_entities()); |
389 | 1 | } |
390 | | |
391 | | // Test that we can mark a metric to never be retired. |
392 | 1 | TEST_F(MetricsTest, NeverRetireTest) { |
393 | 1 | entity_->NeverRetire(METRIC_test_hist.Instantiate(entity_)); |
394 | 1 | FLAGS_metrics_retirement_age_ms = 0; |
395 | | |
396 | 4 | for (int i = 0; i < 3; i++) { |
397 | 3 | entity_->RetireOldMetrics(); |
398 | 3 | ASSERT_EQ(1, entity_->UnsafeMetricsMapForTests().size()); |
399 | 3 | } |
400 | 1 | } |
401 | | |
402 | 1 | TEST_F(MetricsTest, TestInstantiatingTwice) { |
403 | | // Test that re-instantiating the same entity ID returns the same object. |
404 | 1 | scoped_refptr<MetricEntity> new_entity = METRIC_ENTITY_test_entity.Instantiate( |
405 | 1 | ®istry_, entity_->id()); |
406 | 1 | ASSERT_EQ(new_entity.get(), entity_.get()); |
407 | 1 | } |
408 | | |
409 | 1 | TEST_F(MetricsTest, TestInstantiatingDifferentEntities) { |
410 | 1 | scoped_refptr<MetricEntity> new_entity = METRIC_ENTITY_test_entity.Instantiate( |
411 | 1 | ®istry_, "some other ID"); |
412 | 1 | ASSERT_NE(new_entity.get(), entity_.get()); |
413 | 1 | } |
414 | | |
415 | 1 | TEST_F(MetricsTest, TestDumpJsonPrototypes) { |
416 | | // Dump the prototype info. |
417 | 1 | std::stringstream out; |
418 | 1 | JsonWriter w(&out, JsonWriter::PRETTY); |
419 | 1 | WriteRegistryAsJson(&w); |
420 | 1 | string json = out.str(); |
421 | | |
422 | | // Quick sanity check for one of our metrics defined in this file. |
423 | 1 | const char* expected = |
424 | 1 | " {\n" |
425 | 1 | " \"name\": \"test_func_gauge\",\n" |
426 | 1 | " \"label\": \"Test Gauge\",\n" |
427 | 1 | " \"type\": \"gauge\",\n" |
428 | 1 | " \"unit\": \"bytes\",\n" |
429 | 1 | " \"description\": \"Test Gauge 2\",\n" |
430 | 1 | " \"level\": \"info\",\n" |
431 | 1 | " \"entity_type\": \"test_entity\"\n" |
432 | 1 | " }"; |
433 | 1 | ASSERT_STR_CONTAINS(json, expected); |
434 | | |
435 | | // Parse it. |
436 | 1 | rapidjson::Document d; |
437 | 1 | d.Parse<0>(json.c_str()); |
438 | | |
439 | | // Ensure that we got a reasonable number of metrics. |
440 | 1 | int num_metrics = d["metrics"].Size(); |
441 | 1 | int num_entities = d["entities"].Size(); |
442 | 1 | LOG(INFO) << "Parsed " << num_metrics << " metrics and " << num_entities << " entities"; |
443 | 1 | ASSERT_GT(num_metrics, 5); |
444 | 1 | ASSERT_EQ(num_entities, 2); |
445 | | |
446 | | // Spot-check that some metrics were properly registered and that the JSON was properly |
447 | | // formed. |
448 | 1 | unordered_set<string> seen_metrics; |
449 | 36 | for (rapidjson::SizeType i = 0; i < d["metrics"].Size(); i++) { |
450 | 35 | InsertOrDie(&seen_metrics, d["metrics"][i]["name"].GetString()); |
451 | 35 | } |
452 | 1 | ASSERT_TRUE(ContainsKey(seen_metrics, "threads_started")); |
453 | 1 | ASSERT_TRUE(ContainsKey(seen_metrics, "test_hist")); |
454 | 1 | } |
455 | | |
456 | | // A basic test to verify PrometheusWriter member functions |
457 | 1 | TEST_F(MetricsTest, PrometheusWriter) { |
458 | 1 | static const auto LABLE_1 = "lable1"; |
459 | 1 | static const auto LABLE_1_VAL = "lable1_value"; |
460 | 1 | static const auto TEST_METRIC_NAME = "test_metric_name"; |
461 | 1 | static const int ONCE = 1; |
462 | | |
463 | 1 | std::stringstream output; |
464 | 1 | PrometheusWriter writer(&output); |
465 | | |
466 | 1 | MetricEntity::AttributeMap attr; |
467 | 1 | attr[LABLE_1] = LABLE_1_VAL; |
468 | | |
469 | 1 | ASSERT_OK(writer.WriteSingleEntryNonTable(attr, TEST_METRIC_NAME, 1u)); |
470 | 1 | std::ostringstream expected; |
471 | 1 | expected << TEST_METRIC_NAME << "{" << LABLE_1 << "=\"" << LABLE_1_VAL << "\"} " << ONCE; |
472 | 1 | auto pw_output = dumpPrometheusWriterOutput(writer); |
473 | | |
474 | 1 | ASSERT_STR_CONTAINS(pw_output, expected.str()); |
475 | | |
476 | 1 | attr["table_id"] = "table_1"; |
477 | 1 | ASSERT_NOK(writer.WriteSingleEntryNonTable(attr, TEST_METRIC_NAME, 1u)); |
478 | 1 | } |
479 | | |
480 | | } // namespace yb |