-
Notifications
You must be signed in to change notification settings - Fork 966
Add support for batching in PeriodicMetricReader #8296
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
91b087c
9534dea
70d4e15
6b97875
b2b78c3
40226ae
af9fad2
3c3bc1a
7b3feb8
0083b54
1b3ad77
5e18a7c
a5b0120
1751b57
6ea146e
c5356f6
c02c3c8
1e8c645
48575c2
e90ab8d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,2 +1,4 @@ | ||
| Comparing source compatibility of opentelemetry-sdk-metrics-1.62.0-SNAPSHOT.jar against opentelemetry-sdk-metrics-1.61.0.jar | ||
| No changes. | ||
| *** MODIFIED CLASS: PUBLIC FINAL io.opentelemetry.sdk.metrics.export.PeriodicMetricReaderBuilder (not serializable) | ||
| === CLASS FILE FORMAT VERSION: 52.0 <- 52.0 | ||
| +++ NEW METHOD: PUBLIC(+) io.opentelemetry.sdk.metrics.export.PeriodicMetricReaderBuilder setMaxExportBatchSize(int) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,262 @@ | ||
| /* | ||
| * Copyright The OpenTelemetry Authors | ||
| * SPDX-License-Identifier: Apache-2.0 | ||
| */ | ||
|
|
||
| package io.opentelemetry.sdk.metrics.export; | ||
|
|
||
| import io.opentelemetry.sdk.metrics.data.Data; | ||
| import io.opentelemetry.sdk.metrics.data.DoublePointData; | ||
| import io.opentelemetry.sdk.metrics.data.ExponentialHistogramData; | ||
| import io.opentelemetry.sdk.metrics.data.ExponentialHistogramPointData; | ||
| import io.opentelemetry.sdk.metrics.data.HistogramData; | ||
| import io.opentelemetry.sdk.metrics.data.HistogramPointData; | ||
| import io.opentelemetry.sdk.metrics.data.LongPointData; | ||
| import io.opentelemetry.sdk.metrics.data.MetricData; | ||
| import io.opentelemetry.sdk.metrics.data.PointData; | ||
| import io.opentelemetry.sdk.metrics.data.SumData; | ||
| import io.opentelemetry.sdk.metrics.data.SummaryPointData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; | ||
| import io.opentelemetry.sdk.metrics.internal.data.ImmutableSummaryData; | ||
| import java.util.ArrayList; | ||
| import java.util.Collection; | ||
| import java.util.Collections; | ||
| import java.util.List; | ||
|
|
||
| /** | ||
| * Batches metric data into multiple batches based on the maximum export batch size. This is used by | ||
| * the {@link PeriodicMetricReader} to batch metric data before exporting it. | ||
| * | ||
| * <p>This class is internal and is hence not for public use. Its APIs are unstable and can change | ||
| * at any time. | ||
| */ | ||
| class MetricExportBatcher { | ||
| private final int maxExportBatchSize; | ||
|
|
||
| /** | ||
| * Creates a new {@link MetricExportBatcher} with the given maximum export batch size. | ||
| * | ||
| * @param maxExportBatchSize The maximum number of {@link Data#getPoints()} in each export. | ||
| */ | ||
| MetricExportBatcher(int maxExportBatchSize) { | ||
| if (maxExportBatchSize <= 0) { | ||
| throw new IllegalArgumentException("maxExportBatchSize must be positive"); | ||
| } | ||
| this.maxExportBatchSize = maxExportBatchSize; | ||
| } | ||
|
|
||
| @Override | ||
| public String toString() { | ||
| return "MetricExportBatcher{maxExportBatchSize=" + maxExportBatchSize + "}"; | ||
| } | ||
|
|
||
| /** | ||
| * Batches the given metric data into multiple batches based on the maximum export batch size. | ||
| * | ||
| * @param metrics The collection of metric data objects to batch based on the number of data | ||
| * points they contain. | ||
| * @return A collection of batches of metric data. | ||
| */ | ||
| Collection<Collection<MetricData>> batchMetrics(Collection<MetricData> metrics) { | ||
| if (metrics.isEmpty()) { | ||
| return Collections.emptyList(); | ||
| } | ||
| Collection<Collection<MetricData>> preparedBatchesForExport = new ArrayList<>(); | ||
| Collection<MetricData> currentBatch = new ArrayList<>(maxExportBatchSize); | ||
|
|
||
| // Fill active batch and split overlapping metric points if needed | ||
| for (MetricData metricData : metrics) { | ||
| MetricDataSplitOperationResult splitResult = prepareExportBatches(metricData, currentBatch); | ||
| preparedBatchesForExport.addAll(splitResult.getPreparedBatches()); | ||
| currentBatch = splitResult.getLastInProgressBatch(); | ||
| } | ||
|
|
||
| // Push trailing capacity block | ||
| if (!currentBatch.isEmpty()) { | ||
| preparedBatchesForExport.add(currentBatch); | ||
| } | ||
| return Collections.unmodifiableCollection(preparedBatchesForExport); | ||
| } | ||
|
|
||
| /** | ||
| * Prepares export batches from a single metric data object. This function only operates on a | ||
| * single metric data object, fills up the current batch with as many points as possible from the | ||
| * metric data object, and then creates new metric data objects for the remaining points. | ||
| * | ||
| * @param metricData The metric data object to split. | ||
| * @param currentBatch The current batch of metric data objects. | ||
| * @return A result containing the prepared batches and the last in-progress batch. | ||
| */ | ||
| private MetricDataSplitOperationResult prepareExportBatches( | ||
| MetricData metricData, Collection<MetricData> currentBatch) { | ||
| int currentBatchPoints = 0; | ||
| for (MetricData m : currentBatch) { | ||
| currentBatchPoints += m.getData().getPoints().size(); | ||
| } | ||
| int remainingCapacityInCurrentBatch = maxExportBatchSize - currentBatchPoints; | ||
|
Comment on lines
+96
to
+100
|
||
| int totalPointsInMetricData = metricData.getData().getPoints().size(); | ||
|
|
||
| if (remainingCapacityInCurrentBatch >= totalPointsInMetricData) { | ||
| currentBatch.add(metricData); | ||
| return new MetricDataSplitOperationResult(Collections.emptyList(), currentBatch); | ||
| } else { | ||
| // Remaining capacity can't hold all points, partition existing metric data object | ||
| List<PointData> originalPointsList = new ArrayList<>(metricData.getData().getPoints()); | ||
| Collection<Collection<MetricData>> preparedBatches = new ArrayList<>(); | ||
| int currentIndex = 0; | ||
|
|
||
| while (currentIndex < totalPointsInMetricData) { | ||
| int pointsToTake = | ||
| Math.min(totalPointsInMetricData - currentIndex, remainingCapacityInCurrentBatch); | ||
|
|
||
| if (pointsToTake > 0) { | ||
| currentBatch.add( | ||
| copyMetricData(metricData, originalPointsList, currentIndex, pointsToTake)); | ||
| currentIndex += pointsToTake; | ||
| remainingCapacityInCurrentBatch -= pointsToTake; | ||
| } | ||
|
|
||
| if (remainingCapacityInCurrentBatch == 0) { | ||
| preparedBatches.add(currentBatch); | ||
| currentBatch = new ArrayList<>(maxExportBatchSize); | ||
| remainingCapacityInCurrentBatch = maxExportBatchSize; | ||
| } | ||
|
psx95 marked this conversation as resolved.
|
||
| } | ||
| return new MetricDataSplitOperationResult(preparedBatches, currentBatch); | ||
| } | ||
| } | ||
|
|
||
| private static MetricData copyMetricData( | ||
| MetricData original, | ||
| List<PointData> originalPointsList, | ||
| int dataPointsOffset, | ||
| int dataPointsToTake) { | ||
| List<PointData> points = | ||
| Collections.unmodifiableList( | ||
| new ArrayList<>( | ||
| originalPointsList.subList(dataPointsOffset, dataPointsOffset + dataPointsToTake))); | ||
| return createMetricDataWithPoints(original, points); | ||
| } | ||
|
|
||
| /** | ||
| * Creates a new MetricData with the given points. | ||
| * | ||
| * @param original The original MetricData. | ||
| * @param points The points to use for the new MetricData. | ||
| * @return A new MetricData with the given points. | ||
| */ | ||
| @SuppressWarnings("unchecked") | ||
| private static MetricData createMetricDataWithPoints( | ||
| MetricData original, Collection<PointData> points) { | ||
| switch (original.getType()) { | ||
| case DOUBLE_GAUGE: | ||
| return ImmutableMetricData.createDoubleGauge( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableGaugeData.create((Collection<DoublePointData>) (Collection<?>) points)); | ||
| case LONG_GAUGE: | ||
| return ImmutableMetricData.createLongGauge( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableGaugeData.create((Collection<LongPointData>) (Collection<?>) points)); | ||
| case DOUBLE_SUM: | ||
| SumData<DoublePointData> doubleSumData = original.getDoubleSumData(); | ||
| return ImmutableMetricData.createDoubleSum( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableSumData.create( | ||
| doubleSumData.isMonotonic(), | ||
| doubleSumData.getAggregationTemporality(), | ||
| (Collection<DoublePointData>) (Collection<?>) points)); | ||
| case LONG_SUM: | ||
| SumData<LongPointData> longSumData = original.getLongSumData(); | ||
| return ImmutableMetricData.createLongSum( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableSumData.create( | ||
| longSumData.isMonotonic(), | ||
| longSumData.getAggregationTemporality(), | ||
| (Collection<LongPointData>) (Collection<?>) points)); | ||
| case HISTOGRAM: | ||
| HistogramData histogramData = original.getHistogramData(); | ||
| return ImmutableMetricData.createDoubleHistogram( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableHistogramData.create( | ||
| histogramData.getAggregationTemporality(), | ||
| (Collection<HistogramPointData>) (Collection<?>) points)); | ||
| case EXPONENTIAL_HISTOGRAM: | ||
| ExponentialHistogramData expHistogramData = original.getExponentialHistogramData(); | ||
| return ImmutableMetricData.createExponentialHistogram( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableExponentialHistogramData.create( | ||
| expHistogramData.getAggregationTemporality(), | ||
| (Collection<ExponentialHistogramPointData>) (Collection<?>) points)); | ||
| case SUMMARY: | ||
| return ImmutableMetricData.createDoubleSummary( | ||
| original.getResource(), | ||
| original.getInstrumentationScopeInfo(), | ||
| original.getName(), | ||
| original.getDescription(), | ||
| original.getUnit(), | ||
| ImmutableSummaryData.create((Collection<SummaryPointData>) (Collection<?>) points)); | ||
| } | ||
| throw new UnsupportedOperationException("Unsupported metric type: " + original.getType()); | ||
| } | ||
|
|
||
| /** | ||
| * A data class to store the result of a split operation performed on a single {@link MetricData} | ||
| * object. | ||
| */ | ||
| private static class MetricDataSplitOperationResult { | ||
| private final Collection<Collection<MetricData>> preparedBatches; | ||
| private final Collection<MetricData> lastInProgressBatch; | ||
|
|
||
| /** | ||
| * Creates a new MetricDataSplitOperationResult. | ||
| * | ||
| * @param preparedBatches The collection of prepared batches of metric data for export. Each | ||
| * batch of {@link MetricData} objects is guaranteed to have at most {@link | ||
| * #maxExportBatchSize} points. | ||
| * @param lastInProgressBatch The last batch that is still in progress. This batch may have less | ||
| * than {@link #maxExportBatchSize} points. | ||
| */ | ||
| MetricDataSplitOperationResult( | ||
| Collection<Collection<MetricData>> preparedBatches, | ||
| Collection<MetricData> lastInProgressBatch) { | ||
| this.preparedBatches = preparedBatches; | ||
| this.lastInProgressBatch = lastInProgressBatch; | ||
| } | ||
|
|
||
| Collection<Collection<MetricData>> getPreparedBatches() { | ||
| return preparedBatches; | ||
| } | ||
|
|
||
| Collection<MetricData> getLastInProgressBatch() { | ||
| return lastInProgressBatch; | ||
| } | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,6 +17,7 @@ | |
| import io.opentelemetry.sdk.metrics.data.AggregationTemporality; | ||
| import io.opentelemetry.sdk.metrics.data.MetricData; | ||
| import java.util.Collection; | ||
| import java.util.Iterator; | ||
| import java.util.concurrent.ScheduledExecutorService; | ||
| import java.util.concurrent.ScheduledFuture; | ||
| import java.util.concurrent.TimeUnit; | ||
|
|
@@ -51,6 +52,7 @@ public final class PeriodicMetricReader implements MetricReader { | |
| private volatile CollectionRegistration collectionRegistration = CollectionRegistration.noop(); | ||
|
|
||
| @Nullable private volatile ScheduledFuture<?> scheduledFuture; | ||
| @Nullable private final MetricExportBatcher metricsBatcher; | ||
|
|
||
| /** | ||
| * Returns a new {@link PeriodicMetricReader} which exports to the {@code exporter} once every | ||
|
|
@@ -66,10 +68,14 @@ public static PeriodicMetricReaderBuilder builder(MetricExporter exporter) { | |
| } | ||
|
|
||
| PeriodicMetricReader( | ||
| MetricExporter exporter, long intervalNanos, ScheduledExecutorService scheduler) { | ||
| MetricExporter exporter, | ||
| long intervalNanos, | ||
| ScheduledExecutorService scheduler, | ||
| @Nullable MetricExportBatcher metricsBatcher) { | ||
| this.exporter = exporter; | ||
| this.intervalNanos = intervalNanos; | ||
| this.scheduler = scheduler; | ||
| this.metricsBatcher = metricsBatcher; | ||
| this.scheduled = new Scheduled(); | ||
| } | ||
|
|
||
|
|
@@ -163,6 +169,8 @@ public String toString() { | |
| + exporter | ||
| + ", intervalNanos=" | ||
| + intervalNanos | ||
| + ", metricsBatcher=" | ||
| + metricsBatcher | ||
| + '}'; | ||
| } | ||
|
|
||
|
|
@@ -217,7 +225,47 @@ CompletableResultCode doRun() { | |
| flushResult.succeed(); | ||
| exportAvailable.set(true); | ||
| } else { | ||
| CompletableResultCode result = exporter.export(metricData); | ||
| CompletableResultCode result; | ||
| if (metricsBatcher != null) { | ||
| Collection<Collection<MetricData>> batches = metricsBatcher.batchMetrics(metricData); | ||
| CompletableResultCode sequentialResult = new CompletableResultCode(); | ||
| AtomicBoolean anyFailed = new AtomicBoolean(false); | ||
| Iterator<Collection<MetricData>> batchIterator = batches.iterator(); | ||
|
|
||
| Runnable exportNext = | ||
| new Runnable() { | ||
| @Override | ||
| public void run() { | ||
| while (batchIterator.hasNext()) { | ||
| Collection<MetricData> currentBatch = batchIterator.next(); | ||
| CompletableResultCode currentResult = exporter.export(currentBatch); | ||
| if (currentResult.isDone()) { | ||
| if (!currentResult.isSuccess()) { | ||
| anyFailed.set(true); | ||
| } | ||
| } else { | ||
| currentResult.whenComplete( | ||
| () -> { | ||
| if (!currentResult.isSuccess()) { | ||
| anyFailed.set(true); | ||
| } | ||
| this.run(); | ||
| }); | ||
| return; | ||
| } | ||
| } | ||
| if (anyFailed.get()) { | ||
| sequentialResult.fail(); | ||
| } else { | ||
| sequentialResult.succeed(); | ||
| } | ||
| } | ||
| }; | ||
| exportNext.run(); | ||
| result = sequentialResult; | ||
| } else { | ||
|
Comment on lines
+229
to
+266
|
||
| result = exporter.export(metricData); | ||
| } | ||
| result.whenComplete( | ||
| () -> { | ||
| if (!result.isSuccess()) { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
nit: i expected prepareExportBatches to take the Collection so that all of the batching logic is contained within the batcher (and we don't have to pass + update currentBatch).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
But we are not technically updating the currentBatch in this loop - the contents within the currentBatch is updated by the prepareExportBatches.
My intention behind `prepareExportBatches` was a function that splits a given MetricData into appropriate export batches. Since the batches need to be completely filled (except the last one), this function requires the currentBatch as well.

Maybe if the function was renamed to `splitMetricData` it would make more sense?