diff --git a/CHANGELOG.md b/CHANGELOG.md
index 60244311072..0e0146d8bf0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#5076](https://github.com/open-telemetry/opentelemetry-python/pull/5076))
- `opentelemetry-semantic-conventions`: use `X | Y` union annotation
([#5096](https://github.com/open-telemetry/opentelemetry-python/pull/5096))
+- `opentelemetry-exporter-prometheus`: add support for copying selected Resource attributes to metric labels via `resource_attr_filter` in the Prometheus exporter
+ ([#5122](https://github.com/open-telemetry/opentelemetry-python/pull/5122))
## Version 1.41.0/0.62b0 (2026-04-09)
diff --git a/docs/exporter/prometheus/prometheus.rst b/docs/exporter/prometheus/prometheus.rst
index d7a46793312..257ffac02f4 100644
--- a/docs/exporter/prometheus/prometheus.rst
+++ b/docs/exporter/prometheus/prometheus.rst
@@ -16,7 +16,7 @@ The OpenTelemetry Prometheus Exporter package is available on PyPI::
Usage
-----
-The Prometheus exporter starts an HTTP server that collects metrics and serializes them to
+The Prometheus exporter starts an HTTP server that collects metrics and serializes them to
Prometheus text format on request::
from prometheus_client import start_http_server
@@ -39,6 +39,47 @@ Prometheus text format on request::
provider = MeterProvider(resource=resource, metric_readers=[reader])
metrics.set_meter_provider(provider)
+Resource attributes
+-------------------
+
+By default, resource attributes are exported on the ``target_info`` metric. To
+also add selected resource attributes as Prometheus labels on every exported
+metric, pass a ``resource_attr_filter`` callback to ``PrometheusMetricReader``.
+The callback receives the original resource attribute key and returns ``True``
+for attributes that should be copied to metric labels::
+
+ from prometheus_client import start_http_server
+
+ from opentelemetry import metrics
+ from opentelemetry.exporter.prometheus import PrometheusMetricReader
+ from opentelemetry.sdk.metrics import MeterProvider
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+ resource = Resource.create(
+ attributes={
+ SERVICE_NAME: "checkout-service",
+ "service.namespace": "shop",
+ "deployment.environment": "production",
+ }
+ )
+
+ start_http_server(port=9464, addr="localhost")
+ included_resource_attrs = {SERVICE_NAME, "service.namespace"}
+ reader = PrometheusMetricReader(
+ resource_attr_filter=lambda key: key in included_resource_attrs
+ )
+ provider = MeterProvider(resource=resource, metric_readers=[reader])
+ metrics.set_meter_provider(provider)
+
+ meter = metrics.get_meter(__name__)
+ counter = meter.create_counter("orders")
+ counter.add(1)
+
+The exported metric includes ``service_name="checkout-service"`` and
+``service_namespace="shop"`` labels. Resource attribute keys are sanitized to
+valid Prometheus label names, and metric attributes with the same sanitized name
+take precedence over copied resource attributes.
+
Configuration
-------------
@@ -56,4 +97,4 @@ References
----------
* `Prometheus <https://prometheus.io/>`_
-* `OpenTelemetry Project <https://opentelemetry.io/>`_
\ No newline at end of file
+* `OpenTelemetry Project <https://opentelemetry.io/>`_
diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py
index 608d8f6d302..cfc8d8bf397 100644
--- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py
+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py
@@ -67,7 +67,7 @@
from json import dumps
from logging import getLogger
from os import environ
-from typing import Deque, Dict, Iterable, Sequence, Tuple, Union
+from typing import Callable, Deque, Dict, Iterable, Sequence, Tuple, Union
from prometheus_client import start_http_server
from prometheus_client.core import (
@@ -132,10 +132,22 @@ def _convert_buckets(
class PrometheusMetricReader(MetricReader):
- """Prometheus metric exporter for OpenTelemetry."""
+ """Prometheus metric exporter for OpenTelemetry.
+
+ Args:
+ disable_target_info: Whether to disable the ``target_info`` metric.
+ prefix: Prefix added to exported Prometheus metric names.
+ resource_attr_filter: Optional callback to select resource attributes
+ that are copied as labels on exported metrics. The callback receives
+ the original resource attribute key. Selected keys are sanitized to
+ valid Prometheus label names.
+ """
def __init__(
- self, disable_target_info: bool = False, prefix: str = ""
+ self,
+ disable_target_info: bool = False,
+ prefix: str = "",
+ resource_attr_filter: Callable[[str], bool] | None = None,
) -> None:
super().__init__(
preferred_temporality={
@@ -149,7 +161,9 @@ def __init__(
otel_component_type=OtelComponentTypeValues.PROMETHEUS_HTTP_TEXT_METRIC_EXPORTER,
)
self._collector = _CustomCollector(
- disable_target_info=disable_target_info, prefix=prefix
+ disable_target_info=disable_target_info,
+ prefix=prefix,
+ resource_attr_filter=resource_attr_filter,
)
REGISTRY.register(self._collector)
self._collector._callback = self.collect
@@ -176,12 +190,18 @@ class _CustomCollector:
https://github.com/prometheus/client_python#custom-collectors
"""
- def __init__(self, disable_target_info: bool = False, prefix: str = ""):
+ def __init__(
+ self,
+ disable_target_info: bool = False,
+ prefix: str = "",
+ resource_attr_filter: Callable[[str], bool] | None = None,
+ ):
self._callback = None
self._metrics_datas: Deque[MetricsData] = deque()
self._disable_target_info = disable_target_info
self._target_info = None
self._prefix = prefix
+ self._resource_attr_filter = resource_attr_filter
def add_metrics_data(self, metrics_data: MetricsData) -> None:
"""Add metrics to Prometheus data"""
@@ -226,161 +246,182 @@ def _translate_to_prometheus(
metrics_data: MetricsData,
metric_family_id_metric_family: Dict[str, PrometheusMetric],
):
- metrics = []
-
for resource_metrics in metrics_data.resource_metrics:
+ resource_attrs = (
+ {
+ key: value
+ for key, value in resource_metrics.resource.attributes.items()
+ if self._resource_attr_filter(key)
+ }
+ if self._resource_attr_filter is not None
+ else {}
+ )
+
for scope_metrics in resource_metrics.scope_metrics:
for metric in scope_metrics.metrics:
- metrics.append(metric)
-
- for metric in metrics:
- label_values_data_points = []
- values = []
-
- metric_name = metric.name
- if self._prefix:
- metric_name = self._prefix + "_" + metric_name
- metric_name = sanitize_full_name(metric_name)
- metric_description = metric.description or ""
- metric_unit = map_unit(metric.unit)
-
- # First pass: collect all unique label keys across all data points
- all_label_keys_set = set()
- data_point_attributes = []
- for number_data_point in metric.data.data_points:
- attrs = {}
- for key, value in number_data_point.attributes.items():
- sanitized_key = sanitize_attribute(key)
- all_label_keys_set.add(sanitized_key)
- attrs[sanitized_key] = self._check_value(value)
- data_point_attributes.append(attrs)
-
- if isinstance(number_data_point, HistogramDataPoint):
- values.append(
- {
- "bucket_counts": number_data_point.bucket_counts,
- "explicit_bounds": (
- number_data_point.explicit_bounds
- ),
- "sum": number_data_point.sum,
- }
+ label_values_data_points = []
+ values = []
+
+ metric_name = metric.name
+ if self._prefix:
+ metric_name = self._prefix + "_" + metric_name
+ metric_name = sanitize_full_name(metric_name)
+ metric_description = metric.description or ""
+ metric_unit = map_unit(metric.unit)
+
+ # First pass: collect all unique label keys across all data points
+ all_label_keys_set = set()
+ data_point_attributes = []
+ for number_data_point in metric.data.data_points:
+ attrs = {}
+ for key, value in chain(
+ resource_attrs.items(),
+ number_data_point.attributes.items(),
+ ):
+ sanitized_key = sanitize_attribute(key)
+ all_label_keys_set.add(sanitized_key)
+ attrs[sanitized_key] = self._check_value(value)
+ data_point_attributes.append(attrs)
+
+ if isinstance(number_data_point, HistogramDataPoint):
+ values.append(
+ {
+ "bucket_counts": number_data_point.bucket_counts,
+ "explicit_bounds": (
+ number_data_point.explicit_bounds
+ ),
+ "sum": number_data_point.sum,
+ }
+ )
+ else:
+ values.append(number_data_point.value)
+
+ # Sort label keys for consistent ordering
+ all_label_keys = sorted(all_label_keys_set)
+
+ # Second pass: build label values with empty strings for missing labels
+ for attrs in data_point_attributes:
+ label_values = []
+ for key in all_label_keys:
+ label_values.append(attrs.get(key, ""))
+ label_values_data_points.append(label_values)
+
+ # Create metric family ID without label keys
+ per_metric_family_id = "|".join(
+ [
+ metric_name,
+ metric_description,
+ metric_unit,
+ ]
)
- else:
- values.append(number_data_point.value)
-
- # Sort label keys for consistent ordering
- all_label_keys = sorted(all_label_keys_set)
-
- # Second pass: build label values with empty strings for missing labels
- for attrs in data_point_attributes:
- label_values = []
- for key in all_label_keys:
- label_values.append(attrs.get(key, ""))
- label_values_data_points.append(label_values)
-
- # Create metric family ID without label keys
- per_metric_family_id = "|".join(
- [
- metric_name,
- metric_description,
- metric_unit,
- ]
- )
- is_non_monotonic_sum = (
- isinstance(metric.data, Sum)
- and metric.data.is_monotonic is False
- )
- is_cumulative = (
- isinstance(metric.data, Sum)
- and metric.data.aggregation_temporality
- == AggregationTemporality.CUMULATIVE
- )
+ is_non_monotonic_sum = (
+ isinstance(metric.data, Sum)
+ and metric.data.is_monotonic is False
+ )
+ is_cumulative = (
+ isinstance(metric.data, Sum)
+ and metric.data.aggregation_temporality
+ == AggregationTemporality.CUMULATIVE
+ )
- # The prometheus compatibility spec for sums says: If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge.
- should_convert_sum_to_gauge = (
- is_non_monotonic_sum and is_cumulative
- )
+ # The prometheus compatibility spec for sums says: If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge.
+ should_convert_sum_to_gauge = (
+ is_non_monotonic_sum and is_cumulative
+ )
- if (
- isinstance(metric.data, Sum)
- and not should_convert_sum_to_gauge
- ):
- metric_family_id = "|".join(
- [per_metric_family_id, CounterMetricFamily.__name__]
- )
+ if (
+ isinstance(metric.data, Sum)
+ and not should_convert_sum_to_gauge
+ ):
+ metric_family_id = "|".join(
+ [
+ per_metric_family_id,
+ CounterMetricFamily.__name__,
+ ]
+ )
- if metric_family_id not in metric_family_id_metric_family:
- metric_family_id_metric_family[metric_family_id] = (
- CounterMetricFamily(
- name=metric_name,
- documentation=metric_description,
- labels=all_label_keys,
- unit=metric_unit,
+ if (
+ metric_family_id
+ not in metric_family_id_metric_family
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ] = CounterMetricFamily(
+ name=metric_name,
+ documentation=metric_description,
+ labels=all_label_keys,
+ unit=metric_unit,
+ )
+ for label_values, value in zip(
+ label_values_data_points, values
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ].add_metric(labels=label_values, value=value)
+ elif (
+ isinstance(metric.data, Gauge)
+ or should_convert_sum_to_gauge
+ ):
+ metric_family_id = "|".join(
+ [per_metric_family_id, GaugeMetricFamily.__name__]
)
- )
- for label_values, value in zip(
- label_values_data_points, values
- ):
- metric_family_id_metric_family[
- metric_family_id
- ].add_metric(labels=label_values, value=value)
- elif isinstance(metric.data, Gauge) or should_convert_sum_to_gauge:
- metric_family_id = "|".join(
- [per_metric_family_id, GaugeMetricFamily.__name__]
- )
- if (
- metric_family_id
- not in metric_family_id_metric_family.keys()
- ):
- metric_family_id_metric_family[metric_family_id] = (
- GaugeMetricFamily(
- name=metric_name,
- documentation=metric_description,
- labels=all_label_keys,
- unit=metric_unit,
+ if (
+ metric_family_id
+ not in metric_family_id_metric_family.keys()
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ] = GaugeMetricFamily(
+ name=metric_name,
+ documentation=metric_description,
+ labels=all_label_keys,
+ unit=metric_unit,
+ )
+ for label_values, value in zip(
+ label_values_data_points, values
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ].add_metric(labels=label_values, value=value)
+ elif isinstance(metric.data, Histogram):
+ metric_family_id = "|".join(
+ [
+ per_metric_family_id,
+ HistogramMetricFamily.__name__,
+ ]
)
- )
- for label_values, value in zip(
- label_values_data_points, values
- ):
- metric_family_id_metric_family[
- metric_family_id
- ].add_metric(labels=label_values, value=value)
- elif isinstance(metric.data, Histogram):
- metric_family_id = "|".join(
- [per_metric_family_id, HistogramMetricFamily.__name__]
- )
- if (
- metric_family_id
- not in metric_family_id_metric_family.keys()
- ):
- metric_family_id_metric_family[metric_family_id] = (
- HistogramMetricFamily(
- name=metric_name,
- documentation=metric_description,
- labels=all_label_keys,
- unit=metric_unit,
+ if (
+ metric_family_id
+ not in metric_family_id_metric_family.keys()
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ] = HistogramMetricFamily(
+ name=metric_name,
+ documentation=metric_description,
+ labels=all_label_keys,
+ unit=metric_unit,
+ )
+ for label_values, value in zip(
+ label_values_data_points, values
+ ):
+ metric_family_id_metric_family[
+ metric_family_id
+ ].add_metric(
+ labels=label_values,
+ buckets=_convert_buckets(
+ value["bucket_counts"],
+ value["explicit_bounds"],
+ ),
+ sum_value=value["sum"],
+ )
+ else:
+ _logger.warning(
+ "Unsupported metric data. %s", type(metric.data)
)
- )
- for label_values, value in zip(
- label_values_data_points, values
- ):
- metric_family_id_metric_family[
- metric_family_id
- ].add_metric(
- labels=label_values,
- buckets=_convert_buckets(
- value["bucket_counts"], value["explicit_bounds"]
- ),
- sum_value=value["sum"],
- )
- else:
- _logger.warning(
- "Unsupported metric data. %s", type(metric.data)
- )
# pylint: disable=no-self-use
def _check_value(self, value: Union[int, float, str, Sequence]) -> str:
diff --git a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py
index 26770c9e1f4..bc9c3286bea 100644
--- a/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py
+++ b/exporter/opentelemetry-exporter-prometheus/tests/test_prometheus_exporter.py
@@ -47,6 +47,7 @@
)
+# pylint: disable=too-many-public-methods
class TestPrometheusMetricReader(TestCase):
def setUp(self):
self._mock_registry_register = Mock()
@@ -719,3 +720,112 @@ def test_multiple_data_points_with_different_label_sets(self):
"""
),
)
+
+ # pylint: disable=too-many-locals
+ def test_resource_attr_filter_basic(self):
+ cases = [
+ (
+ "single key included",
+ lambda k: k == "os",
+ {"os": "Unix"},
+ {"version", "service_name", "service_version", "host"},
+ ),
+ (
+ "all keys included",
+ lambda k: True,
+ {
+ "os": "Unix",
+ "version": "1.2.3",
+ "service_name": "myapp",
+ "service_version": "1.0",
+ "host": "localhost",
+ },
+ set(),
+ ),
+ (
+ "no keys included",
+ lambda k: False,
+ {},
+ {"os", "version", "service_name", "service_version", "host"},
+ ),
+ (
+ "prefix match",
+ lambda k: k.startswith("service."),
+ {"service_name": "myapp", "service_version": "1.0"},
+ {"os", "version", "host"},
+ ),
+ ]
+ for name, resource_attr_filter, expected_in, expected_not_in in cases:
+ with self.subTest(name):
+ metric_reader = PrometheusMetricReader(
+ resource_attr_filter=resource_attr_filter
+ )
+ provider = MeterProvider(
+ metric_readers=[metric_reader],
+ resource=Resource(
+ {
+ "os": "Unix",
+ "version": "1.2.3",
+ "service.name": "myapp",
+ "service.version": "1.0",
+ "host": "localhost",
+ }
+ ),
+ )
+ meter = provider.get_meter("test")
+ counter = meter.create_counter("counter")
+ counter.add(1)
+ result = list(metric_reader._collector.collect())
+
+ # target info always has all resource attrs regardless of filter
+ target_info = result[0]
+ self.assertEqual(type(target_info), InfoMetricFamily)
+ for attr in (
+ "os",
+ "version",
+ "service_name",
+ "service_version",
+ "host",
+ ):
+ self.assertIn(attr, target_info.samples[0].labels)
+
+ counter_metric = result[1]
+ for label, value in expected_in.items():
+ self.assertEqual(
+ counter_metric.samples[0].labels[label], value
+ )
+ for label in expected_not_in:
+ self.assertNotIn(label, counter_metric.samples[0].labels)
+
+ def test_resource_attr_filter_disabled_by_default(self):
+ metric_reader = PrometheusMetricReader()
+ provider = MeterProvider(
+ metric_readers=[metric_reader],
+ resource=Resource({"os": "Unix", "version": "1.2.3"}),
+ )
+ meter = provider.get_meter("test")
+ counter = meter.create_counter("counter")
+ counter.add(1)
+ result = list(metric_reader._collector.collect())
+
+ counter_metric = result[1]
+ self.assertNotIn("os", counter_metric.samples[0].labels)
+ self.assertNotIn("version", counter_metric.samples[0].labels)
+
+ def test_resource_attr_filter_metric_attr_takes_precedence(self):
+ metric_reader = PrometheusMetricReader(
+ resource_attr_filter=lambda k: k == "service.name"
+ )
+ provider = MeterProvider(
+ metric_readers=[metric_reader],
+ resource=Resource({"service.name": "from-resource"}),
+ )
+ meter = provider.get_meter("test")
+ counter = meter.create_counter("counter")
+ counter.add(1, {"service_name": "from-metric"})
+ result = list(metric_reader._collector.collect())
+
+ counter_metric = result[1]
+ self.assertEqual(
+ counter_metric.samples[0].labels["service_name"], "from-metric"
+ )