Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 34 additions & 2 deletions profcheck/check.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package profcheck
import (
"errors"
"fmt"
"slices"

profiles "go.opentelemetry.io/proto/otlp/profiles/v1development"
"google.golang.org/protobuf/proto"
Expand Down Expand Up @@ -212,7 +213,17 @@ func (c ConformanceChecker) checkMappingTable(mappingTable []*profiles.Mapping,
if err := checkZeroVal(mappingTable); err != nil {
errs = errors.Join(errs, err)
}
for idx, m := range mappingTable {

type uniqMapping struct {
filenameStrIdx int32
attrIdxs []int32
memStart uint64
memLimit uint64
fileOffset uint64
}
var uniqMappings []uniqMapping

for idx, m := range mappingTable[1:] {
if err := c.checkIndex(len(dict.StringTable), m.FilenameStrindex); err != nil {
errs = errors.Join(errs, prefixErrorf(err, "[%d].filename_strindex", idx))
}
Expand All @@ -222,8 +233,29 @@ func (c ConformanceChecker) checkMappingTable(mappingTable []*profiles.Mapping,
if !(m.MemoryStart == 0 && m.MemoryLimit == 0) && !(m.MemoryStart < m.MemoryLimit) {
errs = errors.Join(errs, fmt.Errorf("[%d]: memory_start=%016x, memory_limit=%016x: must be both zero or start < limit", idx, m.MemoryStart, m.MemoryLimit))
}
if c.CheckDictionaryDuplicates {
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If this style of duplicate entry check is fine for everyone, I can apply it to the other tables as well.

newMapping := uniqMapping{
filenameStrIdx: m.FilenameStrindex,
attrIdxs: m.AttributeIndices,
memStart: m.MemoryStart,
memLimit: m.MemoryLimit,
Comment thread
florianl marked this conversation as resolved.
fileOffset: m.FileOffset,
}
if slices.ContainsFunc(uniqMappings, func(e uniqMapping) bool {
slices.Sort(newMapping.attrIdxs)
slices.Sort(e.attrIdxs)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We're doing a lot of redundant operations here (like iterating uniqMappings for every mapping, sorting e.attrIdxs repeatedly instead of once) and depending on number of mappings, it could get expensive.

Can't we switch uniqMappings to be a map instead? We can keep uniqMapping as the key if we switch attrIdxs to be a string of sorted indices.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can't we switch uniqMappings to be a map instead? We can keep uniqMapping as the key if we switch attrIdxs to be a string of sorted indices.

Unfortunately it is not possible to use uniqMapping as a key to a map, as uniqMapping contains a slice as a field. See https://go.dev/play/p/4-wpRA-KV3Q.

Copy link
Copy Markdown
Member

@christos68k christos68k Apr 23, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's why I mentioned switching attrIdxs to be a string of sorted indices. Alternatively, we can sort and hash the slice as we do in the eBPF profiler reporter for location indices.

return newMapping.filenameStrIdx == e.filenameStrIdx &&
slices.Equal(newMapping.attrIdxs, e.attrIdxs) &&
Comment thread
florianl marked this conversation as resolved.
newMapping.memStart == e.memStart &&
newMapping.memLimit == e.memLimit &&
newMapping.fileOffset == e.fileOffset
}) {
errs = errors.Join(errs, fmt.Errorf("duplicate mapping at index %d: %v", idx, m))
continue
}
uniqMappings = append(uniqMappings, newMapping)
}
}
// TODO: Add optional uniqueness check.
// TODO: Add optional unreferenced entries check.
return errs
}
Expand Down
Loading