diff --git a/.gitignore b/.gitignore index 5d58886..e917c2e 100644 --- a/.gitignore +++ b/.gitignore @@ -27,5 +27,6 @@ settings.yaml __debug_bin* *.code-workspace .history/ -.sample-data/ -internal/service/ch/detector_queries_test.go +sample_data/ +e2e/local_data_test.go +e2e/clickhouse_container_test.go diff --git a/Makefile b/Makefile index cc5efef..f5bafb9 100644 --- a/Makefile +++ b/Makefile @@ -55,6 +55,23 @@ docker: dep @docker build -f ./Dockerfile . -t dimozone/$(BIN_NAME):$(VER_CUT) @docker tag dimozone/$(BIN_NAME):$(VER_CUT) dimozone/$(BIN_NAME):latest +# Build multi-arch (amd64 + arm64) and push with a random tag. Does not trigger GitHub workflows. +# Requires: docker buildx, docker login. Run from repo root. +docker-push-multiarch: + $(eval TAG := dev-$(shell openssl rand -hex 6)) + @echo "Building and pushing dimozone/$(BIN_NAME):$(TAG) (linux/amd64, linux/arm64)" + @docker buildx build --platform linux/amd64,linux/arm64 -f ./Dockerfile --push -t dimozone/$(BIN_NAME):$(TAG) . + @echo "Pushed dimozone/$(BIN_NAME):$(TAG)" + +# Same as docker-push-multiarch but using podman (manifest list + manifest push --all). +podman-push-multiarch: + $(eval TAG := dev-$(shell openssl rand -hex 6)) + $(eval IMAGE := dimozone/$(BIN_NAME):$(TAG)) + @echo "Building and pushing $(IMAGE) (linux/amd64, linux/arm64)" + @podman build --platform linux/amd64,linux/arm64 --manifest $(IMAGE) -f ./Dockerfile . + @podman manifest push --all $(IMAGE) docker://$(IMAGE) + @echo "Pushed $(IMAGE)" + gqlgen: ## Generate gqlgen code. 
@go tool gqlgen generate diff --git a/charts/telemetry-api/values.yaml b/charts/telemetry-api/values.yaml index 44e263b..a9d8970 100644 --- a/charts/telemetry-api/values.yaml +++ b/charts/telemetry-api/values.yaml @@ -2,7 +2,7 @@ replicaCount: 1 image: repository: dimozone/telemetry-api pullPolicy: IfNotPresent - tag: 69c2cd4 + tag: df47a59 imagePullSecrets: [] nameOverride: '' fullnameOverride: '' diff --git a/e2e/clickhouse_container_test.go b/e2e/clickhouse_container_test.go index 8e35db7..e5f8787 100644 --- a/e2e/clickhouse_container_test.go +++ b/e2e/clickhouse_container_test.go @@ -1,9 +1,15 @@ package e2e_test import ( + "bufio" "context" + "encoding/csv" + "encoding/json" "fmt" + "os" + "strconv" "testing" + "time" chconfig "github.com/DIMO-Network/clickhouse-infra/pkg/connect/config" "github.com/DIMO-Network/clickhouse-infra/pkg/container" @@ -12,6 +18,8 @@ import ( "github.com/stretchr/testify/require" ) +const loadBatchSize = 5000 + func setupClickhouseContainer(t *testing.T) *container.Container { t.Helper() ctx := context.Background() @@ -67,3 +75,99 @@ func insertEvent(t *testing.T, ch *container.Container, events []vss.Event) { err = batch.Send() require.NoError(t, err, "Failed to send batch") } + +// LoadSampleDataInto loads signal and event CSVs into the ClickHouse container. 
+func LoadSampleDataInto(t *testing.T, ch *container.Container, signalPath, eventPath string) { + t.Helper() + + sf, err := os.Open(signalPath) + require.NoError(t, err) + defer func() { _ = sf.Close() }() + br := bufio.NewReader(sf) + if peek, _ := br.Peek(3); len(peek) == 3 && peek[0] == 0xef && peek[1] == 0xbb && peek[2] == 0xbf { + _, _ = br.Discard(3) + } + sr := csv.NewReader(br) + sr.FieldsPerRecord = -1 + sigRows, err := sr.ReadAll() + require.NoError(t, err) + if len(sigRows) < 2 { + t.Fatalf("signal CSV has no data rows") + } + signals := make([]vss.Signal, 0, len(sigRows)-1) + for _, row := range sigRows[1:] { + if len(row) < 9 { + continue + } + tokenID, _ := strconv.ParseUint(row[0], 10, 32) + ts, _ := time.Parse("2006-01-02 15:04:05.000000", row[1]) + var loc vss.Location + if len(row[8]) > 0 { + _ = json.Unmarshal([]byte(row[8]), &loc) + } + valNum, _ := strconv.ParseFloat(row[6], 64) + signals = append(signals, vss.Signal{ + TokenID: uint32(tokenID), + Timestamp: ts, + Name: row[2], + Source: row[3], + Producer: row[4], + CloudEventID: row[5], + ValueNumber: valNum, + ValueString: row[7], + ValueLocation: loc, + }) + } + for i := 0; i < len(signals); i += loadBatchSize { + end := i + loadBatchSize + if end > len(signals) { + end = len(signals) + } + insertSignal(t, ch, signals[i:end]) + } + + ef, err := os.Open(eventPath) + require.NoError(t, err) + defer func() { _ = ef.Close() }() + erBr := bufio.NewReader(ef) + if peek, _ := erBr.Peek(3); len(peek) == 3 && peek[0] == 0xef && peek[1] == 0xbb && peek[2] == 0xbf { + _, _ = erBr.Discard(3) + } + er := csv.NewReader(erBr) + er.FieldsPerRecord = -1 + evRows, err := er.ReadAll() + require.NoError(t, err) + if len(evRows) < 2 { + t.Fatalf("event CSV has no data rows") + } + events := make([]vss.Event, 0, len(evRows)-1) + for _, row := range evRows[1:] { + if len(row) < 9 { + continue + } + ts, _ := time.Parse("2006-01-02 15:04:05.000000", row[5]) + durNs, _ := strconv.ParseUint(row[6], 10, 64) + var tags 
[]string + if len(row[8]) > 0 { + _ = json.Unmarshal([]byte(row[8]), &tags) + } + events = append(events, vss.Event{ + Subject: row[0], + Source: row[1], + Producer: row[2], + CloudEventID: row[3], + Name: row[4], + Timestamp: ts, + DurationNs: durNs, + Metadata: row[7], + Tags: tags, + }) + } + for i := 0; i < len(events); i += loadBatchSize { + end := i + loadBatchSize + if end > len(events) { + end = len(events) + } + insertEvent(t, ch, events[i:end]) + } +} diff --git a/e2e/permission_test.go b/e2e/permission_test.go index 2dbb957..d222d77 100644 --- a/e2e/permission_test.go +++ b/e2e/permission_test.go @@ -132,7 +132,7 @@ func TestPermission(t *testing.T) { to: "2023-01-02T00:00:00Z" mechanism: ignitionDetection ) { - startTime + start { timestamp } } }`, permissions: []string{tokenclaims.PermissionGetLocationHistory, tokenclaims.PermissionGetNonLocationHistory}, @@ -147,7 +147,7 @@ func TestPermission(t *testing.T) { to: "2023-01-02T00:00:00Z" mechanism: ignitionDetection ) { - startTime + start { timestamp } } }`, permissions: []string{tokenclaims.PermissionGetNonLocationHistory}, @@ -163,7 +163,7 @@ func TestPermission(t *testing.T) { to: "2023-01-02T00:00:00Z" mechanism: ignitionDetection ) { - startTime + start { timestamp } } }`, permissions: []string{tokenclaims.PermissionGetLocationHistory}, diff --git a/e2e/segments_test.go b/e2e/segments_test.go index abbf718..43b1338 100644 --- a/e2e/segments_test.go +++ b/e2e/segments_test.go @@ -45,9 +45,9 @@ const ( testTokenID = uint32(12345) testSource = "test-source" testProducer = "test-producer" - // tripDuration must be > 150 seconds (minSegmentDurationSeconds default) - tripDuration = 180 // 3 minutes - // tripGap must be > 600 seconds (minIdleSeconds default) to ensure separate trips + // tripDuration must be >= 240s (defaultMinSegmentDurationSeconds in detectors) + tripDuration = 300 // 5 minutes + // tripGap must be > 300s (defaultMinIdleSeconds) to ensure separate trips tripGap = 720 // 12 minutes ) @@ 
-352,4 +352,50 @@ func TestSegmentDetectors(t *testing.T) { segments[0].StartTime.Format(time.RFC3339), fromMidTrip1.Format(time.RFC3339)) assert.False(t, segments[1].StartedBeforeRange, "Trip 2 should have StartedBeforeRange=false") }) + + // Excessive idling: insert engine speed (RPM) in idle range for a contiguous period + idleStart := baseTime.Add(48 * time.Hour) + idleDurationSec := 15 * 60 // 15 minutes + t.Run("StaticRpm", func(t *testing.T) { + idleSignals := generateIdleRpmSignals(idleStart, idleDurationSec) + insertTestSignals(t, conn, idleSignals) + + fromIdle := idleStart.Add(-1 * time.Hour) + toIdle := idleStart.Add(time.Duration(idleDurationSec)*time.Second + 1*time.Hour) + + detector := ch.NewStaticRpmDetector(conn) + segments, err := detector.DetectSegments(ctx, testTokenID, fromIdle, toIdle, nil) + require.NoError(t, err) + + require.Len(t, segments, 1, "Expected 1 static RPM (idling) segment") + seg := segments[0] + assert.False(t, seg.IsOngoing) + assert.NotNil(t, seg.EndTime) + // minSegmentDurationSeconds default is 240 (4 min); we have 15 min of idle + assert.GreaterOrEqual(t, seg.DurationSeconds, int32(240)) + t.Logf("Idling segment: %s - %v (duration: %ds)", + seg.StartTime.Format(time.RFC3339), seg.EndTime, seg.DurationSeconds) + }) +} + +// generateIdleRpmSignals creates powertrainCombustionEngineSpeed signals in idle range (e.g. 800 rpm) +// at 10s intervals for the given duration so 60s windows have enough samples and max(rpm) <= 1000. 
+func generateIdleRpmSignals(startTime time.Time, durationSeconds int) []testSignal { + const engineSpeedName = "powertrainCombustionEngineSpeed" + const idleRpm = 800.0 + signals := []testSignal{} + for offset := 0; offset < durationSeconds; offset += 10 { + ts := startTime.Add(time.Duration(offset) * time.Second) + signals = append(signals, testSignal{ + TokenID: testTokenID, + Timestamp: ts, + Name: engineSpeedName, + ValueNumber: idleRpm, + ValueString: "", + Source: testSource, + Producer: testProducer, + CloudEventID: fmt.Sprintf("idle-%s-%d", ts.Format("150405"), offset), + }) + } + return signals } diff --git a/internal/graph/generated.go b/internal/graph/generated.go index 4433709..a471beb 100644 --- a/internal/graph/generated.go +++ b/internal/graph/generated.go @@ -68,8 +68,19 @@ type ComplexityRoot struct { VehicleTokenID func(childComplexity int) int } + DailyActivity struct { + Date func(childComplexity int) int + Duration func(childComplexity int) int + End func(childComplexity int) int + EventCounts func(childComplexity int) int + SegmentCount func(childComplexity int) int + Signals func(childComplexity int) int + Start func(childComplexity int) int + } + DataSummary struct { AvailableSignals func(childComplexity int) int + EventDataSummary func(childComplexity int) int FirstSeen func(childComplexity int) int LastSeen func(childComplexity int) int NumberOfSignals func(childComplexity int) int @@ -88,6 +99,11 @@ type ComplexityRoot struct { Timestamp func(childComplexity int) int } + EventCount struct { + Count func(childComplexity int) int + Name func(childComplexity int) int + } + Location struct { Hdop func(childComplexity int) int Latitude func(childComplexity int) int @@ -105,24 +121,33 @@ type ComplexityRoot struct { Query struct { Attestations func(childComplexity int, tokenID *int, subject *string, filter *model.AttestationFilter) int AvailableSignals func(childComplexity int, tokenID int, filter *model.SignalFilter) int + DailyActivity 
func(childComplexity int, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, timezone *string) int DataSummary func(childComplexity int, tokenID int, filter *model.SignalFilter) int DeviceActivity func(childComplexity int, by model.AftermarketDeviceBy) int Events func(childComplexity int, tokenID int, from time.Time, to time.Time, filter *model.EventFilter) int PomVCLatest func(childComplexity int, tokenID int) int - Segments func(childComplexity int, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig) int + Segments func(childComplexity int, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, limit *int, after *time.Time) int Signals func(childComplexity int, tokenID int, interval string, from time.Time, to time.Time, filter *model.SignalFilter) int SignalsLatest func(childComplexity int, tokenID int, filter *model.SignalFilter) int VinVCLatest func(childComplexity int, tokenID int) int } Segment struct { - DurationSeconds func(childComplexity int) int - EndTime func(childComplexity int) int + Duration func(childComplexity int) int + End func(childComplexity int) int + EventCounts func(childComplexity int) int IsOngoing func(childComplexity int) int - StartTime func(childComplexity int) int + Signals func(childComplexity int) int + Start func(childComplexity int) int StartedBeforeRange func(childComplexity int) int } + SignalAggregationValue struct { + Agg func(childComplexity int) int + Name func(childComplexity int) int + Value func(childComplexity int) int + } + SignalAggregations struct { AngularVelocityYaw func(childComplexity int, agg model.FloatAggregation, filter *model.SignalFloatFilter) int 
BodyLightsIsAirbagWarningOn func(childComplexity int, agg model.FloatAggregation, filter *model.SignalFloatFilter) int @@ -392,6 +417,13 @@ type ComplexityRoot struct { Vin func(childComplexity int) int } + EventDataSummary struct { + FirstSeen func(childComplexity int) int + LastSeen func(childComplexity int) int + Name func(childComplexity int) int + NumberOfEvents func(childComplexity int) int + } + SignalDataSummary struct { FirstSeen func(childComplexity int) int LastSeen func(childComplexity int) int @@ -408,7 +440,8 @@ type QueryResolver interface { Attestations(ctx context.Context, tokenID *int, subject *string, filter *model.AttestationFilter) ([]*model.Attestation, error) DeviceActivity(ctx context.Context, by model.AftermarketDeviceBy) (*model.DeviceActivity, error) Events(ctx context.Context, tokenID int, from time.Time, to time.Time, filter *model.EventFilter) ([]*model.Event, error) - Segments(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig) ([]*model.Segment, error) + Segments(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, limit *int, after *time.Time) ([]*model.Segment, error) + DailyActivity(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, timezone *string) ([]*model.DailyActivity, error) VinVCLatest(ctx context.Context, tokenID int) (*model.Vinvc, error) PomVCLatest(ctx context.Context, tokenID int) (*model.Pomvc, error) } @@ -612,12 +645,61 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Attestation.VehicleTokenID(childComplexity), true + case "DailyActivity.date": + if 
e.complexity.DailyActivity.Date == nil { + break + } + + return e.complexity.DailyActivity.Date(childComplexity), true + case "DailyActivity.duration": + if e.complexity.DailyActivity.Duration == nil { + break + } + + return e.complexity.DailyActivity.Duration(childComplexity), true + case "DailyActivity.end": + if e.complexity.DailyActivity.End == nil { + break + } + + return e.complexity.DailyActivity.End(childComplexity), true + case "DailyActivity.eventCounts": + if e.complexity.DailyActivity.EventCounts == nil { + break + } + + return e.complexity.DailyActivity.EventCounts(childComplexity), true + case "DailyActivity.segmentCount": + if e.complexity.DailyActivity.SegmentCount == nil { + break + } + + return e.complexity.DailyActivity.SegmentCount(childComplexity), true + case "DailyActivity.signals": + if e.complexity.DailyActivity.Signals == nil { + break + } + + return e.complexity.DailyActivity.Signals(childComplexity), true + case "DailyActivity.start": + if e.complexity.DailyActivity.Start == nil { + break + } + + return e.complexity.DailyActivity.Start(childComplexity), true + case "DataSummary.availableSignals": if e.complexity.DataSummary.AvailableSignals == nil { break } return e.complexity.DataSummary.AvailableSignals(childComplexity), true + case "DataSummary.eventDataSummary": + if e.complexity.DataSummary.EventDataSummary == nil { + break + } + + return e.complexity.DataSummary.EventDataSummary(childComplexity), true case "DataSummary.firstSeen": if e.complexity.DataSummary.FirstSeen == nil { break @@ -681,6 +763,19 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Event.Timestamp(childComplexity), true + case "EventCount.count": + if e.complexity.EventCount.Count == nil { + break + } + + return e.complexity.EventCount.Count(childComplexity), true + case "EventCount.name": + if e.complexity.EventCount.Name == nil { + break + } + + return e.complexity.EventCount.Name(childComplexity), true + 
case "Location.hdop": if e.complexity.Location.Hdop == nil { break @@ -753,6 +848,17 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin } return e.complexity.Query.AvailableSignals(childComplexity, args["tokenId"].(int), args["filter"].(*model.SignalFilter)), true + case "Query.dailyActivity": + if e.complexity.Query.DailyActivity == nil { + break + } + + args, err := ec.field_Query_dailyActivity_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.DailyActivity(childComplexity, args["tokenId"].(int), args["from"].(time.Time), args["to"].(time.Time), args["mechanism"].(model.DetectionMechanism), args["config"].(*model.SegmentConfig), args["signalRequests"].([]*model.SegmentSignalRequest), args["eventRequests"].([]*model.SegmentEventRequest), args["timezone"].(*string)), true case "Query.dataSummary": if e.complexity.Query.DataSummary == nil { break @@ -807,7 +913,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return 0, false } - return e.complexity.Query.Segments(childComplexity, args["tokenId"].(int), args["from"].(time.Time), args["to"].(time.Time), args["mechanism"].(model.DetectionMechanism), args["config"].(*model.SegmentConfig)), true + return e.complexity.Query.Segments(childComplexity, args["tokenId"].(int), args["from"].(time.Time), args["to"].(time.Time), args["mechanism"].(model.DetectionMechanism), args["config"].(*model.SegmentConfig), args["signalRequests"].([]*model.SegmentSignalRequest), args["eventRequests"].([]*model.SegmentEventRequest), args["limit"].(*int), args["after"].(*time.Time)), true case "Query.signals": if e.complexity.Query.Signals == nil { break @@ -842,30 +948,42 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.VinVCLatest(childComplexity, args["tokenId"].(int)), true - case "Segment.durationSeconds": - if e.complexity.Segment.DurationSeconds == nil { + case 
"Segment.duration": + if e.complexity.Segment.Duration == nil { break } - return e.complexity.Segment.DurationSeconds(childComplexity), true - case "Segment.endTime": - if e.complexity.Segment.EndTime == nil { + return e.complexity.Segment.Duration(childComplexity), true + case "Segment.end": + if e.complexity.Segment.End == nil { break } - return e.complexity.Segment.EndTime(childComplexity), true + return e.complexity.Segment.End(childComplexity), true + case "Segment.eventCounts": + if e.complexity.Segment.EventCounts == nil { + break + } + + return e.complexity.Segment.EventCounts(childComplexity), true case "Segment.isOngoing": if e.complexity.Segment.IsOngoing == nil { break } return e.complexity.Segment.IsOngoing(childComplexity), true - case "Segment.startTime": - if e.complexity.Segment.StartTime == nil { + case "Segment.signals": + if e.complexity.Segment.Signals == nil { + break + } + + return e.complexity.Segment.Signals(childComplexity), true + case "Segment.start": + if e.complexity.Segment.Start == nil { break } - return e.complexity.Segment.StartTime(childComplexity), true + return e.complexity.Segment.Start(childComplexity), true case "Segment.startedBeforeRange": if e.complexity.Segment.StartedBeforeRange == nil { break @@ -873,6 +991,25 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Segment.StartedBeforeRange(childComplexity), true + case "SignalAggregationValue.agg": + if e.complexity.SignalAggregationValue.Agg == nil { + break + } + + return e.complexity.SignalAggregationValue.Agg(childComplexity), true + case "SignalAggregationValue.name": + if e.complexity.SignalAggregationValue.Name == nil { + break + } + + return e.complexity.SignalAggregationValue.Name(childComplexity), true + case "SignalAggregationValue.value": + if e.complexity.SignalAggregationValue.Value == nil { + break + } + + return e.complexity.SignalAggregationValue.Value(childComplexity), true + case 
"SignalAggregations.angularVelocityYaw": if e.complexity.SignalAggregations.AngularVelocityYaw == nil { break @@ -2970,6 +3107,31 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.VINVC.Vin(childComplexity), true + case "eventDataSummary.firstSeen": + if e.complexity.EventDataSummary.FirstSeen == nil { + break + } + + return e.complexity.EventDataSummary.FirstSeen(childComplexity), true + case "eventDataSummary.lastSeen": + if e.complexity.EventDataSummary.LastSeen == nil { + break + } + + return e.complexity.EventDataSummary.LastSeen(childComplexity), true + case "eventDataSummary.name": + if e.complexity.EventDataSummary.Name == nil { + break + } + + return e.complexity.EventDataSummary.Name(childComplexity), true + case "eventDataSummary.numberOfEvents": + if e.complexity.EventDataSummary.NumberOfEvents == nil { + break + } + + return e.complexity.EventDataSummary.NumberOfEvents(childComplexity), true + case "signalDataSummary.firstSeen": if e.complexity.SignalDataSummary.FirstSeen == nil { break @@ -3009,6 +3171,8 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { ec.unmarshalInputFilterLocation, ec.unmarshalInputInCircleFilter, ec.unmarshalInputSegmentConfig, + ec.unmarshalInputSegmentEventRequest, + ec.unmarshalInputSegmentSignalRequest, ec.unmarshalInputSignalFilter, ec.unmarshalInputSignalFloatFilter, ec.unmarshalInputSignalLocationFilter, @@ -3209,6 +3373,11 @@ input AttestationFilter { """ limit: Int + """ + Cursor for pagination (exclusive). + """ + cursor: Time + """ Filter attestations by tags. """ @@ -3386,6 +3555,30 @@ type DataSummary { data summary of an individual signal """ signalDataSummary: [signalDataSummary!]! + + """ + Events known to the vehicle: per-event name, count, and first/last seen. + """ + eventDataSummary: [eventDataSummary!]! +} + +type eventDataSummary { + """ + Event name + """ + name: String! 
+ """ + Number of times this event occurred for the vehicle + """ + numberOfEvents: Uint64! + """ + First seen timestamp + """ + firstSeen: Time! + """ + Last seen timestamp + """ + lastSeen: Time! } type signalDataSummary { @@ -3477,7 +3670,7 @@ type SignalLocation { timestamp: Time! """ - value of the signal + location (latitude, longitude, hdop) at this timestamp. """ value: Location! } @@ -3512,6 +3705,24 @@ type Location { hdop: Float! } +""" +Result of aggregating a float signal over an interval. Used by segments and daily activity summaries. +Same shape as one row of aggregated signal data (name, aggregation type, computed value). +""" +type SignalAggregationValue { + name: String! + agg: String! + value: Float! +} + +""" +Event name and count. Used by segments, daily activity, and event summaries. +""" +type EventCount { + name: String! + count: Int! +} + """ Filters that apply to locations. """ @@ -3718,6 +3929,12 @@ input EventFilter { Best alternative when ignition signal is unavailable - same accuracy, same speed as frequency analysis. """ changePointDetection + + """ + Static RPM: Segments are contiguous periods where engine RPM remains in idle range. + Uses repeated windows of idle RPM (e.g. powertrainCombustionEngineSpeed <= maxIdleRpm) merged like trips. + """ + staticRpm } extend type Query { @@ -3728,10 +3945,14 @@ extend type Query { Detection mechanisms: - ignitionDetection: Uses 'isIgnitionOn' signal with configurable debouncing - frequencyAnalysis: Analyzes signal update frequency to detect activity periods - - sparseSampling: Samples 5-10% of signals for cost-effective detection + - changePointDetection: CUSUM-based regime change detection + - staticRpm: Idle RPM windows merged into segments Segment IDs are stable and consistent across queries as long as the segment start is captured in the underlying data source. 
+ + When signalRequests and/or eventRequests are provided, each segment includes + optional signals, start/end points (end only when not ongoing), and eventCounts. + """ segments( tokenId: Int! @@ -3739,7 +3960,60 @@ extend type Query { to: Time! mechanism: DetectionMechanism! config: SegmentConfig - ): [Segment!] @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) + signalRequests: [SegmentSignalRequest!] + eventRequests: [SegmentEventRequest!] + """ + Maximum number of segments to return. Default 100, max 200. + """ + limit: Int = 100 + """ + Cursor for pagination: return only segments with start.timestamp > after (exclusive). + Pass the start.timestamp of the last segment from the previous page for the next page. + """ + after: Time + ): [Segment!]! @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) + + """ + Returns one record per calendar day in the requested date range (activity segments only). + Mechanism must be ignitionDetection, frequencyAnalysis, or changePointDetection (staticRpm not allowed). + Maximum date range: 30 days. + """ + dailyActivity( + tokenId: Int! + from: Time! + to: Time! + mechanism: DetectionMechanism! + config: SegmentConfig + signalRequests: [SegmentSignalRequest!] + eventRequests: [SegmentEventRequest!] + timezone: String + ): [DailyActivity!]! @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) +} + +input SegmentSignalRequest { + name: String! + agg: FloatAggregation! +} + +input SegmentEventRequest { + name: String! +} + +type DailyActivity { + """Start of that calendar day (midnight in requested timezone), as UTC.""" + date: Time! + """Number of activity segments that started or fell within that day.""" + segmentCount: Int! + """Sum of segment durations (total active time that day) in seconds.""" + duration: Int! 
+ """Start of day (timestamp = day start, value = location). Same shape as Segment.start. Null if not available.""" + start: SignalLocation + """End of day (timestamp = day end, location). Same shape as Segment.end. Null if not available.""" + end: SignalLocation + """Per-day signal aggregates (same shape as segment signals).""" + signals: [SignalAggregationValue!]! + """Per-day event counts.""" + eventCounts: [EventCount!]! } input SegmentConfig { @@ -3759,44 +4033,55 @@ input SegmentConfig { minSegmentDurationSeconds: Int = 240 """ - [frequencyAnalysis only] Minimum signal count per window for activity detection. - Higher values = more conservative (filters parked telemetry better). - Lower values = more sensitive (works for sparse signal vehicles). - Default: 10 (tuned to match ignition detection accuracy) - Min: 1, Max: 3600 + [frequencyAnalysis] Minimum signal count per window for activity detection. + [staticRpm] Minimum samples per window to consider it idle (same semantics). + Higher values = more conservative. Lower values = more sensitive. + Default: 10, Min: 1, Max: 3600 """ signalCountThreshold: Int = 10 + + """ + [staticRpm only] Upper bound for idle RPM. Windows with max(RPM) <= this are considered idle. + Default: 1500, Min: 300, Max: 3000 + """ + maxIdleRpm: Int = 1500 } type Segment { """ - Segment start timestamp (actual activity start transition) + Segment start (timestamp and location). Uses SignalLocation; always present. """ - startTime: Time! + start: SignalLocation! """ - Segment end timestamp (activity end after debounce period). - Null if segment is ongoing (extends beyond query range). + Segment end (timestamp and location). Uses SignalLocation; omitted when isOngoing is true. """ - endTime: Time + end: SignalLocation """ - Duration in seconds. - If ongoing: from start to query 'to' time. - If complete: from start to end. + Duration in seconds. If ongoing: from start to query 'to'. If complete: from start to end. 
""" - durationSeconds: Int! + duration: Int! """ True if segment extends beyond query time range (last activity is ongoing). + When true, end is not included in the response. """ isOngoing: Boolean! """ True if segment started before query time range. - Indicates startTime may be approximate. """ startedBeforeRange: Boolean! + + """ + Per-segment signal aggregates. Same shape as signals elsewhere (name, agg, value). + """ + signals: [SignalAggregationValue!] + """ + Per-segment event counts. + """ + eventCounts: [EventCount!] } `, BuiltIn: false}, @@ -5851,6 +6136,52 @@ func (ec *executionContext) field_Query_availableSignals_args(ctx context.Contex return args, nil } +func (ec *executionContext) field_Query_dailyActivity_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := graphql.ProcessArgField(ctx, rawArgs, "tokenId", ec.unmarshalNInt2int) + if err != nil { + return nil, err + } + args["tokenId"] = arg0 + arg1, err := graphql.ProcessArgField(ctx, rawArgs, "from", ec.unmarshalNTime2timeᚐTime) + if err != nil { + return nil, err + } + args["from"] = arg1 + arg2, err := graphql.ProcessArgField(ctx, rawArgs, "to", ec.unmarshalNTime2timeᚐTime) + if err != nil { + return nil, err + } + args["to"] = arg2 + arg3, err := graphql.ProcessArgField(ctx, rawArgs, "mechanism", ec.unmarshalNDetectionMechanism2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDetectionMechanism) + if err != nil { + return nil, err + } + args["mechanism"] = arg3 + arg4, err := graphql.ProcessArgField(ctx, rawArgs, "config", ec.unmarshalOSegmentConfig2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentConfig) + if err != nil { + return nil, err + } + args["config"] = arg4 + arg5, err := graphql.ProcessArgField(ctx, rawArgs, "signalRequests", ec.unmarshalOSegmentSignalRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentSignalRequestᚄ) + if err != nil { + return nil, err 
+ } + args["signalRequests"] = arg5 + arg6, err := graphql.ProcessArgField(ctx, rawArgs, "eventRequests", ec.unmarshalOSegmentEventRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentEventRequestᚄ) + if err != nil { + return nil, err + } + args["eventRequests"] = arg6 + arg7, err := graphql.ProcessArgField(ctx, rawArgs, "timezone", ec.unmarshalOString2ᚖstring) + if err != nil { + return nil, err + } + args["timezone"] = arg7 + return args, nil +} + func (ec *executionContext) field_Query_dataSummary_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -5943,6 +6274,26 @@ func (ec *executionContext) field_Query_segments_args(ctx context.Context, rawAr return nil, err } args["config"] = arg4 + arg5, err := graphql.ProcessArgField(ctx, rawArgs, "signalRequests", ec.unmarshalOSegmentSignalRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentSignalRequestᚄ) + if err != nil { + return nil, err + } + args["signalRequests"] = arg5 + arg6, err := graphql.ProcessArgField(ctx, rawArgs, "eventRequests", ec.unmarshalOSegmentEventRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentEventRequestᚄ) + if err != nil { + return nil, err + } + args["eventRequests"] = arg6 + arg7, err := graphql.ProcessArgField(ctx, rawArgs, "limit", ec.unmarshalOInt2ᚖint) + if err != nil { + return nil, err + } + args["limit"] = arg7 + arg8, err := graphql.ProcessArgField(ctx, rawArgs, "after", ec.unmarshalOTime2ᚖtimeᚐTime) + if err != nil { + return nil, err + } + args["after"] = arg8 return args, nil } @@ -8168,6 +8519,235 @@ func (ec *executionContext) fieldContext_Attestation_tags(_ context.Context, fie return fc, nil } +func (ec *executionContext) _DailyActivity_date(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + 
ec.fieldContext_DailyActivity_date, + func(ctx context.Context) (any, error) { + return obj.Date, nil + }, + nil, + ec.marshalNTime2timeᚐTime, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_date(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_segmentCount(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_segmentCount, + func(ctx context.Context) (any, error) { + return obj.SegmentCount, nil + }, + nil, + ec.marshalNInt2int, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_segmentCount(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_duration(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_duration, + func(ctx context.Context) (any, error) { + return obj.Duration, nil + }, + nil, + ec.marshalNInt2int, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_duration(_ context.Context, 
field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_start(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_start, + func(ctx context.Context) (any, error) { + return obj.Start, nil + }, + nil, + ec.marshalOSignalLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalLocation, + true, + false, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_start(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "timestamp": + return ec.fieldContext_SignalLocation_timestamp(ctx, field) + case "value": + return ec.fieldContext_SignalLocation_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalLocation", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_end(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_end, + func(ctx context.Context) (any, error) { + return obj.End, nil + }, + nil, + ec.marshalOSignalLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalLocation, + true, + false, + ) +} + +func (ec 
*executionContext) fieldContext_DailyActivity_end(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "timestamp": + return ec.fieldContext_SignalLocation_timestamp(ctx, field) + case "value": + return ec.fieldContext_SignalLocation_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalLocation", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_signals(ctx context.Context, field graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_signals, + func(ctx context.Context) (any, error) { + return obj.Signals, nil + }, + nil, + ec.marshalNSignalAggregationValue2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValueᚄ, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_signals(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_SignalAggregationValue_name(ctx, field) + case "agg": + return ec.fieldContext_SignalAggregationValue_agg(ctx, field) + case "value": + return ec.fieldContext_SignalAggregationValue_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalAggregationValue", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _DailyActivity_eventCounts(ctx context.Context, field 
graphql.CollectedField, obj *model.DailyActivity) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DailyActivity_eventCounts, + func(ctx context.Context) (any, error) { + return obj.EventCounts, nil + }, + nil, + ec.marshalNEventCount2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCountᚄ, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DailyActivity_eventCounts(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DailyActivity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_EventCount_name(ctx, field) + case "count": + return ec.fieldContext_EventCount_count(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type EventCount", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _DataSummary_numberOfSignals(ctx context.Context, field graphql.CollectedField, obj *model.DataSummary) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -8323,6 +8903,45 @@ func (ec *executionContext) fieldContext_DataSummary_signalDataSummary(_ context return fc, nil } +func (ec *executionContext) _DataSummary_eventDataSummary(ctx context.Context, field graphql.CollectedField, obj *model.DataSummary) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_DataSummary_eventDataSummary, + func(ctx context.Context) (any, error) { + return obj.EventDataSummary, nil + }, + nil, + ec.marshalNeventDataSummary2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventDataSummaryᚄ, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_DataSummary_eventDataSummary(_ context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "DataSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_eventDataSummary_name(ctx, field) + case "numberOfEvents": + return ec.fieldContext_eventDataSummary_numberOfEvents(ctx, field) + case "firstSeen": + return ec.fieldContext_eventDataSummary_firstSeen(ctx, field) + case "lastSeen": + return ec.fieldContext_eventDataSummary_lastSeen(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type eventDataSummary", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _DeviceActivity_lastActive(ctx context.Context, field graphql.CollectedField, obj *model.DeviceActivity) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -8497,6 +9116,64 @@ func (ec *executionContext) fieldContext_Event_metadata(_ context.Context, field return fc, nil } +func (ec *executionContext) _EventCount_name(ctx context.Context, field graphql.CollectedField, obj *model.EventCount) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_EventCount_name, + func(ctx context.Context) (any, error) { + return obj.Name, nil + }, + nil, + ec.marshalNString2string, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_EventCount_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "EventCount", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _EventCount_count(ctx context.Context, field graphql.CollectedField, obj 
*model.EventCount) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_EventCount_count, + func(ctx context.Context) (any, error) { + return obj.Count, nil + }, + nil, + ec.marshalNInt2int, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_EventCount_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "EventCount", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Location_latitude(ctx context.Context, field graphql.CollectedField, obj *model.Location) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -9415,6 +10092,8 @@ func (ec *executionContext) fieldContext_Query_dataSummary(ctx context.Context, return ec.fieldContext_DataSummary_lastSeen(ctx, field) case "signalDataSummary": return ec.fieldContext_DataSummary_signalDataSummary(ctx, field) + case "eventDataSummary": + return ec.fieldContext_DataSummary_eventDataSummary(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type DataSummary", field.Name) }, @@ -9652,7 +10331,7 @@ func (ec *executionContext) _Query_segments(ctx context.Context, field graphql.C ec.fieldContext_Query_segments, func(ctx context.Context) (any, error) { fc := graphql.GetFieldContext(ctx) - return ec.resolvers.Query().Segments(ctx, fc.Args["tokenId"].(int), fc.Args["from"].(time.Time), fc.Args["to"].(time.Time), fc.Args["mechanism"].(model.DetectionMechanism), fc.Args["config"].(*model.SegmentConfig)) + return ec.resolvers.Query().Segments(ctx, fc.Args["tokenId"].(int), fc.Args["from"].(time.Time), fc.Args["to"].(time.Time), fc.Args["mechanism"].(model.DetectionMechanism), fc.Args["config"].(*model.SegmentConfig), 
fc.Args["signalRequests"].([]*model.SegmentSignalRequest), fc.Args["eventRequests"].([]*model.SegmentEventRequest), fc.Args["limit"].(*int), fc.Args["after"].(*time.Time)) }, func(ctx context.Context, next graphql.Resolver) graphql.Resolver { directive0 := next @@ -9680,9 +10359,9 @@ func (ec *executionContext) _Query_segments(ctx context.Context, field graphql.C next = directive2 return next }, - ec.marshalOSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentᚄ, + ec.marshalNSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentᚄ, + true, true, - false, ) } @@ -9694,16 +10373,20 @@ func (ec *executionContext) fieldContext_Query_segments(ctx context.Context, fie IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { - case "startTime": - return ec.fieldContext_Segment_startTime(ctx, field) - case "endTime": - return ec.fieldContext_Segment_endTime(ctx, field) - case "durationSeconds": - return ec.fieldContext_Segment_durationSeconds(ctx, field) + case "start": + return ec.fieldContext_Segment_start(ctx, field) + case "end": + return ec.fieldContext_Segment_end(ctx, field) + case "duration": + return ec.fieldContext_Segment_duration(ctx, field) case "isOngoing": return ec.fieldContext_Segment_isOngoing(ctx, field) case "startedBeforeRange": return ec.fieldContext_Segment_startedBeforeRange(ctx, field) + case "signals": + return ec.fieldContext_Segment_signals(ctx, field) + case "eventCounts": + return ec.fieldContext_Segment_eventCounts(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type Segment", field.Name) }, @@ -9722,6 +10405,88 @@ func (ec *executionContext) fieldContext_Query_segments(ctx context.Context, fie return fc, nil } +func (ec *executionContext) _Query_dailyActivity(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + 
field, + ec.fieldContext_Query_dailyActivity, + func(ctx context.Context) (any, error) { + fc := graphql.GetFieldContext(ctx) + return ec.resolvers.Query().DailyActivity(ctx, fc.Args["tokenId"].(int), fc.Args["from"].(time.Time), fc.Args["to"].(time.Time), fc.Args["mechanism"].(model.DetectionMechanism), fc.Args["config"].(*model.SegmentConfig), fc.Args["signalRequests"].([]*model.SegmentSignalRequest), fc.Args["eventRequests"].([]*model.SegmentEventRequest), fc.Args["timezone"].(*string)) + }, + func(ctx context.Context, next graphql.Resolver) graphql.Resolver { + directive0 := next + + directive1 := func(ctx context.Context) (any, error) { + if ec.directives.RequiresVehicleToken == nil { + var zeroVal []*model.DailyActivity + return zeroVal, errors.New("directive requiresVehicleToken is not implemented") + } + return ec.directives.RequiresVehicleToken(ctx, nil, directive0) + } + directive2 := func(ctx context.Context) (any, error) { + privileges, err := ec.unmarshalNPrivilege2ᚕstringᚄ(ctx, []any{"VEHICLE_ALL_TIME_LOCATION", "VEHICLE_NON_LOCATION_DATA"}) + if err != nil { + var zeroVal []*model.DailyActivity + return zeroVal, err + } + if ec.directives.RequiresAllOfPrivileges == nil { + var zeroVal []*model.DailyActivity + return zeroVal, errors.New("directive requiresAllOfPrivileges is not implemented") + } + return ec.directives.RequiresAllOfPrivileges(ctx, nil, directive1, privileges) + } + + next = directive2 + return next + }, + ec.marshalNDailyActivity2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDailyActivityᚄ, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_Query_dailyActivity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "date": + return 
ec.fieldContext_DailyActivity_date(ctx, field) + case "segmentCount": + return ec.fieldContext_DailyActivity_segmentCount(ctx, field) + case "duration": + return ec.fieldContext_DailyActivity_duration(ctx, field) + case "start": + return ec.fieldContext_DailyActivity_start(ctx, field) + case "end": + return ec.fieldContext_DailyActivity_end(ctx, field) + case "signals": + return ec.fieldContext_DailyActivity_signals(ctx, field) + case "eventCounts": + return ec.fieldContext_DailyActivity_eventCounts(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type DailyActivity", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_dailyActivity_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Query_vinVCLatest(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -9994,72 +10759,84 @@ func (ec *executionContext) fieldContext_Query___schema(_ context.Context, field return fc, nil } -func (ec *executionContext) _Segment_startTime(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { +func (ec *executionContext) _Segment_start(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, ec.OperationContext, field, - ec.fieldContext_Segment_startTime, + ec.fieldContext_Segment_start, func(ctx context.Context) (any, error) { - return obj.StartTime, nil + return obj.Start, nil }, nil, - ec.marshalNTime2timeᚐTime, + ec.marshalNSignalLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalLocation, true, true, ) } -func (ec *executionContext) fieldContext_Segment_startTime(_ context.Context, field graphql.CollectedField) 
(fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Segment_start(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Segment", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Time does not have child fields") + switch field.Name { + case "timestamp": + return ec.fieldContext_SignalLocation_timestamp(ctx, field) + case "value": + return ec.fieldContext_SignalLocation_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalLocation", field.Name) }, } return fc, nil } -func (ec *executionContext) _Segment_endTime(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { +func (ec *executionContext) _Segment_end(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, ec.OperationContext, field, - ec.fieldContext_Segment_endTime, + ec.fieldContext_Segment_end, func(ctx context.Context) (any, error) { - return obj.EndTime, nil + return obj.End, nil }, nil, - ec.marshalOTime2ᚖtimeᚐTime, + ec.marshalOSignalLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalLocation, true, false, ) } -func (ec *executionContext) fieldContext_Segment_endTime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Segment_end(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Segment", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Time does not have child fields") + switch field.Name { + case 
"timestamp": + return ec.fieldContext_SignalLocation_timestamp(ctx, field) + case "value": + return ec.fieldContext_SignalLocation_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalLocation", field.Name) }, } return fc, nil } -func (ec *executionContext) _Segment_durationSeconds(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { +func (ec *executionContext) _Segment_duration(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, ec.OperationContext, field, - ec.fieldContext_Segment_durationSeconds, + ec.fieldContext_Segment_duration, func(ctx context.Context) (any, error) { - return obj.DurationSeconds, nil + return obj.Duration, nil }, nil, ec.marshalNInt2int, @@ -10068,7 +10845,7 @@ func (ec *executionContext) _Segment_durationSeconds(ctx context.Context, field ) } -func (ec *executionContext) fieldContext_Segment_durationSeconds(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Segment_duration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Segment", Field: field, @@ -10139,6 +10916,165 @@ func (ec *executionContext) fieldContext_Segment_startedBeforeRange(_ context.Co return fc, nil } +func (ec *executionContext) _Segment_signals(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_Segment_signals, + func(ctx context.Context) (any, error) { + return obj.Signals, nil + }, + nil, + ec.marshalOSignalAggregationValue2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValueᚄ, + true, + false, + ) +} + +func (ec *executionContext) fieldContext_Segment_signals(_ context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Segment", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_SignalAggregationValue_name(ctx, field) + case "agg": + return ec.fieldContext_SignalAggregationValue_agg(ctx, field) + case "value": + return ec.fieldContext_SignalAggregationValue_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type SignalAggregationValue", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Segment_eventCounts(ctx context.Context, field graphql.CollectedField, obj *model.Segment) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_Segment_eventCounts, + func(ctx context.Context) (any, error) { + return obj.EventCounts, nil + }, + nil, + ec.marshalOEventCount2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCountᚄ, + true, + false, + ) +} + +func (ec *executionContext) fieldContext_Segment_eventCounts(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Segment", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_EventCount_name(ctx, field) + case "count": + return ec.fieldContext_EventCount_count(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type EventCount", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _SignalAggregationValue_name(ctx context.Context, field graphql.CollectedField, obj *model.SignalAggregationValue) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + 
ec.OperationContext, + field, + ec.fieldContext_SignalAggregationValue_name, + func(ctx context.Context) (any, error) { + return obj.Name, nil + }, + nil, + ec.marshalNString2string, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_SignalAggregationValue_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "SignalAggregationValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _SignalAggregationValue_agg(ctx context.Context, field graphql.CollectedField, obj *model.SignalAggregationValue) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_SignalAggregationValue_agg, + func(ctx context.Context) (any, error) { + return obj.Agg, nil + }, + nil, + ec.marshalNString2string, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_SignalAggregationValue_agg(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "SignalAggregationValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _SignalAggregationValue_value(ctx context.Context, field graphql.CollectedField, obj *model.SignalAggregationValue) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_SignalAggregationValue_value, + func(ctx context.Context) (any, error) { + return obj.Value, nil + }, + nil, + ec.marshalNFloat2float64, + true, + 
true, + ) +} + +func (ec *executionContext) fieldContext_SignalAggregationValue_value(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "SignalAggregationValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _SignalAggregations_timestamp(ctx context.Context, field graphql.CollectedField, obj *model.SignalAggregations) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -27651,6 +28587,122 @@ func (ec *executionContext) fieldContext___Type_isOneOf(_ context.Context, field return fc, nil } +func (ec *executionContext) _eventDataSummary_name(ctx context.Context, field graphql.CollectedField, obj *model.EventDataSummary) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_eventDataSummary_name, + func(ctx context.Context) (any, error) { + return obj.Name, nil + }, + nil, + ec.marshalNString2string, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_eventDataSummary_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "eventDataSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _eventDataSummary_numberOfEvents(ctx context.Context, field graphql.CollectedField, obj *model.EventDataSummary) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_eventDataSummary_numberOfEvents, + func(ctx 
context.Context) (any, error) { + return obj.NumberOfEvents, nil + }, + nil, + ec.marshalNUint642uint64, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_eventDataSummary_numberOfEvents(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "eventDataSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Uint64 does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _eventDataSummary_firstSeen(ctx context.Context, field graphql.CollectedField, obj *model.EventDataSummary) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_eventDataSummary_firstSeen, + func(ctx context.Context) (any, error) { + return obj.FirstSeen, nil + }, + nil, + ec.marshalNTime2timeᚐTime, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_eventDataSummary_firstSeen(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "eventDataSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _eventDataSummary_lastSeen(ctx context.Context, field graphql.CollectedField, obj *model.EventDataSummary) (ret graphql.Marshaler) { + return graphql.ResolveField( + ctx, + ec.OperationContext, + field, + ec.fieldContext_eventDataSummary_lastSeen, + func(ctx context.Context) (any, error) { + return obj.LastSeen, nil + }, + nil, + ec.marshalNTime2timeᚐTime, + true, + true, + ) +} + +func (ec *executionContext) fieldContext_eventDataSummary_lastSeen(_ 
context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "eventDataSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Time does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _signalDataSummary_name(ctx context.Context, field graphql.CollectedField, obj *model.SignalDataSummary) (ret graphql.Marshaler) { return graphql.ResolveField( ctx, @@ -27819,7 +28871,7 @@ func (ec *executionContext) unmarshalInputAttestationFilter(ctx context.Context, asMap[k] = v } - fieldsInOrder := [...]string{"id", "source", "dataVersion", "producer", "before", "after", "limit", "tags"} + fieldsInOrder := [...]string{"id", "source", "dataVersion", "producer", "before", "after", "limit", "cursor", "tags"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { @@ -27875,6 +28927,13 @@ func (ec *executionContext) unmarshalInputAttestationFilter(ctx context.Context, return it, err } it.Limit = data + case "cursor": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cursor")) + data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + it.Cursor = data case "tags": ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tags")) data, err := ec.unmarshalOStringArrayFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringArrayFilter(ctx, v) @@ -28013,8 +29072,11 @@ func (ec *executionContext) unmarshalInputSegmentConfig(ctx context.Context, obj if _, present := asMap["signalCountThreshold"]; !present { asMap["signalCountThreshold"] = 10 } + if _, present := asMap["maxIdleRpm"]; !present { + asMap["maxIdleRpm"] = 1500 + } - fieldsInOrder := [...]string{"minIdleSeconds", "minSegmentDurationSeconds", "signalCountThreshold"} + fieldsInOrder := [...]string{"minIdleSeconds", 
"minSegmentDurationSeconds", "signalCountThreshold", "maxIdleRpm"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { @@ -28042,6 +29104,74 @@ func (ec *executionContext) unmarshalInputSegmentConfig(ctx context.Context, obj return it, err } it.SignalCountThreshold = data + case "maxIdleRpm": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("maxIdleRpm")) + data, err := ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + it.MaxIdleRpm = data + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputSegmentEventRequest(ctx context.Context, obj any) (model.SegmentEventRequest, error) { + var it model.SegmentEventRequest + asMap := map[string]any{} + for k, v := range obj.(map[string]any) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"name"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Name = data + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputSegmentSignalRequest(ctx context.Context, obj any) (model.SegmentSignalRequest, error) { + var it model.SegmentSignalRequest + asMap := map[string]any{} + for k, v := range obj.(map[string]any) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"name", "agg"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "name": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Name = data + case "agg": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("agg")) + data, err := ec.unmarshalNFloatAggregation2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐFloatAggregation(ctx, v) + if err != nil { + return it, err + } + it.Agg = data 
} } @@ -28388,6 +29518,69 @@ func (ec *executionContext) _Attestation(ctx context.Context, sel ast.SelectionS return out } +var dailyActivityImplementors = []string{"DailyActivity"} + +func (ec *executionContext) _DailyActivity(ctx context.Context, sel ast.SelectionSet, obj *model.DailyActivity) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, dailyActivityImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DailyActivity") + case "date": + out.Values[i] = ec._DailyActivity_date(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "segmentCount": + out.Values[i] = ec._DailyActivity_segmentCount(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "duration": + out.Values[i] = ec._DailyActivity_duration(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "start": + out.Values[i] = ec._DailyActivity_start(ctx, field, obj) + case "end": + out.Values[i] = ec._DailyActivity_end(ctx, field, obj) + case "signals": + out.Values[i] = ec._DailyActivity_signals(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "eventCounts": + out.Values[i] = ec._DailyActivity_eventCounts(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var dataSummaryImplementors = []string{"DataSummary"} func (ec *executionContext) _DataSummary(ctx context.Context, sel 
ast.SelectionSet, obj *model.DataSummary) graphql.Marshaler { @@ -28424,6 +29617,11 @@ func (ec *executionContext) _DataSummary(ctx context.Context, sel ast.SelectionS if out.Values[i] == graphql.Null { out.Invalids++ } + case "eventDataSummary": + out.Values[i] = ec._DataSummary_eventDataSummary(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -28539,6 +29737,50 @@ func (ec *executionContext) _Event(ctx context.Context, sel ast.SelectionSet, ob return out } +var eventCountImplementors = []string{"EventCount"} + +func (ec *executionContext) _EventCount(ctx context.Context, sel ast.SelectionSet, obj *model.EventCount) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, eventCountImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("EventCount") + case "name": + out.Values[i] = ec._EventCount_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "count": + out.Values[i] = ec._EventCount_count(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var locationImplementors = []string{"Location"} func (ec *executionContext) _Location(ctx context.Context, sel ast.SelectionSet, obj *model.Location) graphql.Marshaler { @@ -28790,13 +30032,38 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "segments": field := 
field - innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) } }() res = ec._Query_segments(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "dailyActivity": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_dailyActivity(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } return res } @@ -28886,15 +30153,15 @@ func (ec *executionContext) _Segment(ctx context.Context, sel ast.SelectionSet, switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Segment") - case "startTime": - out.Values[i] = ec._Segment_startTime(ctx, field, obj) + case "start": + out.Values[i] = ec._Segment_start(ctx, field, obj) if out.Values[i] == graphql.Null { out.Invalids++ } - case "endTime": - out.Values[i] = ec._Segment_endTime(ctx, field, obj) - case "durationSeconds": - out.Values[i] = ec._Segment_durationSeconds(ctx, field, obj) + case "end": + out.Values[i] = ec._Segment_end(ctx, field, obj) + case "duration": + out.Values[i] = ec._Segment_duration(ctx, field, obj) if out.Values[i] == graphql.Null { out.Invalids++ } @@ -28908,6 +30175,59 @@ func (ec *executionContext) _Segment(ctx context.Context, sel ast.SelectionSet, if out.Values[i] == graphql.Null { out.Invalids++ } + case "signals": + out.Values[i] = ec._Segment_signals(ctx, 
field, obj) + case "eventCounts": + out.Values[i] = ec._Segment_eventCounts(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var signalAggregationValueImplementors = []string{"SignalAggregationValue"} + +func (ec *executionContext) _SignalAggregationValue(ctx context.Context, sel ast.SelectionSet, obj *model.SignalAggregationValue) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, signalAggregationValueImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("SignalAggregationValue") + case "name": + out.Values[i] = ec._SignalAggregationValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "agg": + out.Values[i] = ec._SignalAggregationValue_agg(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "value": + out.Values[i] = ec._SignalAggregationValue_value(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -33619,6 +34939,60 @@ func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o return out } +var eventDataSummaryImplementors = []string{"eventDataSummary"} + +func (ec *executionContext) _eventDataSummary(ctx context.Context, sel ast.SelectionSet, obj *model.EventDataSummary) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, eventDataSummaryImplementors) + + out := graphql.NewFieldSet(fields) + deferred := 
make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("eventDataSummary") + case "name": + out.Values[i] = ec._eventDataSummary_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "numberOfEvents": + out.Values[i] = ec._eventDataSummary_numberOfEvents(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "firstSeen": + out.Values[i] = ec._eventDataSummary_firstSeen(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "lastSeen": + out.Values[i] = ec._eventDataSummary_lastSeen(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var signalDataSummaryImplementors = []string{"signalDataSummary"} func (ec *executionContext) _signalDataSummary(ctx context.Context, sel ast.SelectionSet, obj *model.SignalDataSummary) graphql.Marshaler { @@ -33714,6 +35088,60 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se return res } +func (ec *executionContext) marshalNDailyActivity2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDailyActivityᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.DailyActivity) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); 
r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNDailyActivity2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDailyActivity(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNDailyActivity2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDailyActivity(ctx context.Context, sel ast.SelectionSet, v *model.DailyActivity) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DailyActivity(ctx, sel, v) +} + func (ec *executionContext) unmarshalNDetectionMechanism2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐDetectionMechanism(ctx context.Context, v any) (model.DetectionMechanism, error) { var res model.DetectionMechanism err := res.UnmarshalGQL(v) @@ -33734,6 +35162,60 @@ func (ec *executionContext) marshalNEvent2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtel return ec._Event(ctx, sel, v) } +func (ec *executionContext) marshalNEventCount2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.EventCount) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNEventCount2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCount(ctx, sel, v[i]) + } + if 
isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNEventCount2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCount(ctx context.Context, sel ast.SelectionSet, v *model.EventCount) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._EventCount(ctx, sel, v) +} + func (ec *executionContext) unmarshalNFilterLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐFilterLocation(ctx context.Context, v any) (*model.FilterLocation, error) { res, err := ec.unmarshalInputFilterLocation(ctx, v) return &res, graphql.ErrorOnPath(ctx, err) @@ -33880,7 +35362,296 @@ func (ec *executionContext) marshalNPrivilege2ᚕstringᚄ(ctx context.Context, if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNPrivilege2string(ctx, sel, v[i]) + ret[i] = ec.marshalNPrivilege2string(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Segment) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = 
ec.marshalNSegment2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegment(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNSegment2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegment(ctx context.Context, sel ast.SelectionSet, v *model.Segment) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._Segment(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNSegmentEventRequest2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentEventRequest(ctx context.Context, v any) (*model.SegmentEventRequest, error) { + res, err := ec.unmarshalInputSegmentEventRequest(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNSegmentSignalRequest2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentSignalRequest(ctx context.Context, v any) (*model.SegmentSignalRequest, error) { + res, err := ec.unmarshalInputSegmentSignalRequest(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNSignalAggregationValue2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValueᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.SignalAggregationValue) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } 
+ ret[i] = ec.marshalNSignalAggregationValue2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNSignalAggregationValue2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValue(ctx context.Context, sel ast.SelectionSet, v *model.SignalAggregationValue) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._SignalAggregationValue(ctx, sel, v) +} + +func (ec *executionContext) marshalNSignalAggregations2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregations(ctx context.Context, sel ast.SelectionSet, v *model.SignalAggregations) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._SignalAggregations(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNSignalFloatFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalFloatFilter(ctx context.Context, v any) (*model.SignalFloatFilter, error) { + res, err := ec.unmarshalInputSignalFloatFilter(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNSignalLocation2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalLocation(ctx context.Context, sel ast.SelectionSet, v *model.SignalLocation) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return 
ec._SignalLocation(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNString2string(ctx context.Context, v any) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + _ = sel + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v any) ([]string, error) { + var vSlice []any + vSlice = graphql.CoerceList(v) + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNString2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNString2string(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) unmarshalNStringAggregation2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringAggregation(ctx context.Context, v any) (model.StringAggregation, error) { + var res model.StringAggregation + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNStringAggregation2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringAggregation(ctx context.Context, sel ast.SelectionSet, v model.StringAggregation) graphql.Marshaler { + return v +} + +func (ec *executionContext) 
unmarshalNStringArrayFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringArrayFilter(ctx context.Context, v any) (*model.StringArrayFilter, error) { + res, err := ec.unmarshalInputStringArrayFilter(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNStringValueFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringValueFilter(ctx context.Context, v any) (*model.StringValueFilter, error) { + res, err := ec.unmarshalInputStringValueFilter(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v any) (time.Time, error) { + res, err := graphql.UnmarshalTime(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { + _ = sel + res := graphql.MarshalTime(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) unmarshalNUint642uint64(ctx context.Context, v any) (uint64, error) { + res, err := graphql.UnmarshalUint64(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNUint642uint64(ctx context.Context, sel ast.SelectionSet, v uint64) graphql.Marshaler { + _ = sel + res := graphql.MarshalUint64(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler { + return ec.___Directive(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i]) } if isLen1 { f(i) @@ -33900,37 +35671,12 @@ func (ec *executionContext) marshalNPrivilege2ᚕstringᚄ(ctx context.Context, return ret } -func (ec *executionContext) marshalNSegment2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegment(ctx context.Context, sel ast.SelectionSet, v *model.Segment) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._Segment(ctx, sel, v) -} - -func (ec *executionContext) marshalNSignalAggregations2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregations(ctx context.Context, sel ast.SelectionSet, v *model.SignalAggregations) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._SignalAggregations(ctx, sel, v) -} - -func (ec *executionContext) unmarshalNSignalFloatFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalFloatFilter(ctx context.Context, v any) (*model.SignalFloatFilter, error) { - res, err := ec.unmarshalInputSignalFloatFilter(ctx, v) - 
return &res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) unmarshalNString2string(ctx context.Context, v any) (string, error) { +func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v any) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { +func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { _ = sel res := graphql.MarshalString(v) if res == graphql.Null { @@ -33941,14 +35687,14 @@ func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.S return res } -func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v any) ([]string, error) { +func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v any) ([]string, error) { var vSlice []any vSlice = graphql.CoerceList(v) var err error res := make([]string, len(vSlice)) for i := range vSlice { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) - res[i], err = ec.unmarshalNString2string(ctx, vSlice[i]) + res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i]) if err != nil { return nil, err } @@ -33956,78 +35702,7 @@ func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v return res, nil } -func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - for i := range v { - ret[i] = ec.marshalNString2string(ctx, sel, v[i]) - } - - for _, e := range ret { - if e == graphql.Null { - return graphql.Null - } - } - - return ret -} - -func (ec *executionContext) unmarshalNStringAggregation2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringAggregation(ctx context.Context, v any) 
(model.StringAggregation, error) { - var res model.StringAggregation - err := res.UnmarshalGQL(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalNStringAggregation2githubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringAggregation(ctx context.Context, sel ast.SelectionSet, v model.StringAggregation) graphql.Marshaler { - return v -} - -func (ec *executionContext) unmarshalNStringArrayFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringArrayFilter(ctx context.Context, v any) (*model.StringArrayFilter, error) { - res, err := ec.unmarshalInputStringArrayFilter(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) unmarshalNStringValueFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐStringValueFilter(ctx context.Context, v any) (*model.StringValueFilter, error) { - res, err := ec.unmarshalInputStringValueFilter(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v any) (time.Time, error) { - res, err := graphql.UnmarshalTime(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { - _ = sel - res := graphql.MarshalTime(v) - if res == graphql.Null { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - } - return res -} - -func (ec *executionContext) unmarshalNUint642uint64(ctx context.Context, v any) (uint64, error) { - res, err := graphql.UnmarshalUint64(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalNUint642uint64(ctx context.Context, sel ast.SelectionSet, v uint64) graphql.Marshaler { - _ = sel - res := graphql.MarshalUint64(v) - if res == graphql.Null { - if !graphql.HasFieldError(ctx, 
graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - } - return res -} - -func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler { - return ec.___Directive(ctx, sel, &v) -} - -func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler { +func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -34051,7 +35726,7 @@ func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgq if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i]) + ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i]) } if isLen1 { f(i) @@ -34071,38 +35746,19 @@ func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgq return ret } -func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v any) (string, error) { - res, err := graphql.UnmarshalString(v) - return res, graphql.ErrorOnPath(ctx, err) +func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler { + return ec.___EnumValue(ctx, sel, &v) } -func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { - _ = sel - res := graphql.MarshalString(v) - if res == graphql.Null { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null 
which the schema does not allow") - } - } - return res +func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler { + return ec.___Field(ctx, sel, &v) } -func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v any) ([]string, error) { - var vSlice []any - vSlice = graphql.CoerceList(v) - var err error - res := make([]string, len(vSlice)) - for i := range vSlice { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) - res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i]) - if err != nil { - return nil, err - } - } - return res, nil +func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler { + return ec.___InputValue(ctx, sel, &v) } -func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { +func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -34126,7 +35782,7 @@ func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i]) + ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) } if isLen1 { f(i) @@ -34146,19 +35802,11 @@ func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context return ret } -func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v 
introspection.EnumValue) graphql.Marshaler { - return ec.___EnumValue(ctx, sel, &v) -} - -func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler { - return ec.___Field(ctx, sel, &v) -} - -func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler { - return ec.___InputValue(ctx, sel, &v) +func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler { + return ec.___Type(ctx, sel, &v) } -func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { +func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -34182,7 +35830,7 @@ func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋg if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) + ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) } if isLen1 { f(i) @@ -34202,11 +35850,33 @@ func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋg return ret } -func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler { - return ec.___Type(ctx, sel, &v) +func (ec *executionContext) 
marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec.___Type(ctx, sel, v) } -func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { +func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v any) (string, error) { + res, err := graphql.UnmarshalString(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + _ = sel + res := graphql.MarshalString(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + +func (ec *executionContext) marshalNeventDataSummary2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventDataSummaryᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.EventDataSummary) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -34230,7 +35900,7 @@ func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgen if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) + ret[i] = ec.marshalNeventDataSummary2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventDataSummary(ctx, sel, v[i]) } if isLen1 { f(i) @@ -34250,30 +35920,14 @@ func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgen return ret } -func (ec *executionContext) 
marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { +func (ec *executionContext) marshalNeventDataSummary2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventDataSummary(ctx context.Context, sel ast.SelectionSet, v *model.EventDataSummary) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { ec.Errorf(ctx, "the requested element is null which the schema does not allow") } return graphql.Null } - return ec.___Type(ctx, sel, v) -} - -func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v any) (string, error) { - res, err := graphql.UnmarshalString(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { - _ = sel - res := graphql.MarshalString(v) - if res == graphql.Null { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - } - return res + return ec._eventDataSummary(ctx, sel, v) } func (ec *executionContext) marshalNsignalDataSummary2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalDataSummaryᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.SignalDataSummary) graphql.Marshaler { @@ -34495,6 +36149,53 @@ func (ec *executionContext) marshalOEvent2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋ return ret } +func (ec *executionContext) marshalOEventCount2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.EventCount) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + 
} + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNEventCount2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventCount(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) unmarshalOEventFilter2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐEventFilter(ctx context.Context, v any) (*model.EventFilter, error) { if v == nil { return nil, nil @@ -34614,7 +36315,51 @@ func (ec *executionContext) marshalOPOMVC2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtel return ec._POMVC(ctx, sel, v) } -func (ec *executionContext) marshalOSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Segment) graphql.Marshaler { +func (ec *executionContext) unmarshalOSegmentConfig2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentConfig(ctx context.Context, v any) (*model.SegmentConfig, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputSegmentConfig(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalOSegmentEventRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentEventRequestᚄ(ctx context.Context, v any) ([]*model.SegmentEventRequest, error) { + if v == nil { + return nil, nil + } + var vSlice []any + vSlice = graphql.CoerceList(v) + var err error + res := make([]*model.SegmentEventRequest, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNSegmentEventRequest2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentEventRequest(ctx, vSlice[i]) + if err != nil { + 
return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOSegmentSignalRequest2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentSignalRequestᚄ(ctx context.Context, v any) ([]*model.SegmentSignalRequest, error) { + if v == nil { + return nil, nil + } + var vSlice []any + vSlice = graphql.CoerceList(v) + var err error + res := make([]*model.SegmentSignalRequest, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNSegmentSignalRequest2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentSignalRequest(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOSignalAggregationValue2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValueᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.SignalAggregationValue) graphql.Marshaler { if v == nil { return graphql.Null } @@ -34641,7 +36386,7 @@ func (ec *executionContext) marshalOSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetwork if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNSegment2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegment(ctx, sel, v[i]) + ret[i] = ec.marshalNSignalAggregationValue2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationValue(ctx, sel, v[i]) } if isLen1 { f(i) @@ -34661,14 +36406,6 @@ func (ec *executionContext) marshalOSegment2ᚕᚖgithubᚗcomᚋDIMOᚑNetwork return ret } -func (ec *executionContext) unmarshalOSegmentConfig2ᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSegmentConfig(ctx context.Context, v any) (*model.SegmentConfig, error) { - if v == nil { - return nil, nil - } - res, err := ec.unmarshalInputSegmentConfig(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - func (ec *executionContext) marshalOSignalAggregations2ᚕᚖgithubᚗcomᚋDIMOᚑNetworkᚋtelemetryᚑapiᚋinternalᚋgraphᚋmodelᚐSignalAggregationsᚄ(ctx 
context.Context, sel ast.SelectionSet, v []*model.SignalAggregations) graphql.Marshaler { if v == nil { return graphql.Null diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index d357648..87b5413 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -58,10 +58,29 @@ type AttestationFilter struct { After *time.Time `json:"after,omitempty"` // Limit attestations returned to this value. Defaults to 10. Limit *int `json:"limit,omitempty"` + // Cursor for pagination (exclusive). + Cursor *time.Time `json:"cursor,omitempty"` // Filter attestations by tags. Tags *StringArrayFilter `json:"tags,omitempty"` } +type DailyActivity struct { + // Start of that calendar day (midnight in requested timezone), as UTC. + Date time.Time `json:"date"` + // Number of activity segments that started or fell within that day. + SegmentCount int `json:"segmentCount"` + // Sum of segment durations (total active time that day) in seconds. + Duration int `json:"duration"` + // Start of day (timestamp = day start, value = location). Same shape as Segment.start. Null if not available. + Start *SignalLocation `json:"start,omitempty"` + // End of day (timestamp = day end, location). Same shape as Segment.end. Null if not available. + End *SignalLocation `json:"end,omitempty"` + // Per-day signal aggregates (same shape as segment signals). + Signals []*SignalAggregationValue `json:"signals"` + // Per-day event counts. + EventCounts []*EventCount `json:"eventCounts"` +} + type DataSummary struct { // Total number of signals collected NumberOfSignals uint64 `json:"numberOfSignals"` @@ -73,6 +92,8 @@ type DataSummary struct { LastSeen time.Time `json:"lastSeen"` // data summary of an individual signal SignalDataSummary []*SignalDataSummary `json:"signalDataSummary"` + // Events known to the vehicle: per-event name, count, and first/last seen. 
+ EventDataSummary []*EventDataSummary `json:"eventDataSummary"` } type DeviceActivity struct { @@ -93,6 +114,12 @@ type Event struct { Metadata *string `json:"metadata,omitempty"` } +// Event name and count. Used by segments, daily activity, and event summaries. +type EventCount struct { + Name string `json:"name"` + Count int `json:"count"` +} + type EventFilter struct { // name is the name of the event. Name *StringValueFilter `json:"name,omitempty"` @@ -141,20 +168,21 @@ type Query struct { } type Segment struct { - // Segment start timestamp (actual activity start transition) - StartTime time.Time `json:"startTime"` - // Segment end timestamp (activity end after debounce period). - // Null if segment is ongoing (extends beyond query range). - EndTime *time.Time `json:"endTime,omitempty"` - // Duration in seconds. - // If ongoing: from start to query 'to' time. - // If complete: from start to end. - DurationSeconds int `json:"durationSeconds"` + // Segment start (timestamp and location). Uses SignalLocation; always present. + Start *SignalLocation `json:"start"` + // Segment end (timestamp and location). Uses SignalLocation; omitted when isOngoing is true. + End *SignalLocation `json:"end,omitempty"` + // Duration in seconds. If ongoing: from start to query 'to'. If complete: from start to end. + Duration int `json:"duration"` // True if segment extends beyond query time range (last activity is ongoing). + // When true, end is not included in the response. IsOngoing bool `json:"isOngoing"` // True if segment started before query time range. - // Indicates startTime may be approximate. StartedBeforeRange bool `json:"startedBeforeRange"` + // Per-segment signal aggregates. Same shape as signals elsewhere (name, agg, value). + Signals []*SignalAggregationValue `json:"signals,omitempty"` + // Per-segment event counts. 
+ EventCounts []*EventCount `json:"eventCounts,omitempty"` } type SegmentConfig struct { @@ -167,12 +195,31 @@ type SegmentConfig struct { // Filters very short segments (testing, engine cycling). // Default: 240 (4 minutes), Min: 60, Max: 3600 MinSegmentDurationSeconds *int `json:"minSegmentDurationSeconds,omitempty"` - // [frequencyAnalysis only] Minimum signal count per window for activity detection. - // Higher values = more conservative (filters parked telemetry better). - // Lower values = more sensitive (works for sparse signal vehicles). - // Default: 10 (tuned to match ignition detection accuracy) - // Min: 1, Max: 3600 + // [frequencyAnalysis] Minimum signal count per window for activity detection. + // [staticRpm] Minimum samples per window to consider it idle (same semantics). + // Higher values = more conservative. Lower values = more sensitive. + // Default: 10, Min: 1, Max: 3600 SignalCountThreshold *int `json:"signalCountThreshold,omitempty"` + // [staticRpm only] Upper bound for idle RPM. Windows with max(RPM) <= this are considered idle. + // Default: 1500, Min: 300, Max: 3000 + MaxIdleRpm *int `json:"maxIdleRpm,omitempty"` +} + +type SegmentEventRequest struct { + Name string `json:"name"` +} + +type SegmentSignalRequest struct { + Name string `json:"name"` + Agg FloatAggregation `json:"agg"` +} + +// Result of aggregating a float signal over an interval. Used by segments and daily activity summaries. +// Same shape as one row of aggregated signal data (name, aggregation type, computed value). +type SignalAggregationValue struct { + Name string `json:"name"` + Agg string `json:"agg"` + Value float64 `json:"value"` } type SignalCollection struct { @@ -635,7 +682,7 @@ type SignalFloatFilter struct { type SignalLocation struct { // timestamp of when this data was colllected Timestamp time.Time `json:"timestamp"` - // value of the signal + // location (latitude, longitude, hdop) at this timestamp. 
Value *Location `json:"value"` } @@ -708,6 +755,17 @@ type Vinvc struct { RawVc string `json:"rawVC"` } +type EventDataSummary struct { + // Event name + Name string `json:"name"` + // Number of times this event occurred for the vehicle + NumberOfEvents uint64 `json:"numberOfEvents"` + // First seen timestamp + FirstSeen time.Time `json:"firstSeen"` + // Last seen timestamp + LastSeen time.Time `json:"lastSeen"` +} + type SignalDataSummary struct { // signal name Name string `json:"name"` @@ -734,17 +792,21 @@ const ( // Excellent noise resistance with 100% accuracy match to ignition baseline. // Best alternative when ignition signal is unavailable - same accuracy, same speed as frequency analysis. DetectionMechanismChangePointDetection DetectionMechanism = "changePointDetection" + // Static RPM: Segments are contiguous periods where engine RPM remains in idle range. + // Uses repeated windows of idle RPM (e.g. powertrainCombustionEngineSpeed <= maxIdleRpm) merged like trips. + DetectionMechanismStaticRpm DetectionMechanism = "staticRpm" ) var AllDetectionMechanism = []DetectionMechanism{ DetectionMechanismIgnitionDetection, DetectionMechanismFrequencyAnalysis, DetectionMechanismChangePointDetection, + DetectionMechanismStaticRpm, } func (e DetectionMechanism) IsValid() bool { switch e { - case DetectionMechanismIgnitionDetection, DetectionMechanismFrequencyAnalysis, DetectionMechanismChangePointDetection: + case DetectionMechanismIgnitionDetection, DetectionMechanismFrequencyAnalysis, DetectionMechanismChangePointDetection, DetectionMechanismStaticRpm: return true } return false diff --git a/internal/graph/segments.resolvers.go b/internal/graph/segments.resolvers.go index 9847e7c..42b327f 100644 --- a/internal/graph/segments.resolvers.go +++ b/internal/graph/segments.resolvers.go @@ -12,6 +12,11 @@ import ( ) // Segments is the resolver for the segments field. 
-func (r *queryResolver) Segments(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig) ([]*model.Segment, error) { - return r.BaseRepo.GetSegments(ctx, tokenID, from, to, mechanism, config) +func (r *queryResolver) Segments(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, limit *int, after *time.Time) ([]*model.Segment, error) { + return r.BaseRepo.GetSegments(ctx, tokenID, from, to, mechanism, config, signalRequests, eventRequests, limit, after) +} + +// DailyActivity is the resolver for the dailyActivity field. +func (r *queryResolver) DailyActivity(ctx context.Context, tokenID int, from time.Time, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, timezone *string) ([]*model.DailyActivity, error) { + return r.BaseRepo.GetDailyActivity(ctx, tokenID, from, to, mechanism, config, signalRequests, eventRequests, timezone) +} diff --git a/internal/repositories/attestation/attestation.go b/internal/repositories/attestation/attestation.go index 786275a..16d86d5 100644 --- a/internal/repositories/attestation/attestation.go +++ b/internal/repositories/attestation/attestation.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "time" "github.com/DIMO-Network/cloudevent" "github.com/DIMO-Network/fetch-api/pkg/grpc" @@ -74,6 +75,14 @@ func (r *Repository) GetAttestations(ctx context.Context, subject string, filter if filter.After != nil { opts.After = timestamppb.New(*filter.After) } + // Add 1ns to cursor (so "after cursor" is strictly > last item's time). 
+ if filter.Cursor != nil { + updatedCursor := filter.Cursor.Add(time.Nanosecond) + cursorTS := timestamppb.New(updatedCursor) + if opts.After == nil || cursorTS.AsTime().After(opts.After.AsTime()) { + opts.After = cursorTS + } + } if filter.Before != nil { opts.Before = timestamppb.New(*filter.Before) diff --git a/internal/repositories/repositories.go b/internal/repositories/repositories.go index d5c3bd4..5f03451 100644 --- a/internal/repositories/repositories.go +++ b/internal/repositories/repositories.go @@ -34,10 +34,14 @@ var ManufacturerSourceTranslations = map[string]string{ // CHService is the interface for the ClickHouse service. type CHService interface { GetAggregatedSignals(ctx context.Context, aggArgs *model.AggregatedSignalArgs) ([]*ch.AggSignal, error) + GetAggregatedSignalsForRanges(ctx context.Context, tokenID uint32, ranges []ch.TimeRange, globalFrom, globalTo time.Time, floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) ([]*ch.AggSignalForRange, error) GetLatestSignals(ctx context.Context, latestArgs *model.LatestSignalsArgs) ([]*vss.Signal, error) GetAvailableSignals(ctx context.Context, tokenID uint32, filter *model.SignalFilter) ([]string, error) GetSignalSummaries(ctx context.Context, tokenID uint32, filter *model.SignalFilter) ([]*model.SignalDataSummary, error) GetEvents(ctx context.Context, subject string, from, to time.Time, filter *model.EventFilter) ([]*vss.Event, error) + GetEventCounts(ctx context.Context, subject string, from, to time.Time, eventNames []string) ([]*ch.EventCount, error) + GetEventCountsForRanges(ctx context.Context, subject string, ranges []ch.TimeRange, eventNames []string) ([]*ch.EventCountForRange, error) + GetEventSummaries(ctx context.Context, subject string) ([]*ch.EventSummary, error) GetSegments(ctx context.Context, tokenID uint32, from, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig) ([]*ch.Segment, error) } @@ -206,12 +210,30 @@ func (r *Repository) 
GetAvailableSignals(ctx context.Context, tokenID uint32, fi return retSignals, nil } -// GetDataSummary returns the signal metadata for the given tokenID and filter. +// GetDataSummary returns the signal and event metadata for the given tokenID and filter. func (r *Repository) GetDataSummary(ctx context.Context, tokenID uint32, filter *model.SignalFilter) (*model.DataSummary, error) { signalDataSummary, err := r.chService.GetSignalSummaries(ctx, tokenID, filter) if err != nil { return nil, handleDBError(ctx, err) } + subject := cloudevent.ERC721DID{ + ChainID: r.chainID, + ContractAddress: r.vehicleAddress, + TokenID: big.NewInt(int64(tokenID)), + }.String() + eventSummaries, err := r.chService.GetEventSummaries(ctx, subject) + if err != nil { + return nil, handleDBError(ctx, err) + } + eventDataSummary := make([]*model.EventDataSummary, len(eventSummaries)) + for i, es := range eventSummaries { + eventDataSummary[i] = &model.EventDataSummary{ + Name: es.Name, + NumberOfEvents: es.Count, + FirstSeen: es.FirstSeen, + LastSeen: es.LastSeen, + } + } totalCount := uint64(0) minTimestamp := time.Now().UTC() maxTimestamp := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) @@ -226,12 +248,21 @@ func (r *Repository) GetDataSummary(ctx context.Context, tokenID uint32, filter maxTimestamp = metadata.LastSeen } } + for _, es := range eventSummaries { + if es.FirstSeen.Before(minTimestamp) { + minTimestamp = es.FirstSeen + } + if es.LastSeen.After(maxTimestamp) { + maxTimestamp = es.LastSeen + } + } return &model.DataSummary{ NumberOfSignals: totalCount, FirstSeen: minTimestamp, LastSeen: maxTimestamp, AvailableSignals: availableSignals, SignalDataSummary: signalDataSummary, + EventDataSummary: eventDataSummary, }, nil } diff --git a/internal/repositories/repositories_mocks_test.go b/internal/repositories/repositories_mocks_test.go index a7292e7..d424fe2 100644 --- a/internal/repositories/repositories_mocks_test.go +++ b/internal/repositories/repositories_mocks_test.go @@ -59,6 
+59,21 @@ func (mr *MockCHServiceMockRecorder) GetAggregatedSignals(ctx, aggArgs any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAggregatedSignals", reflect.TypeOf((*MockCHService)(nil).GetAggregatedSignals), ctx, aggArgs) } +// GetAggregatedSignalsForRanges mocks base method. +func (m *MockCHService) GetAggregatedSignalsForRanges(ctx context.Context, tokenID uint32, ranges []ch.TimeRange, globalFrom, globalTo time.Time, floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) ([]*ch.AggSignalForRange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAggregatedSignalsForRanges", ctx, tokenID, ranges, globalFrom, globalTo, floatArgs, locationArgs) + ret0, _ := ret[0].([]*ch.AggSignalForRange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAggregatedSignalsForRanges indicates an expected call of GetAggregatedSignalsForRanges. +func (mr *MockCHServiceMockRecorder) GetAggregatedSignalsForRanges(ctx, tokenID, ranges, globalFrom, globalTo, floatArgs, locationArgs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAggregatedSignalsForRanges", reflect.TypeOf((*MockCHService)(nil).GetAggregatedSignalsForRanges), ctx, tokenID, ranges, globalFrom, globalTo, floatArgs, locationArgs) +} + // GetAvailableSignals mocks base method. func (m *MockCHService) GetAvailableSignals(ctx context.Context, tokenID uint32, filter *model.SignalFilter) ([]string, error) { m.ctrl.T.Helper() @@ -74,6 +89,51 @@ func (mr *MockCHServiceMockRecorder) GetAvailableSignals(ctx, tokenID, filter an return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAvailableSignals", reflect.TypeOf((*MockCHService)(nil).GetAvailableSignals), ctx, tokenID, filter) } +// GetEventCounts mocks base method. 
+func (m *MockCHService) GetEventCounts(ctx context.Context, subject string, from, to time.Time, eventNames []string) ([]*ch.EventCount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventCounts", ctx, subject, from, to, eventNames) + ret0, _ := ret[0].([]*ch.EventCount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventCounts indicates an expected call of GetEventCounts. +func (mr *MockCHServiceMockRecorder) GetEventCounts(ctx, subject, from, to, eventNames any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventCounts", reflect.TypeOf((*MockCHService)(nil).GetEventCounts), ctx, subject, from, to, eventNames) +} + +// GetEventCountsForRanges mocks base method. +func (m *MockCHService) GetEventCountsForRanges(ctx context.Context, subject string, ranges []ch.TimeRange, eventNames []string) ([]*ch.EventCountForRange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventCountsForRanges", ctx, subject, ranges, eventNames) + ret0, _ := ret[0].([]*ch.EventCountForRange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventCountsForRanges indicates an expected call of GetEventCountsForRanges. +func (mr *MockCHServiceMockRecorder) GetEventCountsForRanges(ctx, subject, ranges, eventNames any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventCountsForRanges", reflect.TypeOf((*MockCHService)(nil).GetEventCountsForRanges), ctx, subject, ranges, eventNames) +} + +// GetEventSummaries mocks base method. +func (m *MockCHService) GetEventSummaries(ctx context.Context, subject string) ([]*ch.EventSummary, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventSummaries", ctx, subject) + ret0, _ := ret[0].([]*ch.EventSummary) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventSummaries indicates an expected call of GetEventSummaries. 
+func (mr *MockCHServiceMockRecorder) GetEventSummaries(ctx, subject any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventSummaries", reflect.TypeOf((*MockCHService)(nil).GetEventSummaries), ctx, subject) +} + // GetEvents mocks base method. func (m *MockCHService) GetEvents(ctx context.Context, subject string, from, to time.Time, filter *model.EventFilter) ([]*vss.Event, error) { m.ctrl.T.Helper() diff --git a/internal/repositories/segments.go b/internal/repositories/segments.go index 8e6edb5..1a938b8 100644 --- a/internal/repositories/segments.go +++ b/internal/repositories/segments.go @@ -3,14 +3,21 @@ package repositories import ( "context" "fmt" + "math/big" + "sort" "time" + "github.com/DIMO-Network/cloudevent" + "github.com/DIMO-Network/model-garage/pkg/vss" "github.com/DIMO-Network/server-garage/pkg/gql/errorhandler" "github.com/DIMO-Network/telemetry-api/internal/graph/model" + "github.com/DIMO-Network/telemetry-api/internal/service/ch" + "golang.org/x/sync/errgroup" ) const ( maxDateRangeDays = 30 + maxSegmentLimit = 200 ) // validateSegmentArgs validates the arguments for segment queries @@ -39,8 +46,9 @@ func validateSegmentArgs(tokenID int, from, to time.Time) error { return nil } -// validateSegmentConfig validates the segment configuration parameters -func validateSegmentConfig(config *model.SegmentConfig) error { +// validateSegmentConfig validates the segment configuration parameters. +// When mechanism is staticRpm, also validates idling-specific fields. 
+func validateSegmentConfig(config *model.SegmentConfig, mechanism model.DetectionMechanism) error { if config == nil { return nil } @@ -63,37 +71,518 @@ func validateSegmentConfig(config *model.SegmentConfig) error { } } + if mechanism == model.DetectionMechanismStaticRpm { + if config.MaxIdleRpm != nil { + if *config.MaxIdleRpm < 300 || *config.MaxIdleRpm > 3000 { + return fmt.Errorf("maxIdleRpm must be between 300 and 3000") + } + } + } + + return nil +} + +// validateSegmentLimit validates optional pagination limit. If non-nil, must be in [1, maxSegmentLimit]. +func validateSegmentLimit(limit *int) error { + if limit == nil { + return nil + } + if *limit < 1 || *limit > maxSegmentLimit { + return fmt.Errorf("limit must be between 1 and %d", maxSegmentLimit) + } return nil } -// GetSegments returns segments detected using the specified mechanism in the time range -func (r *Repository) GetSegments(ctx context.Context, tokenID int, from, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig) ([]*model.Segment, error) { - // Validate inputs +// defaultSegmentSignalSet returns the default signal set when summary is requested but signalRequests is omitted or empty. +func defaultSegmentSignalSet() []*model.SegmentSignalRequest { + return []*model.SegmentSignalRequest{ + {Name: "speed", Agg: model.FloatAggregationMax}, + {Name: "powertrainTransmissionTravelledDistance", Agg: model.FloatAggregationFirst}, + {Name: "powertrainTransmissionTravelledDistance", Agg: model.FloatAggregationLast}, + {Name: "powertrainFuelSystemAbsoluteLevel", Agg: model.FloatAggregationFirst}, + {Name: "powertrainFuelSystemAbsoluteLevel", Agg: model.FloatAggregationLast}, + {Name: "powertrainTractionBatteryStateOfChargeCurrent", Agg: model.FloatAggregationFirst}, + {Name: "powertrainTractionBatteryStateOfChargeCurrent", Agg: model.FloatAggregationLast}, + } +} + +// sortSegmentSignals orders signals by name, then by agg (e.g. 
FIRST, LAST, MAX) for stable, readable output. +func sortSegmentSignals(signals []*model.SignalAggregationValue) { + sort.Slice(signals, func(i, j int) bool { + if signals[i].Name != signals[j].Name { + return signals[i].Name < signals[j].Name + } + return signals[i].Agg < signals[j].Agg + }) +} + +// GetSegments returns segments detected using the specified mechanism in the time range. +// Pagination: pass after (exclusive cursor = startTime of last segment from previous page) and limit (default 100, max 200). +// Segments are ordered by startTime ascending. When after is set, only segments with startTime > after are requested from CH. +func (r *Repository) GetSegments(ctx context.Context, tokenID int, from, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, limit *int, after *time.Time) ([]*model.Segment, error) { if err := validateSegmentArgs(tokenID, from, to); err != nil { return nil, errorhandler.NewBadRequestError(ctx, err) } - - if err := validateSegmentConfig(config); err != nil { + if err := validateSegmentConfig(config, mechanism); err != nil { + return nil, errorhandler.NewBadRequestError(ctx, err) + } + if err := validateSegmentLimit(limit); err != nil { return nil, errorhandler.NewBadRequestError(ctx, err) } + // Cursor: only request segments with startTime > after so CH returns fewer rows + if after != nil && after.Before(to) { + cursorFrom := (*after).Add(time.Nanosecond) // exclusive: first segment start > after + if cursorFrom.After(from) { + from = cursorFrom + } + } - // Query from ClickHouse service chSegments, err := r.chService.GetSegments(ctx, uint32(tokenID), from, to, mechanism, config) if err != nil { return nil, handleDBError(ctx, err) } + // Apply limit before building ranges and batch queries so we don't run agg/event-count for segments we'll drop. 
+ if limit != nil && len(chSegments) > *limit { + chSegments = chSegments[:*limit] + } - // Convert to GraphQL model + wantSummary := len(signalRequests) > 0 || len(eventRequests) > 0 + signalReqs := signalRequests + if wantSummary && len(signalReqs) == 0 { + signalReqs = defaultSegmentSignalSet() + } + var eventNames []string + if len(eventRequests) > 0 { + eventNames = make([]string, len(eventRequests)) + for i, e := range eventRequests { + eventNames[i] = e.Name + } + } + + subject := cloudevent.ERC721DID{ + ChainID: r.chainID, + ContractAddress: r.vehicleAddress, + TokenID: big.NewInt(int64(tokenID)), + }.String() + + var eventCountsBySeg map[int]map[string]int + var aggsBySeg map[int][]*ch.AggSignal + if wantSummary && len(chSegments) > 0 { + ranges := make([]ch.TimeRange, len(chSegments)) + var globalFrom, globalTo time.Time + for i, chSeg := range chSegments { + segTo := to + if chSeg.EndTime != nil { + segTo = *chSeg.EndTime + } + ranges[i] = ch.TimeRange{From: chSeg.StartTime, To: segTo} + if i == 0 { + globalFrom, globalTo = chSeg.StartTime, segTo + } else { + if chSeg.StartTime.Before(globalFrom) { + globalFrom = chSeg.StartTime + } + if segTo.After(globalTo) { + globalTo = segTo + } + } + } + floatArgs := make([]model.FloatSignalArgs, 0, len(signalReqs)) + for _, req := range signalReqs { + floatArgs = append(floatArgs, model.FloatSignalArgs{ + Name: req.Name, + Agg: req.Agg, + Alias: req.Name + "_" + string(req.Agg), + }) + } + locationArgs := []model.LocationSignalArgs{ + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationFirst, Alias: "startLoc"}, + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationLast, Alias: "endLoc"}, + } + var batchCounts []*ch.EventCountForRange + var batchAggs []*ch.AggSignalForRange + g, gctx := errgroup.WithContext(ctx) + g.Go(func() error { + var err error + batchCounts, err = r.chService.GetEventCountsForRanges(gctx, subject, ranges, eventNames) + return err + }) + g.Go(func() 
error { + var err error + batchAggs, err = r.chService.GetAggregatedSignalsForRanges(gctx, uint32(tokenID), ranges, globalFrom, globalTo, floatArgs, locationArgs) + return err + }) + if err := g.Wait(); err != nil { + return nil, handleDBError(ctx, err) + } + eventCountsBySeg = make(map[int]map[string]int, len(chSegments)) + for _, ec := range batchCounts { + if eventCountsBySeg[ec.SegIndex] == nil { + eventCountsBySeg[ec.SegIndex] = make(map[string]int) + } + eventCountsBySeg[ec.SegIndex][ec.Name] = ec.Count + } + aggsBySeg = make(map[int][]*ch.AggSignal, len(chSegments)) + for _, a := range batchAggs { + aggsBySeg[a.SegIndex] = append(aggsBySeg[a.SegIndex], &ch.AggSignal{ + SignalType: a.SignalType, + SignalIndex: a.SignalIndex, + ValueNumber: a.ValueNumber, + ValueString: a.ValueString, + ValueLocation: a.ValueLocation, + }) + } + } + + defaultLoc := &model.Location{Latitude: 0, Longitude: 0, Hdop: 0} segments := make([]*model.Segment, len(chSegments)) - for i, chSegment := range chSegments { + for i, chSeg := range chSegments { + startLoc := defaultLoc + var endPoint *model.SignalLocation + if !chSeg.IsOngoing && chSeg.EndTime != nil { + endPoint = &model.SignalLocation{Timestamp: *chSeg.EndTime, Value: defaultLoc} + } segments[i] = &model.Segment{ - StartTime: chSegment.StartTime, - EndTime: chSegment.EndTime, - DurationSeconds: int(chSegment.DurationSeconds), - IsOngoing: chSegment.IsOngoing, - StartedBeforeRange: chSegment.StartedBeforeRange, + Start: &model.SignalLocation{Timestamp: chSeg.StartTime, Value: startLoc}, + End: endPoint, + Duration: int(chSeg.DurationSeconds), + IsOngoing: chSeg.IsOngoing, + StartedBeforeRange: chSeg.StartedBeforeRange, + } + if wantSummary { + var eventCounts []*ch.EventCount + if eventCountsBySeg != nil { + m := eventCountsBySeg[i] + eventCounts = make([]*ch.EventCount, 0, len(m)) + for name, count := range m { + eventCounts = append(eventCounts, &ch.EventCount{Name: name, Count: count}) + } + } + var preFetchedAggs 
[]*ch.AggSignal + if aggsBySeg != nil { + preFetchedAggs = aggsBySeg[i] + if preFetchedAggs == nil { + preFetchedAggs = []*ch.AggSignal{} + } + } + summary, err := r.segmentSummary(ctx, uint32(tokenID), subject, chSeg, from, signalReqs, eventNames, eventCounts, preFetchedAggs) + if err != nil { + return nil, err + } + segments[i].Signals = summary.Signals + segments[i].EventCounts = summary.EventCounts + if summary.StartLocation != nil { + segments[i].Start.Value = summary.StartLocation + } + if endPoint != nil && summary.EndLocation != nil { + segments[i].End.Value = summary.EndLocation + } } } - return segments, nil } + +type segmentSummaryResult struct { + Signals []*model.SignalAggregationValue + StartLocation *model.Location + EndLocation *model.Location + EventCounts []*model.EventCount +} + +func (r *Repository) segmentSummary(ctx context.Context, tokenID uint32, subject string, chSeg *ch.Segment, queryTo time.Time, signalReqs []*model.SegmentSignalRequest, eventNames []string, preFetchedEventCounts []*ch.EventCount, preFetchedAggs []*ch.AggSignal) (*segmentSummaryResult, error) { + segFrom := chSeg.StartTime + segTo := queryTo + if chSeg.EndTime != nil { + segTo = *chSeg.EndTime + } + intervalMicro := segTo.Sub(segFrom).Microseconds() + if intervalMicro <= 0 { + intervalMicro = 1 + } + + floatArgs := make([]model.FloatSignalArgs, 0, len(signalReqs)) + for _, req := range signalReqs { + floatArgs = append(floatArgs, model.FloatSignalArgs{ + Name: req.Name, + Agg: req.Agg, + Alias: req.Name + "_" + string(req.Agg), + }) + } + locationArgs := []model.LocationSignalArgs{ + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationFirst, Alias: "startLoc"}, + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationLast, Alias: "endLoc"}, + } + var aggs []*ch.AggSignal + if preFetchedAggs != nil { + aggs = preFetchedAggs + } else { + aggArgs := &model.AggregatedSignalArgs{ + SignalArgs: model.SignalArgs{TokenID: tokenID}, + 
FromTS: segFrom, + ToTS: segTo, + Interval: intervalMicro, + FloatArgs: floatArgs, + LocationArgs: locationArgs, + } + var err error + aggs, err = r.chService.GetAggregatedSignals(ctx, aggArgs) + if err != nil { + return nil, handleDBError(ctx, err) + } + } + + signalSummary := make([]*model.SignalAggregationValue, 0, len(floatArgs)) + var startLoc, endLoc *model.Location + for _, a := range aggs { + if a.SignalType == ch.FloatType && int(a.SignalIndex) < len(floatArgs) { + signalSummary = append(signalSummary, &model.SignalAggregationValue{ + Name: floatArgs[a.SignalIndex].Name, + Agg: string(floatArgs[a.SignalIndex].Agg), + Value: a.ValueNumber, + }) + } + if a.SignalType == ch.LocType { + loc := &model.Location{ + Latitude: a.ValueLocation.Latitude, + Longitude: a.ValueLocation.Longitude, + Hdop: a.ValueLocation.HDOP, + } + if a.SignalIndex == 0 { + startLoc = loc + } else { + endLoc = loc + } + } + } + sortSegmentSignals(signalSummary) + + var eventCountMap map[string]int + if preFetchedEventCounts != nil { + eventCountMap = make(map[string]int, len(preFetchedEventCounts)) + for _, ec := range preFetchedEventCounts { + eventCountMap[ec.Name] = ec.Count + } + } else { + eventCounts, err := r.chService.GetEventCounts(ctx, subject, segFrom, segTo, eventNames) + if err != nil { + return nil, handleDBError(ctx, err) + } + eventCountMap = make(map[string]int, len(eventCounts)) + for _, ec := range eventCounts { + eventCountMap[ec.Name] = ec.Count + } + } + var eventSummary []*model.EventCount + if len(eventNames) > 0 { + eventSummary = make([]*model.EventCount, len(eventNames)) + for i, name := range eventNames { + eventSummary[i] = &model.EventCount{Name: name, Count: eventCountMap[name]} + } + } else { + eventSummary = make([]*model.EventCount, 0, len(eventCountMap)) + for name, count := range eventCountMap { + eventSummary = append(eventSummary, &model.EventCount{Name: name, Count: count}) + } + } + + return &segmentSummaryResult{ + Signals: signalSummary, + 
StartLocation: startLoc, + EndLocation: endLoc, + EventCounts: eventSummary, + }, nil +} + +// GetDailyActivity returns one record per calendar day in the requested date range (activity segments only). +// mechanism must be ignitionDetection, frequencyAnalysis, or changePointDetection; staticRpm returns 400. +func (r *Repository) GetDailyActivity(ctx context.Context, tokenID int, from, to time.Time, mechanism model.DetectionMechanism, config *model.SegmentConfig, signalRequests []*model.SegmentSignalRequest, eventRequests []*model.SegmentEventRequest, timezone *string) ([]*model.DailyActivity, error) { + if mechanism == model.DetectionMechanismStaticRpm { + return nil, errorhandler.NewBadRequestError(ctx, fmt.Errorf("dailyActivity does not accept mechanism staticRpm; use ignitionDetection, frequencyAnalysis, or changePointDetection")) + } + loc := time.UTC + if timezone != nil && *timezone != "" { + var err error + loc, err = time.LoadLocation(*timezone) + if err != nil { + return nil, errorhandler.NewBadRequestError(ctx, fmt.Errorf("invalid timezone %q: %w", *timezone, err)) + } + } + fromInLoc := from.In(loc) + toInLoc := to.In(loc) + fromDate := time.Date(fromInLoc.Year(), fromInLoc.Month(), fromInLoc.Day(), 0, 0, 0, 0, loc) + toDate := time.Date(toInLoc.Year(), toInLoc.Month(), toInLoc.Day(), 0, 0, 0, 0, loc) + if !fromDate.Before(toDate) { + return nil, errorhandler.NewBadRequestError(ctx, fmt.Errorf("from date must be before to date")) + } + if toDate.After(time.Now().In(loc)) { + return nil, errorhandler.NewBadRequestError(ctx, fmt.Errorf("to date cannot be in the future")) + } + if toDate.Sub(fromDate) > maxDateRangeDays*24*time.Hour { + return nil, errorhandler.NewBadRequestError(ctx, fmt.Errorf("date range exceeds maximum of %d days", maxDateRangeDays)) + } + rangeStart := fromDate + rangeEnd := toDate.Add(24 * time.Hour) + + signalReqs := signalRequests + if len(signalReqs) == 0 { + signalReqs = defaultSegmentSignalSet() + } + var eventNames []string + 
if len(eventRequests) > 0 { + eventNames = make([]string, len(eventRequests)) + for i, e := range eventRequests { + eventNames[i] = e.Name + } + } + + segments, err := r.GetSegments(ctx, tokenID, rangeStart, rangeEnd, mechanism, config, signalReqs, eventRequests, nil, nil) + if err != nil { + return nil, err + } + subject := cloudevent.ERC721DID{ + ChainID: r.chainID, + ContractAddress: r.vehicleAddress, + TokenID: big.NewInt(int64(tokenID)), + }.String() + + var out []*model.DailyActivity + for d := fromDate; d.Before(toDate); d = d.Add(24 * time.Hour) { + dayStart := d + dayEnd := d.Add(24 * time.Hour) + dayStartUTC := dayStart.UTC() + dayEndUTC := dayEnd.UTC() + + var segmentCount int + var totalActiveSeconds int + var firstSeg, lastSeg *model.Segment + for _, seg := range segments { + segEnd := dayEndUTC + if seg.End != nil && seg.End.Timestamp.Before(dayEndUTC) { + segEnd = seg.End.Timestamp + } + if seg.Start.Timestamp.After(dayEndUTC) || segEnd.Before(dayStartUTC) || !segEnd.After(seg.Start.Timestamp) { + continue + } + segmentCount++ + overlapStart := seg.Start.Timestamp + if overlapStart.Before(dayStartUTC) { + overlapStart = dayStartUTC + } + overlapEnd := segEnd + if overlapEnd.After(dayEndUTC) { + overlapEnd = dayEndUTC + } + totalActiveSeconds += int(overlapEnd.Sub(overlapStart).Seconds()) + if firstSeg == nil { + firstSeg = seg + } + lastSeg = seg + } + + signalSummary, startLoc, endLoc, eventSummary, err := r.daySummary(ctx, uint32(tokenID), subject, dayStartUTC, dayEndUTC, signalReqs, eventNames) + if err != nil { + return nil, err + } + if firstSeg != nil && firstSeg.Start != nil && firstSeg.Start.Value != nil { + startLoc = firstSeg.Start.Value + } + if lastSeg != nil && lastSeg.End != nil && lastSeg.End.Value != nil { + endLoc = lastSeg.End.Value + } + + var startSignalLoc, endSignalLoc *model.SignalLocation + if startLoc != nil { + startSignalLoc = &model.SignalLocation{Timestamp: dayStartUTC, Value: startLoc} + } + if endLoc != nil { + 
endSignalLoc = &model.SignalLocation{Timestamp: dayEndUTC, Value: endLoc} + } + + out = append(out, &model.DailyActivity{ + Date: dayStartUTC, + SegmentCount: segmentCount, + Duration: totalActiveSeconds, + Start: startSignalLoc, + End: endSignalLoc, + Signals: signalSummary, + EventCounts: eventSummary, + }) + } + return out, nil +} + +func (r *Repository) daySummary(ctx context.Context, tokenID uint32, subject string, dayStart, dayEnd time.Time, signalReqs []*model.SegmentSignalRequest, eventNames []string) ([]*model.SignalAggregationValue, *model.Location, *model.Location, []*model.EventCount, error) { + intervalMicro := dayEnd.Sub(dayStart).Microseconds() + if intervalMicro <= 0 { + intervalMicro = 1 + } + floatArgs := make([]model.FloatSignalArgs, 0, len(signalReqs)) + for _, req := range signalReqs { + floatArgs = append(floatArgs, model.FloatSignalArgs{ + Name: req.Name, + Agg: req.Agg, + Alias: req.Name + "_" + string(req.Agg), + }) + } + locationArgs := []model.LocationSignalArgs{ + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationFirst, Alias: "startLoc"}, + {Name: vss.FieldCurrentLocationCoordinates, Agg: model.LocationAggregationLast, Alias: "endLoc"}, + } + aggArgs := &model.AggregatedSignalArgs{ + SignalArgs: model.SignalArgs{TokenID: tokenID}, + FromTS: dayStart, + ToTS: dayEnd, + Interval: intervalMicro, + FloatArgs: floatArgs, + LocationArgs: locationArgs, + } + aggs, err := r.chService.GetAggregatedSignals(ctx, aggArgs) + if err != nil { + return nil, nil, nil, nil, handleDBError(ctx, err) + } + signalSummary := make([]*model.SignalAggregationValue, 0, len(floatArgs)) + var startLoc, endLoc *model.Location + for _, a := range aggs { + if a.SignalType == ch.FloatType && int(a.SignalIndex) < len(floatArgs) { + signalSummary = append(signalSummary, &model.SignalAggregationValue{ + Name: floatArgs[a.SignalIndex].Name, + Agg: string(floatArgs[a.SignalIndex].Agg), + Value: a.ValueNumber, + }) + } + if a.SignalType == ch.LocType 
{ + loc := &model.Location{ + Latitude: a.ValueLocation.Latitude, + Longitude: a.ValueLocation.Longitude, + Hdop: a.ValueLocation.HDOP, + } + if a.SignalIndex == 0 { + startLoc = loc + } else { + endLoc = loc + } + } + } + sortSegmentSignals(signalSummary) + eventCounts, err := r.chService.GetEventCounts(ctx, subject, dayStart, dayEnd, eventNames) + if err != nil { + return nil, nil, nil, nil, handleDBError(ctx, err) + } + eventCountMap := make(map[string]int) + for _, ec := range eventCounts { + eventCountMap[ec.Name] = ec.Count + } + var eventSummary []*model.EventCount + if len(eventNames) > 0 { + eventSummary = make([]*model.EventCount, len(eventNames)) + for i, name := range eventNames { + eventSummary[i] = &model.EventCount{Name: name, Count: eventCountMap[name]} + } + } else { + eventSummary = make([]*model.EventCount, 0, len(eventCountMap)) + for name, count := range eventCountMap { + eventSummary = append(eventSummary, &model.EventCount{Name: name, Count: count}) + } + } + return signalSummary, startLoc, endLoc, eventSummary, nil +} diff --git a/internal/repositories/validate_test.go b/internal/repositories/validate_test.go index 3853fd4..0782f2d 100644 --- a/internal/repositories/validate_test.go +++ b/internal/repositories/validate_test.go @@ -90,3 +90,52 @@ func TestValidateSegmentArgs(t *testing.T) { require.Error(t, err) }) } + +func TestValidateSegmentConfig(t *testing.T) { + validConfig := &model.SegmentConfig{} + otherMechanism := model.DetectionMechanismIgnitionDetection + idlingMechanism := model.DetectionMechanismStaticRpm + + t.Run("nil config", func(t *testing.T) { + require.NoError(t, validateSegmentConfig(nil, otherMechanism)) + require.NoError(t, validateSegmentConfig(nil, idlingMechanism)) + }) + + t.Run("valid config other mechanism", func(t *testing.T) { + require.NoError(t, validateSegmentConfig(validConfig, otherMechanism)) + }) + + t.Run("valid config staticRpm with idling fields", func(t *testing.T) { + cfg := &model.SegmentConfig{ + 
MaxIdleRpm: ptr(1000), + SignalCountThreshold: ptr(5), + } + require.NoError(t, validateSegmentConfig(cfg, idlingMechanism)) + }) + + t.Run("staticRpm maxIdleRpm out of range", func(t *testing.T) { + cfg := &model.SegmentConfig{MaxIdleRpm: ptr(100)} + require.Error(t, validateSegmentConfig(cfg, idlingMechanism)) + cfg.MaxIdleRpm = ptr(4000) + require.Error(t, validateSegmentConfig(cfg, idlingMechanism)) + }) +} + +func TestValidateSegmentLimit(t *testing.T) { + t.Run("nil limit", func(t *testing.T) { + require.NoError(t, validateSegmentLimit(nil)) + }) + t.Run("valid limit", func(t *testing.T) { + require.NoError(t, validateSegmentLimit(ptr(1))) + require.NoError(t, validateSegmentLimit(ptr(100))) + require.NoError(t, validateSegmentLimit(ptr(200))) + }) + t.Run("limit too low", func(t *testing.T) { + require.Error(t, validateSegmentLimit(ptr(0))) + }) + t.Run("limit too high", func(t *testing.T) { + require.Error(t, validateSegmentLimit(ptr(201))) + }) +} + +func ptr(i int) *int { return &i } diff --git a/internal/service/ch/ch.go b/internal/service/ch/ch.go index f159c1b..4d64bf7 100644 --- a/internal/service/ch/ch.go +++ b/internal/service/ch/ch.go @@ -12,6 +12,7 @@ import ( "github.com/DIMO-Network/telemetry-api/internal/config" "github.com/DIMO-Network/telemetry-api/internal/graph/model" "github.com/aarondl/sqlboiler/v4/queries/qm" + "github.com/prometheus/client_golang/prometheus" ) const ( @@ -133,6 +134,44 @@ func (s *Service) GetAggregatedSignals(ctx context.Context, aggArgs *model.Aggre return signals, nil } +// GetAggregatedSignalsForRanges returns aggregated signals for multiple time ranges (one per segment) in one query. +// Only FloatArgs and LocationArgs are used; StringArgs and ApproxLocArgs are ignored. 
+func (s *Service) GetAggregatedSignalsForRanges(ctx context.Context, tokenID uint32, ranges []TimeRange, globalFrom, globalTo time.Time, floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) ([]*AggSignalForRange, error) { + if len(ranges) == 0 { + return nil, nil + } + if len(floatArgs) == 0 && len(locationArgs) == 0 { + return []*AggSignalForRange{}, nil + } + stmt, args, err := getBatchAggQuery(tokenID, ranges, globalFrom, globalTo, floatArgs, locationArgs) + if err != nil { + return nil, err + } + timer := prometheus.NewTimer(GetAggregatedSignalsForRangesLatency) + rows, err := s.conn.Query(ctx, stmt, args...) + timer.ObserveDuration() + if err != nil { + return nil, fmt.Errorf("failed querying clickhouse for batch agg: %w", err) + } + var result []*AggSignalForRange + for rows.Next() { + var segIdx int16 // ClickHouse multiIf returns Int16 for segment indices + var row AggSignalForRange + if err := rows.Scan(&segIdx, &row.SignalType, &row.SignalIndex, &row.ValueNumber, &row.ValueString, &row.ValueLocation); err != nil { + _ = rows.Close() + return nil, fmt.Errorf("failed scanning batch agg row: %w", err) + } + row.SegIndex = int(segIdx) + rowCopy := row + result = append(result, &rowCopy) + } + _ = rows.Close() + if rows.Err() != nil { + return nil, fmt.Errorf("clickhouse batch agg row error: %w", rows.Err()) + } + return result, nil +} + func (s *Service) getSignals(ctx context.Context, stmt string, args []any) ([]*vss.Signal, error) { rows, err := s.conn.Query(ctx, stmt, args...) if err != nil { @@ -155,6 +194,16 @@ func (s *Service) getSignals(ctx context.Context, stmt string, args []any) ([]*v return signals, nil } +// AggSignalForRange is AggSignal with segment index (from GetAggregatedSignalsForRanges). 
+type AggSignalForRange struct { + SegIndex int + SignalType FieldType + SignalIndex uint16 + ValueNumber float64 + ValueString string + ValueLocation vss.Location +} + type AggSignal struct { // SignalType describes the type of values in the aggregation: // float, string, or approximate location. @@ -254,13 +303,57 @@ func (s *Service) GetSignalSummaries(ctx context.Context, tokenId uint32, filter return signalSummaries, nil } +// EventCount is the count of events by name in a time range. +type EventCount struct { + Name string + Count int +} + +// EventCountForRange is event count by name for one segment index (from GetEventCountsForRanges). +type EventCountForRange struct { + SegIndex int + Name string + Count int +} + +// EventSummary is the per-event summary for a vehicle (all time): name, count, first/last seen. +type EventSummary struct { + Name string + Count uint64 + FirstSeen time.Time + LastSeen time.Time +} + +// GetEventSummaries returns per-event summaries (name, count, first/last seen) for a subject (vehicle), all time. +func (s *Service) GetEventSummaries(ctx context.Context, subject string) ([]*EventSummary, error) { + stmt, args := getEventSummariesQuery(subject) + rows, err := s.conn.Query(ctx, stmt, args...) 
+ if err != nil { + return nil, fmt.Errorf("failed querying clickhouse for event summaries: %w", err) + } + var result []*EventSummary + for rows.Next() { + var es EventSummary + if err := rows.Scan(&es.Name, &es.Count, &es.FirstSeen, &es.LastSeen); err != nil { + _ = rows.Close() + return nil, fmt.Errorf("failed scanning event summary row: %w", err) + } + result = append(result, &es) + } + _ = rows.Close() + if rows.Err() != nil { + return nil, fmt.Errorf("clickhouse event summary row error: %w", rows.Err()) + } + return result, nil +} + func (s *Service) GetEvents(ctx context.Context, subject string, from, to time.Time, filter *model.EventFilter) ([]*vss.Event, error) { mods := []qm.QueryMod{ qm.Select(vss.EventNameCol, vss.EventSourceCol, vss.EventTimestampCol, vss.EventDurationNsCol, vss.EventMetadataCol, vss.EventTagsCol), qm.From(vss.EventTableName), qm.Where(eventSubjectWhere, subject), - qm.Where(timestampFrom, from), - qm.Where(timestampTo, to), + qm.Where(vss.EventTimestampCol+" >= ?", from), + qm.Where(vss.EventTimestampCol+" < ?", to), qm.OrderBy(vss.EventTimestampCol + " DESC"), } mods = appendEventFilterMods(mods, filter) @@ -286,3 +379,59 @@ func (s *Service) GetEvents(ctx context.Context, subject string, from, to time.T } return events, nil } + +// GetEventCounts returns event counts by name in the given time range. +// If eventNames is nil or empty, all event names in the range are returned; otherwise only requested names (missing names get count 0 in the caller). +func (s *Service) GetEventCounts(ctx context.Context, subject string, from, to time.Time, eventNames []string) ([]*EventCount, error) { + stmt, args := getEventCountsQuery(subject, from, to, eventNames) + rows, err := s.conn.Query(ctx, stmt, args...) 
+ if err != nil { + return nil, fmt.Errorf("failed querying clickhouse for event counts: %w", err) + } + var result []*EventCount + for rows.Next() { + var name string + var count uint64 // ClickHouse count(*) is UInt64 + if err := rows.Scan(&name, &count); err != nil { + _ = rows.Close() + return nil, fmt.Errorf("failed scanning event count row: %w", err) + } + result = append(result, &EventCount{Name: name, Count: int(count)}) + } + _ = rows.Close() + if rows.Err() != nil { + return nil, fmt.Errorf("clickhouse event count row error: %w", rows.Err()) + } + return result, nil +} + +// GetEventCountsForRanges returns event counts by name per segment index for multiple time ranges in one query. +// If eventNames is nil or empty, all event names are returned; otherwise only requested names (missing get count 0 at call site). +func (s *Service) GetEventCountsForRanges(ctx context.Context, subject string, ranges []TimeRange, eventNames []string) ([]*EventCountForRange, error) { + if len(ranges) == 0 { + return nil, nil + } + stmt, args := getEventCountsForRangesQuery(subject, ranges, eventNames) + timer := prometheus.NewTimer(GetEventCountsForRangesLatency) + rows, err := s.conn.Query(ctx, stmt, args...) 
+ timer.ObserveDuration() + if err != nil { + return nil, fmt.Errorf("failed querying clickhouse for event counts by range: %w", err) + } + var result []*EventCountForRange + for rows.Next() { + var segIdx int16 // ClickHouse multiIf returns Int16 for small segment indices + var name string + var count uint64 + if err := rows.Scan(&segIdx, &name, &count); err != nil { + _ = rows.Close() + return nil, fmt.Errorf("failed scanning event count by range row: %w", err) + } + result = append(result, &EventCountForRange{SegIndex: int(segIdx), Name: name, Count: int(count)}) + } + _ = rows.Close() + if rows.Err() != nil { + return nil, fmt.Errorf("clickhouse event count by range row error: %w", rows.Err()) + } + return result, nil +} diff --git a/internal/service/ch/metrics.go b/internal/service/ch/metrics.go new file mode 100644 index 0000000..ec98b5c --- /dev/null +++ b/internal/service/ch/metrics.go @@ -0,0 +1,36 @@ +package ch + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + // GetSegmentsLatency measures latency of segment detection by mechanism + GetSegmentsLatency = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "telemetry_ch_get_segments_latency_seconds", + Help: "Latency of GetSegments (segment detection) in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"mechanism"}, + ) + + // GetAggregatedSignalsForRangesLatency measures latency of batch signal aggregation for segment summaries + GetAggregatedSignalsForRangesLatency = promauto.NewHistogram( + prometheus.HistogramOpts{ + Name: "telemetry_ch_get_aggregated_signals_for_ranges_latency_seconds", + Help: "Latency of GetAggregatedSignalsForRanges in seconds", + Buckets: prometheus.DefBuckets, + }, + ) + + // GetEventCountsForRangesLatency measures latency of batch event counts for segment summaries + GetEventCountsForRangesLatency = promauto.NewHistogram( + prometheus.HistogramOpts{ + Name: 
"telemetry_ch_get_event_counts_for_ranges_latency_seconds", + Help: "Latency of GetEventCountsForRanges in seconds", + Buckets: prometheus.DefBuckets, + }, + ) +) diff --git a/internal/service/ch/queries.go b/internal/service/ch/queries.go index 885dabb..d9c1ca0 100644 --- a/internal/service/ch/queries.go +++ b/internal/service/ch/queries.go @@ -53,17 +53,6 @@ const ( latestTimestamp = "max(" + vss.TimestampCol + ") as ts" ) -// Aggregation functions for float signals. -const ( - avgGroup = "avg(" + vss.ValueNumberCol + ")" - randFloatGroup = "groupArraySample(1, %d)(" + vss.ValueNumberCol + ")[1]" - minGroup = "min(" + vss.ValueNumberCol + ")" - maxGroup = "max(" + vss.ValueNumberCol + ")" - medGroup = "median(" + vss.ValueNumberCol + ")" - firstFloatGroup = "argMin(" + vss.ValueNumberCol + ", " + vss.TimestampCol + ")" - lastFloatGroup = "argMax(" + vss.ValueNumberCol + ", " + vss.TimestampCol + ")" -) - // Aggregation functions for string signals. const ( randStringGroup = "groupArraySample(1, %d)(" + vss.ValueStringCol + ")[1]" @@ -74,10 +63,8 @@ const ( ) const ( - avgLocationGroup = "CAST(tuple(avg(" + vss.ValueLocationCol + ".latitude), avg(" + vss.ValueLocationCol + ".longitude), avg(" + vss.ValueLocationCol + ".hdop)), 'Tuple(latitude Float64, longitude Float64, hdop Float64)')" - randLocationGroup = "groupArraySample(1, %d)(" + vss.ValueLocationCol + ")[1]" - firstLocationGroup = "argMin(" + vss.ValueLocationCol + ", " + vss.TimestampCol + ")" - lastLocationGroup = "argMax(" + vss.ValueLocationCol + ", " + vss.TimestampCol + ")" + locationTupleType = "Tuple(latitude Float64, longitude Float64, hdop Float64)" + locationZeroTuple = "CAST(tuple(0, 0, 0), '" + locationTupleType + "')" ) var SourceTranslations = map[string][]string{ @@ -199,38 +186,18 @@ func selectStringAggs(stringAggs []model.StringSignalArgs) qm.QueryMod { func selectLocationAggs(stringAggs []model.LocationSignalArgs) qm.QueryMod { if len(stringAggs) == 0 { - return qm.Select("CAST(tuple(0, 
0, 0), 'Tuple(latitude Float64, longitude Float64, hdop Float64)') AS " + AggLocationCol) + return qm.Select(locationZeroTuple + " AS " + AggLocationCol) } - // Add a CASE statement for each name and its corresponding aggregation function caseStmts := make([]string, 0, len(stringAggs)) for i, agg := range stringAggs { caseStmts = append(caseStmts, fmt.Sprintf("WHEN %s = %d AND %s = %d THEN %s", signalTypeCol, LocType, signalIndexCol, i, getLocationAgg(agg.Agg))) } - caseStmt := fmt.Sprintf("CASE %s ELSE CAST(tuple(0, 0, 0), 'Tuple(latitude Float64, longitude Float64, hdop Float64)') END AS %s", strings.Join(caseStmts, " "), AggLocationCol) + caseStmt := fmt.Sprintf("CASE %s ELSE %s END AS %s", strings.Join(caseStmts, " "), locationZeroTuple, AggLocationCol) return qm.Select(caseStmt) } -// returns a string representation of the aggregation function based on the aggregation type. func getFloatAggFunc(aggType model.FloatAggregation) string { - aggStr := avgGroup - switch aggType { - case model.FloatAggregationAvg: - aggStr = avgGroup - case model.FloatAggregationRand: - seed := time.Now().UnixMilli() - aggStr = fmt.Sprintf(randFloatGroup, seed) - case model.FloatAggregationMin: - aggStr = minGroup - case model.FloatAggregationMax: - aggStr = maxGroup - case model.FloatAggregationMed: - aggStr = medGroup - case model.FloatAggregationFirst: - aggStr = firstFloatGroup - case model.FloatAggregationLast: - aggStr = lastFloatGroup - } - return aggStr + return floatAggExpr(vss.ValueNumberCol, vss.TimestampCol, aggType) } // returns a string representation of the aggregation function based on the aggregation type. @@ -253,18 +220,92 @@ func getStringAgg(aggType model.StringAggregation) string { } func getLocationAgg(aggType model.LocationAggregation) string { - aggLoc := firstLocationGroup + return locationAggExpr(vss.ValueLocationCol, vss.TimestampCol, aggType) +} + +// floatAggExpr returns the aggregation expression for a float agg type using the given column exprs (e.g. 
 value_number and timestamp, optionally qualified). +func floatAggExpr(valueNumberExpr, timestampExpr string, aggType model.FloatAggregation) string { + switch aggType { + case model.FloatAggregationAvg: + return "avg(" + valueNumberExpr + ")" + case model.FloatAggregationRand: + return fmt.Sprintf("groupArraySample(1, %d)("+valueNumberExpr+")[1]", time.Now().UnixMilli()) + case model.FloatAggregationMin: + return "min(" + valueNumberExpr + ")" + case model.FloatAggregationMax: + return "max(" + valueNumberExpr + ")" + case model.FloatAggregationMed: + return "median(" + valueNumberExpr + ")" + case model.FloatAggregationFirst: + return "argMin(" + valueNumberExpr + ", " + timestampExpr + ")" + case model.FloatAggregationLast: + return "argMax(" + valueNumberExpr + ", " + timestampExpr + ")" + default: + return "avg(" + valueNumberExpr + ")" + } +} + +// locationAggExpr returns the aggregation expression for a location agg type using the given column exprs. +func locationAggExpr(valueLocationExpr, timestampExpr string, aggType model.LocationAggregation) string { switch aggType { case model.LocationAggregationAvg: - aggLoc = avgLocationGroup + return "CAST(tuple(avg(" + valueLocationExpr + ".latitude), avg(" + valueLocationExpr + ".longitude), avg(" + valueLocationExpr + ".hdop)), '" + locationTupleType + "')" case model.LocationAggregationRand: - aggLoc = randLocationGroup + return fmt.Sprintf("groupArraySample(1, %d)("+valueLocationExpr+")[1]", time.Now().UnixMilli()) case model.LocationAggregationFirst: - aggLoc = firstLocationGroup + return "argMin(" + valueLocationExpr + ", " + timestampExpr + ")" case model.LocationAggregationLast: - aggLoc = lastLocationGroup + return "argMax(" + valueLocationExpr + ", " + timestampExpr + ")" + default: + return "argMin(" + valueLocationExpr + ", " + timestampExpr + ")" + } +} + +// batchFloatCaseExprQualified builds the float-aggregation CASE expression with column references qualified by alias (alias.value_number, alias.timestamp). 
+func batchFloatCaseExprQualified(alias string, floatArgs []model.FloatSignalArgs) string { + return batchFloatCaseExprWithAlias(alias, floatArgs) +} + +func batchFloatCaseExprWithAlias(alias string, floatArgs []model.FloatSignalArgs) string { + if len(floatArgs) == 0 { + return "NULL AS " + AggNumberCol + } + typeCol, indexCol := signalTypeCol, signalIndexCol + vNum, ts := vss.ValueNumberCol, vss.TimestampCol + if alias != "" { + typeCol, indexCol = alias+"."+signalTypeCol, alias+"."+signalIndexCol + vNum, ts = alias+"."+vss.ValueNumberCol, alias+"."+vss.TimestampCol + } + parts := make([]string, 0, len(floatArgs)) + for i, agg := range floatArgs { + expr := floatAggExpr(vNum, ts, agg.Agg) + parts = append(parts, fmt.Sprintf("WHEN %s = %d AND %s = %d THEN %s", typeCol, FloatType, indexCol, i, expr)) + } + return fmt.Sprintf("CASE %s ELSE NULL END AS %s", strings.Join(parts, " "), AggNumberCol) +} + +// batchLocationCaseExprQualified builds the location-aggregation CASE expression with column references qualified by alias (alias.value_location, alias.timestamp). 
+func batchLocationCaseExprQualified(alias string, locationArgs []model.LocationSignalArgs) string { + return batchLocationCaseExprWithAlias(alias, locationArgs) +} + +func batchLocationCaseExprWithAlias(alias string, locationArgs []model.LocationSignalArgs) string { + zeroLoc := locationZeroTuple + " AS " + AggLocationCol + if len(locationArgs) == 0 { + return zeroLoc + } + typeCol, indexCol := signalTypeCol, signalIndexCol + vLoc, ts := vss.ValueLocationCol, vss.TimestampCol + if alias != "" { + typeCol, indexCol = alias+"."+signalTypeCol, alias+"."+signalIndexCol + vLoc, ts = alias+"."+vss.ValueLocationCol, alias+"."+vss.TimestampCol + } + parts := make([]string, 0, len(locationArgs)) + for i, agg := range locationArgs { + expr := locationAggExpr(vLoc, ts, agg.Agg) + parts = append(parts, fmt.Sprintf("WHEN %s = %d AND %s = %d THEN %s", typeCol, LocType, indexCol, i, expr)) } - return aggLoc + return fmt.Sprintf("CASE %s ELSE %s END AS %s", strings.Join(parts, " "), locationZeroTuple, AggLocationCol) } // getLatestQuery creates a query to get the latest signal value for each signal names @@ -517,6 +558,53 @@ func getAggQuery(aggArgs *model.AggregatedSignalArgs) (string, []any, error) { return stmt, args, nil } +// getBatchAggQuery returns a query that computes the same aggregations as getAggQuery for multiple +// time ranges (segments) in one round-trip. Only FloatArgs and LocationArgs are supported. +// Result columns: seg_idx (Int16), signal_type, signal_index, value_number, value_string, value_location. 
+func getBatchAggQuery(tokenID uint32, ranges []TimeRange, globalFrom, globalTo time.Time, floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) (string, []any, error) { + if len(ranges) == 0 { + return "", nil, errors.New("no ranges for batch agg") + } + if len(floatArgs) == 0 && len(locationArgs) == 0 { + return "", nil, errors.New("no aggregations for batch agg") + } + valueTable := buildBatchAggValueTable(floatArgs, locationArgs) + multiIf := buildSegmentIndexMultiIf(vss.TimestampCol, len(ranges)) + args := make([]any, 0, 2*len(ranges)+3) + for _, r := range ranges { + args = append(args, r.From, r.To) + } + args = append(args, tokenID, globalFrom, globalTo) + inner := buildBatchAggInner(valueTable, multiIf) + outer := buildBatchAggOuter(inner, floatArgs, locationArgs) + return outer, args, nil +} + +func buildBatchAggValueTable(floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) string { + valuesArgs := make([]string, 0, len(floatArgs)+len(locationArgs)) + for i, agg := range floatArgs { + valuesArgs = append(valuesArgs, aggTableEntry(FloatType, i, agg.Name)) + } + for i, agg := range locationArgs { + valuesArgs = append(valuesArgs, aggTableEntry(LocType, i, agg.Name)) + } + return fmt.Sprintf("VALUES('%s', %s) as %s ON %s.%s = %s.%s", valueTableDef, strings.Join(valuesArgs, ", "), aggTableName, vss.TableName, vss.NameCol, aggTableName, vss.NameCol) +} + +func buildBatchAggInner(valueTable, multiIf string) string { + selectList := multiIf + ", " + signalTypeCol + ", " + signalIndexCol + ", " + vss.TimestampCol + ", " + vss.ValueNumberCol + ", " + vss.ValueStringCol + ", " + vss.ValueLocationCol + return "SELECT " + selectList + " FROM " + vss.TableName + " INNER JOIN " + valueTable + + " WHERE " + tokenIDWhere + " AND " + vss.TimestampCol + " >= ? AND " + vss.TimestampCol + " < ?" 
+} + +func buildBatchAggOuter(inner string, floatArgs []model.FloatSignalArgs, locationArgs []model.LocationSignalArgs) string { + const alias = "batch_inner" + selectList := alias + ".seg_idx, " + alias + "." + signalTypeCol + ", " + alias + "." + signalIndexCol + ", " + + batchFloatCaseExprQualified(alias, floatArgs) + ", NULL AS " + AggStringCol + ", " + batchLocationCaseExprQualified(alias, locationArgs) + groupBy := alias + ".seg_idx, " + alias + "." + signalTypeCol + ", " + alias + "." + signalIndexCol + return "SELECT " + selectList + " FROM (" + inner + ") AS " + alias + " WHERE " + alias + ".seg_idx >= 0 GROUP BY " + groupBy +} + func buildFloatConditionList(fil *model.SignalFloatFilter) []qm.QueryMod { if fil == nil { return nil @@ -621,6 +709,19 @@ func aggTableEntry(ft FieldType, index int, name string) string { return fmt.Sprintf("(%d, %d, '%s')", ft, index, name) } +// buildSegmentIndexMultiIf returns "multiIf( (tsCol >= ? AND tsCol < ?), 0, ..., -1) AS seg_idx" for n ranges. +func buildSegmentIndexMultiIf(timestampCol string, nRanges int) string { + if nRanges == 0 { + return "toInt32(-1) AS seg_idx" + } + cond := "(" + timestampCol + " >= ? AND " + timestampCol + " < ?)" + parts := make([]string, 0, nRanges) + for i := 0; i < nRanges; i++ { + parts = append(parts, cond+", "+fmt.Sprintf("%d", i)) + } + return "multiIf(" + strings.Join(parts, ", ") + ", -1) AS seg_idx" +} + func getDistinctQuery(tokenId uint32, filter *model.SignalFilter) (string, []any) { mods := []qm.QueryMod{ qm.Distinct(vss.NameCol), @@ -678,6 +779,86 @@ func appendEventFilterMods(mods []qm.QueryMod, filter *model.EventFilter) []qm.Q return mods } +// getEventSummariesQuery returns a query that summarizes events by name for a subject (all time). 
+func getEventSummariesQuery(subject string) (string, []any) { + mods := []qm.QueryMod{ + qm.Select(vss.EventNameCol + " AS name"), + qm.Select("count(*) AS count"), + qm.Select("MIN(" + vss.EventTimestampCol + ") AS first_seen"), + qm.Select("MAX(" + vss.EventTimestampCol + ") AS last_seen"), + qm.From(vss.EventTableName), + qm.Where(eventSubjectWhere, subject), + qm.GroupBy(vss.EventNameCol), + qm.OrderBy(vss.EventNameCol), + } + return newQuery(mods...) +} + +// getEventCountsQuery returns a query that counts events by name in the given time range. +// If eventNames is non-nil and non-empty, only those names are included; otherwise all names. +func getEventCountsQuery(subject string, from, to time.Time, eventNames []string) (string, []any) { + mods := []qm.QueryMod{ + qm.Select(vss.EventNameCol + " AS name"), + qm.Select("count(*) AS count"), + qm.From(vss.EventTableName), + qm.Where(eventSubjectWhere, subject), + qm.Where(vss.EventTimestampCol+" >= ?", from), + qm.Where(vss.EventTimestampCol+" < ?", to), + qm.GroupBy(vss.EventNameCol), + } + if len(eventNames) > 0 { + mods = append(mods, qm.WhereIn(vss.EventNameCol+" IN ?", eventNames)) + } + return newQuery(mods...) +} + +// TimeRange is a [From, To) interval for batch event count queries. +type TimeRange struct { + From, To time.Time +} + +// getEventCountsForRangesQuery returns a query that counts events by name per segment index +// for multiple time ranges in one round-trip. Ranges are (from, to) exclusive on to. +// Result columns: seg_idx (Int32), name (String), count (UInt64). 
+func getEventCountsForRangesQuery(subject string, ranges []TimeRange, eventNames []string) (string, []any) { + if len(ranges) == 0 { + return eventCountsForRangesEmptyQuery(), nil + } + multiIf := buildSegmentIndexMultiIf(vss.EventTimestampCol, len(ranges)) + args := buildEventCountsForRangesArgs(ranges, subject, eventNames) + innerSelect := buildEventCountsForRangesInner(multiIf, eventNames) + stmt := "SELECT seg_idx, name, count(*) AS count FROM (" + innerSelect + ") WHERE seg_idx >= 0 GROUP BY seg_idx, name" + return stmt, args +} + +func eventCountsForRangesEmptyQuery() string { + return "SELECT toInt32(-1) AS seg_idx, '' AS name, toUInt64(0) AS count FROM " + vss.EventTableName + " WHERE 0" +} + +func buildEventCountsForRangesArgs(ranges []TimeRange, subject string, eventNames []string) []any { + args := make([]any, 0, 2*len(ranges)+1+len(eventNames)) + for _, r := range ranges { + args = append(args, r.From, r.To) + } + args = append(args, subject) + for _, n := range eventNames { + args = append(args, n) + } + return args +} + +func buildEventCountsForRangesInner(multiIf string, eventNames []string) string { + inner := "SELECT " + multiIf + ", " + vss.EventNameCol + " AS name FROM " + vss.EventTableName + " PREWHERE " + eventSubjectWhere + if len(eventNames) > 0 { + placeholders := make([]string, len(eventNames)) + for i := range eventNames { + placeholders[i] = "?" 
+ } + inner += " AND " + vss.EventNameCol + " IN (" + strings.Join(placeholders, ", ") + ")" + } + return inner +} + func stringFilterMod(field string, filter *model.StringValueFilter) []qm.QueryMod { var newMods []qm.QueryMod if filter == nil { diff --git a/internal/service/ch/segments.go b/internal/service/ch/segments.go index 1b60124..6c3439b 100644 --- a/internal/service/ch/segments.go +++ b/internal/service/ch/segments.go @@ -6,6 +6,7 @@ import ( "time" "github.com/DIMO-Network/telemetry-api/internal/graph/model" + "github.com/prometheus/client_golang/prometheus" ) const ( @@ -35,12 +36,16 @@ func (s *Service) GetSegments( detector = &FrequencyDetector{conn: s.conn} case model.DetectionMechanismChangePointDetection: detector = &ChangePointDetector{conn: s.conn} + case model.DetectionMechanismStaticRpm: + detector = &StaticRpmDetector{conn: s.conn} default: return nil, fmt.Errorf("unknown detection mechanism: %s", mechanism) } // Detect segments using mechanism-specific logic + timer := prometheus.NewTimer(GetSegmentsLatency.WithLabelValues(mechanism.String())) segments, err := detector.DetectSegments(ctx, tokenID, from, to, config) + timer.ObserveDuration() if err != nil { return nil, err } diff --git a/internal/service/ch/static_rpm_detector.go b/internal/service/ch/static_rpm_detector.go new file mode 100644 index 0000000..0a15fc3 --- /dev/null +++ b/internal/service/ch/static_rpm_detector.go @@ -0,0 +1,132 @@ +package ch + +import ( + "context" + "fmt" + "time" + + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/DIMO-Network/telemetry-api/internal/graph/model" +) + +const ( + defaultIdleWindowSizeSeconds = 60 // 1 minute windows (same as frequency detector) + defaultMaxIdleRpm = 1000 + engineSpeedSignalName = "powertrainCombustionEngineSpeed" // fixed; not configurable + defaultSignalCountThresholdIdle = 2 // powertrainCombustionEngineSpeed ~2/min; min samples per 1min window + defaultMinIdleRpmForEngineRunning = 0 // min_rpm > this to exclude 
engine-off +) + +// StaticRpmDetector detects segments where engine RPM remains in idle range (static/low RPM). +// Uses repeated windows of idle RPM merged like trips. +type StaticRpmDetector struct { + conn clickhouse.Conn +} + +// NewStaticRpmDetector creates a new StaticRpmDetector with the given connection. +func NewStaticRpmDetector(conn clickhouse.Conn) *StaticRpmDetector { + return &StaticRpmDetector{conn: conn} +} + +// DetectSegments implements idle-RPM-based segment detection. +func (d *StaticRpmDetector) DetectSegments( + ctx context.Context, + tokenID uint32, + from, to time.Time, + config *model.SegmentConfig, +) ([]*Segment, error) { + maxGap := defaultMinIdleSeconds + minDuration := defaultMinSegmentDurationSeconds + maxIdleRpm := defaultMaxIdleRpm + signalThreshold := defaultSignalCountThresholdIdle + + if config != nil { + if config.MinIdleSeconds != nil { + maxGap = *config.MinIdleSeconds + } + if config.MinSegmentDurationSeconds != nil { + minDuration = *config.MinSegmentDurationSeconds + } + if config.MaxIdleRpm != nil { + maxIdleRpm = *config.MaxIdleRpm + } + if config.SignalCountThreshold != nil { + signalThreshold = *config.SignalCountThreshold + } + } + + windowSize := defaultIdleWindowSizeSeconds + lookbackFrom := from.Add(-time.Duration(maxGap) * time.Second) + windows, err := d.getIdleWindows(ctx, tokenID, lookbackFrom, to, windowSize, maxIdleRpm, signalThreshold) + if err != nil { + return nil, fmt.Errorf("failed to get idle windows: %w", err) + } + + if len(windows) == 0 { + return nil, nil + } + + segments := mergeWindowsIntoSegments(tokenID, windows, from, to, maxGap, minDuration) + return segments, nil +} + +// getIdleWindows returns time windows where engine speed is in idle band (0 < rpm <= maxIdleRpm). +// Uses signal FINAL; groups by window and keeps windows with signal_count >= signalThreshold and max(rpm) <= maxIdleRpm and min(rpm) > 0. 
+func (d *StaticRpmDetector) getIdleWindows( + ctx context.Context, + tokenID uint32, + from, to time.Time, + windowSizeSeconds int, + maxIdleRpm int, + signalThreshold int, +) ([]ActiveWindow, error) { + query := ` +SELECT + toStartOfInterval(timestamp, INTERVAL ? second) AS window_start, + toStartOfInterval(timestamp, INTERVAL ? second) + INTERVAL ? second AS window_end, + count() AS signal_count, + uniq(name) AS distinct_signal_count +FROM signal FINAL +PREWHERE token_id = ? +WHERE name = ? + AND timestamp >= ? + AND timestamp < ? +GROUP BY window_start +HAVING signal_count >= ? AND max(value_number) <= ? AND min(value_number) > ? +ORDER BY window_start` + + rows, err := d.conn.Query(ctx, query, + windowSizeSeconds, windowSizeSeconds, windowSizeSeconds, + tokenID, engineSpeedSignalName, from, to, + signalThreshold, maxIdleRpm, defaultMinIdleRpmForEngineRunning) + if err != nil { + return nil, fmt.Errorf("failed querying idle windows: %w", err) + } + defer func() { _ = rows.Close() }() + + expectedWindows := int(to.Sub(from).Seconds()) / windowSizeSeconds + if expectedWindows <= 0 { + expectedWindows = 1 + } + windows := make([]ActiveWindow, 0, expectedWindows) + + for rows.Next() { + var w ActiveWindow + err := rows.Scan(&w.WindowStart, &w.WindowEnd, &w.SignalCount, &w.DistinctSignalCount) + if err != nil { + return nil, fmt.Errorf("failed scanning idle window row: %w", err) + } + windows = append(windows, w) + } + + if rows.Err() != nil { + return nil, fmt.Errorf("idle window row error: %w", rows.Err()) + } + + return windows, nil +} + +// GetMechanismName returns the name of this detection mechanism. 
+func (d *StaticRpmDetector) GetMechanismName() string { + return "staticRpm" +} diff --git a/internal/service/ch/static_rpm_detector_test.go b/internal/service/ch/static_rpm_detector_test.go new file mode 100644 index 0000000..9bb7230 --- /dev/null +++ b/internal/service/ch/static_rpm_detector_test.go @@ -0,0 +1,91 @@ +package ch + +import ( + "testing" + "time" + + "github.com/DIMO-Network/telemetry-api/internal/graph/model" + "github.com/stretchr/testify/require" +) + +func TestStaticRpmDetector_GetMechanismName(t *testing.T) { + d := &StaticRpmDetector{} + require.Equal(t, "staticRpm", d.GetMechanismName()) +} + +func TestStaticRpmDetector_DetectSegments_ConfigDefaults(t *testing.T) { + // Config: static RPM uses SignalCountThreshold (same as frequency), maxIdleRpm; engine speed signal name is fixed. + _ = model.DetectionMechanismStaticRpm + maxRpm := 900 + threshold := 5 + config := &model.SegmentConfig{ + MaxIdleRpm: &maxRpm, + SignalCountThreshold: &threshold, + MinIdleSeconds: ptr(600), + MinSegmentDurationSeconds: ptr(300), + } + require.NotNil(t, config) + require.Equal(t, 900, *config.MaxIdleRpm) + require.Equal(t, 5, *config.SignalCountThreshold) +} + +func TestStaticRpmDetector_IdleWindowsMergeIntoOneSegment(t *testing.T) { + // Static RPM detector uses mergeWindowsIntoSegments with idle windows; merge logic is shared with frequency detector. + // Verify that a run of consecutive idle windows produces one segment when gap and duration are satisfied. 
+ now := time.Now() + from := now.Add(-30 * time.Minute) + to := now.Add(-5 * time.Minute) + tokenID := uint32(1) + maxGap := 300 + minDuration := 60 + + // Consecutive 1-minute idle windows; last window ends before query 'to' so segment is completed (not ongoing) + endWindows := to.Add(-10 * time.Minute) // last window ends 10 min before query end + windows := make([]ActiveWindow, 0, 15) + for s := from; s.Before(endWindows); s = s.Add(time.Minute) { + windows = append(windows, ActiveWindow{ + WindowStart: s, + WindowEnd: s.Add(time.Minute), + SignalCount: 5, + DistinctSignalCount: 1, + }) + } + segments := mergeWindowsIntoSegments(tokenID, windows, from, to, maxGap, minDuration) + require.Len(t, segments, 1) + require.False(t, segments[0].IsOngoing) + require.NotNil(t, segments[0].EndTime) + require.True(t, segments[0].DurationSeconds >= int32(minDuration)) +} + +func TestStaticRpmDetector_TwoIdleBlocksProduceTwoSegments(t *testing.T) { + now := time.Now() + from := now.Add(-2 * time.Hour) + to := now.Add(-10 * time.Minute) + tokenID := uint32(1) + maxGap := 300 // 5 min + minDuration := 120 // 2 min + + // Block 1: 2 hours ago, 3 minutes of idle windows + block1Start := from + block1End := from.Add(3 * time.Minute) + // Block 2: 30 min ago, 3 minutes of idle windows (gap between block1 and block2 > maxGap) + block2Start := to.Add(-30 * time.Minute) + block2End := block2Start.Add(3 * time.Minute) + + windows := []ActiveWindow{ + {WindowStart: block1Start, WindowEnd: block1Start.Add(time.Minute), SignalCount: 5, DistinctSignalCount: 1}, + {WindowStart: block1Start.Add(time.Minute), WindowEnd: block1End, SignalCount: 5, DistinctSignalCount: 1}, + {WindowStart: block1End, WindowEnd: block1End.Add(time.Minute), SignalCount: 5, DistinctSignalCount: 1}, + {WindowStart: block2Start, WindowEnd: block2Start.Add(time.Minute), SignalCount: 5, DistinctSignalCount: 1}, + {WindowStart: block2Start.Add(time.Minute), WindowEnd: block2End, SignalCount: 5, DistinctSignalCount: 1}, 
+ {WindowStart: block2End, WindowEnd: block2End.Add(time.Minute), SignalCount: 5, DistinctSignalCount: 1}, + } + segments := mergeWindowsIntoSegments(tokenID, windows, from, to, maxGap, minDuration) + require.Len(t, segments, 2) + require.False(t, segments[0].IsOngoing) + require.False(t, segments[1].IsOngoing) + require.True(t, segments[0].StartTime.Equal(block1Start)) + require.True(t, segments[1].StartTime.Equal(block2Start)) +} + +func ptr(i int) *int { return &i } diff --git a/schema/attestation.graphqls b/schema/attestation.graphqls index 86d37fe..a2267e6 100644 --- a/schema/attestation.graphqls +++ b/schema/attestation.graphqls @@ -111,6 +111,11 @@ input AttestationFilter { """ limit: Int + """ + Cursor for pagination (exclusive). + """ + cursor: Time + """ Filter attestations by tags. """ diff --git a/schema/base.graphqls b/schema/base.graphqls index feb894f..17ca61b 100644 --- a/schema/base.graphqls +++ b/schema/base.graphqls @@ -145,6 +145,30 @@ type DataSummary { data summary of an individual signal """ signalDataSummary: [signalDataSummary!]! + + """ + Events known to the vehicle: per-event name, count, and first/last seen. + """ + eventDataSummary: [eventDataSummary!]! +} + +type eventDataSummary { + """ + Event name + """ + name: String! + """ + Number of times this event occurred for the vehicle + """ + numberOfEvents: Uint64! + """ + First seen timestamp + """ + firstSeen: Time! + """ + Last seen timestamp + """ + lastSeen: Time! } type signalDataSummary { @@ -236,7 +260,7 @@ type SignalLocation { timestamp: Time! """ - value of the signal + location (latitude, longitude, hdop) at this timestamp. """ value: Location! } @@ -271,6 +295,24 @@ type Location { hdop: Float! } +""" +Result of aggregating a float signal over an interval. Used by segments and daily activity summaries. +Same shape as one row of aggregated signal data (name, aggregation type, computed value). +""" +type SignalAggregationValue { + name: String! + agg: String! + value: Float! 
+} + +""" +Event name and count. Used by segments, daily activity, and event summaries. +""" +type EventCount { + name: String! + count: Int! +} + """ Filters that apply to locations. """ diff --git a/schema/segments.graphqls b/schema/segments.graphqls index d76f968..fa41854 100644 --- a/schema/segments.graphqls +++ b/schema/segments.graphqls @@ -19,6 +19,12 @@ enum DetectionMechanism { Best alternative when ignition signal is unavailable - same accuracy, same speed as frequency analysis. """ changePointDetection + + """ + Static RPM: Segments are contiguous periods where engine RPM remains in idle range. + Uses repeated windows of idle RPM (e.g. powertrainCombustionEngineSpeed <= maxIdleRpm) merged like trips. + """ + staticRpm } extend type Query { @@ -29,10 +35,14 @@ extend type Query { Detection mechanisms: - ignitionDetection: Uses 'isIgnitionOn' signal with configurable debouncing - frequencyAnalysis: Analyzes signal update frequency to detect activity periods - - sparseSampling: Samples 5-10% of signals for cost-effective detection + - changePointDetection: CUSUM-based regime change detection + - staticRpm: Idle RPM windows merged into segments Segment IDs are stable and consistent across queries as long as the segment start is captured in the underlying data source. + + When signalRequests and/or eventRequests are provided, each segment includes + optional signals, start/end points (end only when not ongoing), and eventCounts. """ segments( tokenId: Int! @@ -40,7 +50,60 @@ extend type Query { to: Time! mechanism: DetectionMechanism! config: SegmentConfig - ): [Segment!] @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) + signalRequests: [SegmentSignalRequest!] + eventRequests: [SegmentEventRequest!] + """ + Maximum number of segments to return. Default 100, max 200. + """ + limit: Int = 100 + """ + Cursor for pagination: return only segments with startTime > after (exclusive). 
+ Pass the startTime of the last segment from the previous page for the next page. + """ + after: Time + ): [Segment!]! @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) + + """ + Returns one record per calendar day in the requested date range (activity segments only). + Mechanism must be ignitionDetection, frequencyAnalysis, or changePointDetection (staticRpm not allowed). + Maximum date range: 30 days. + """ + dailyActivity( + tokenId: Int! + from: Time! + to: Time! + mechanism: DetectionMechanism! + config: SegmentConfig + signalRequests: [SegmentSignalRequest!] + eventRequests: [SegmentEventRequest!] + timezone: String + ): [DailyActivity!]! @requiresVehicleToken @requiresAllOfPrivileges(privileges: [VEHICLE_ALL_TIME_LOCATION, VEHICLE_NON_LOCATION_DATA]) +} + +input SegmentSignalRequest { + name: String! + agg: FloatAggregation! +} + +input SegmentEventRequest { + name: String! +} + +type DailyActivity { + """Start of that calendar day (midnight in requested timezone), as UTC.""" + date: Time! + """Number of activity segments that started or fell within that day.""" + segmentCount: Int! + """Sum of segment durations (total active time that day) in seconds.""" + duration: Int! + """Start of day (timestamp = day start, value = location). Same shape as Segment.start. Null if not available.""" + start: SignalLocation + """End of day (timestamp = day end, location). Same shape as Segment.end. Null if not available.""" + end: SignalLocation + """Per-day signal aggregates (same shape as segment signals).""" + signals: [SignalAggregationValue!]! + """Per-day event counts.""" + eventCounts: [EventCount!]! } input SegmentConfig { @@ -60,43 +123,54 @@ input SegmentConfig { minSegmentDurationSeconds: Int = 240 """ - [frequencyAnalysis only] Minimum signal count per window for activity detection. - Higher values = more conservative (filters parked telemetry better). 
- Lower values = more sensitive (works for sparse signal vehicles). - Default: 10 (tuned to match ignition detection accuracy) - Min: 1, Max: 3600 + [frequencyAnalysis] Minimum signal count per window for activity detection. + [staticRpm] Minimum samples per window to consider it idle (same semantics). + Higher values = more conservative. Lower values = more sensitive. + Default: 10, Min: 1, Max: 3600 """ signalCountThreshold: Int = 10 + + """ + [staticRpm only] Upper bound for idle RPM. Windows with max(RPM) <= this are considered idle. + Default: 1500, Min: 300, Max: 3000 + """ + maxIdleRpm: Int = 1500 } type Segment { """ - Segment start timestamp (actual activity start transition) + Segment start (timestamp and location). Uses SignalLocation; always present. """ - startTime: Time! + start: SignalLocation! """ - Segment end timestamp (activity end after debounce period). - Null if segment is ongoing (extends beyond query range). + Segment end (timestamp and location). Uses SignalLocation; omitted when isOngoing is true. """ - endTime: Time + end: SignalLocation """ - Duration in seconds. - If ongoing: from start to query 'to' time. - If complete: from start to end. + Duration in seconds. If ongoing: from start to query 'to'. If complete: from start to end. """ - durationSeconds: Int! + duration: Int! """ True if segment extends beyond query time range (last activity is ongoing). + When true, end is not included in the response. """ isOngoing: Boolean! """ True if segment started before query time range. - Indicates startTime may be approximate. """ startedBeforeRange: Boolean! + + """ + Per-segment signal aggregates. Same shape as signals elsewhere (name, agg, value). + """ + signals: [SignalAggregationValue!] + """ + Per-segment event counts. + """ + eventCounts: [EventCount!] }