open-telemetry / opentelemetry-collector-contrib

Contrib repository for the OpenTelemetry Collector
https://opentelemetry.io
Apache License 2.0

[azuremonitorexporter] ai.cloud.role and ai.cloud.roleinstance are not populated #187

Closed · youngbupark closed 4 years ago

youngbupark commented 4 years ago

Collector and SDK version

Problem

azuremonitorexporter does not populate the service name into the cloudRole and cloudRoleInstance properties of the envelope, so the application map shows an incorrect service name.

The exporter code is supposed to populate the service name, but the App Insights telemetry doesn't include the cloudRole field.
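
For reference, a minimal sketch (not the exporter's actual implementation) of the mapping this issue expects, treating the envelope's context tags as a plain map. The helper name applyCloudRoleTags is hypothetical; ai.cloud.role / ai.cloud.roleInstance are the Application Insights context tag keys named in the title, and service.name / service.instance.id are the OpenTelemetry resource attributes that should feed them:

package main

import "fmt"

// applyCloudRoleTags copies the OpenTelemetry service resource attributes
// onto the Application Insights envelope tags. ai.cloud.role is what the
// Application Map uses to label each node; ai.cloud.roleInstance labels
// the individual instance.
func applyCloudRoleTags(resourceAttrs, envelopeTags map[string]string) {
	if name, ok := resourceAttrs["service.name"]; ok {
		envelopeTags["ai.cloud.role"] = name
	}
	if instance, ok := resourceAttrs["service.instance.id"]; ok {
		envelopeTags["ai.cloud.roleInstance"] = instance
	}
}

func main() {
	tags := map[string]string{}
	applyCloudRoleTags(map[string]string{"service.name": "trace-demo"}, tags)
	fmt.Println(tags) // expected: map[ai.cloud.role:trace-demo]
}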

Application map

The test code (see below) uses the Jaeger exporter and configures the service name "trace-demo". The expected service name at the bottom of the circle is "trace-demo", but it shows "dapr-dev-insights".

[Application Map screenshot: the node is labeled "dapr-dev-insights" instead of "trace-demo"]

Dependency Telemetry

| Field | Value |
| --- | --- |
| timestamp [UTC] | 2020-04-24T13:38:22.411464Z |
| id | 71c8d87ea48d50b5 |
| name | bar |
| success | True |
| resultCode | 0 |
| duration | 0.004 |
| performanceBucket | <250ms |
| itemType | request |
| customDimensions | {"float":"312.23","span.kind":"server","exporter":"jaeger"} |
| operation_Name | bar |
| operation_Id | 07a742e8cd5e7afa33d8bb34c2c59f9b |
| operation_ParentId | dfc8071f1b93436b |
| client_Type | PC |
| client_Model | Other |
| client_OS | Other |
| client_IP | 0.0.0.0 |
| client_City | xxx |
| client_StateOrProvince | xx |
| client_CountryOrRegion | United States |
| client_Browser | Go-http-client 1.1 |
| appId | b61b23aa-7a2e-4182-9431-8689af7bd8d5 |
| appName | dapr-dev-insight |
| iKey | b723ef3d-a015-4e6e-84bf-e898d528f677 |
| itemId | ddd5f422-8630-11ea-bc9c-936b910cbc1c |
| itemCount | 1 |

Configurations

OpenTelemetry configuration

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      jaeger:
        protocols:
          thrift_http:
            endpoint: "0.0.0.0:14268"
    processors:
      queued_retry:
      batch:
    extensions:
      health_check:
      pprof:
        endpoint: :1888
      zpages:
        endpoint: :55679
    exporters:
      azuremonitor:
      azuremonitor/2:
        endpoint: "https://dc.services.visualstudio.com/v2/track"
        instrumentation_key: "ikey"
        # maxbatchsize is the maximum number of items that can be queued before calling the configured endpoint
        maxbatchsize: 100
        # maxbatchinterval is the maximum time to wait before calling the configured endpoint.
        maxbatchinterval: 10s
    service:
      extensions: [pprof, zpages, health_check]
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [azuremonitor/2]
          processors: [batch, queued_retry]
---
apiVersion: v1
kind: Service
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
  - name: otel # Jaeger thrift_http receiver endpoint.
    port: 14268
    protocol: TCP
    targetPort: 14268
  selector:
    component: otel-collector
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  replicas: 1  # scale out based on your usage
  selector:
    matchLabels:
      app: opentelemetry
  template:
    metadata:
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      containers:
      - name: otel-collector
        image: otel/opentelemetry-collector-contrib:0.3.0
        command:
          - "/otelcontribcol"
          - "--config=/conf/otel-collector-config.yaml"
        resources:
          limits:
            cpu: 1
            memory: 2Gi
          requests:
            cpu: 200m
            memory: 400Mi
        ports:
          - containerPort: 14268 # Jaeger thrift_http receiver endpoint.
        volumeMounts:
          - name: otel-collector-config-vol
            mountPath: /conf
          #- name: otel-collector-secrets
          #  mountPath: /secrets
        livenessProbe:
          httpGet:
            path: /
            port: 13133
        readinessProbe:
          httpGet:
            path: /
            port: 13133
      volumes:
        - configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
          name: otel-collector-config-vol
#       - secret:
#            name: otel-collector-secrets
#            items:
#              - key: cert.pem
#                path: cert.pem
#              - key: key.pem
#                path: key.pem
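
For context, an illustrative sketch of the batching behaviour the maxbatchsize / maxbatchinterval comments in the config above describe: items queue up and the configured endpoint is called when either the queue reaches maxbatchsize or maxbatchinterval has elapsed. This is not the exporter's actual implementation, just the semantics of those two settings.

package main

import (
	"fmt"
	"time"
)

// runBatcher queues incoming items and calls send when either the batch
// reaches maxBatchSize or maxBatchInterval elapses, mirroring the two
// exporter settings described in the config above.
func runBatcher(items <-chan string, maxBatchSize int, maxBatchInterval time.Duration, send func([]string)) {
	batch := make([]string, 0, maxBatchSize)
	ticker := time.NewTicker(maxBatchInterval)
	defer ticker.Stop()

	flush := func() {
		if len(batch) == 0 {
			return
		}
		send(append([]string(nil), batch...)) // hand off a copy, then reuse the buffer
		batch = batch[:0]
	}

	for {
		select {
		case item, ok := <-items:
			if !ok { // input closed: send whatever is left and stop
				flush()
				return
			}
			batch = append(batch, item)
			if len(batch) >= maxBatchSize { // size limit reached
				flush()
			}
		case <-ticker.C: // interval elapsed
			flush()
		}
	}
}

func main() {
	ch := make(chan string, 5)
	for i := 0; i < 5; i++ {
		ch <- fmt.Sprintf("envelope-%d", i)
	}
	close(ch)
	runBatcher(ch, 100, 10*time.Second, func(b []string) {
		fmt.Printf("sending %d envelopes to the endpoint\n", len(b))
	})
}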

Test Code

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/api/core"
    "go.opentelemetry.io/otel/api/global"
    "go.opentelemetry.io/otel/api/key"
    "go.opentelemetry.io/otel/api/trace"

    "go.opentelemetry.io/otel/exporters/trace/jaeger"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// initTracer creates a new trace provider instance and registers it as global trace provider.
func initTracer() func() {
    // Create and install Jaeger export pipeline
    _, flush, err := jaeger.NewExportPipeline(
        jaeger.WithCollectorEndpoint("http://localhost:14268/api/traces"),
        jaeger.WithProcess(jaeger.Process{
            ServiceName: "trace-demo",
            Tags: []core.KeyValue{
                key.String("exporter", "jaeger"),
                key.Float64("float", 312.23),
            },
        }),
        jaeger.RegisterAsGlobal(),
        jaeger.WithSDK(&sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
    )
    if err != nil {
        log.Fatal(err)
    }

    return func() {
        flush()
    }
}

func main() {
    fn := initTracer()
    defer fn()

    ctx := context.Background()

    tr := global.Tracer("component-main")
    ctx, span := tr.Start(ctx, "foo", trace.WithSpanKind(trace.SpanKindClient))
    bar(ctx)
    span.End()
}

func bar(ctx context.Context) {
    tr := global.Tracer("component-bar")
    _, span := tr.Start(ctx, "bar", trace.WithSpanKind(trace.SpanKindServer))
    defer span.End()

    // Do bar...
}
youngbupark commented 4 years ago

I tried the opentelemetry-go OTLP exporter + the OpenTelemetry Collector. The result is the same.

package main

import (
    "context"
    "log"
    "time"

    "go.opentelemetry.io/otel/api/core"
    "go.opentelemetry.io/otel/api/correlation"
    "go.opentelemetry.io/otel/api/global"
    "go.opentelemetry.io/otel/api/key"
    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/api/trace"
    metricstdout "go.opentelemetry.io/otel/exporters/metric/stdout"
    "go.opentelemetry.io/otel/exporters/otlp"
    "go.opentelemetry.io/otel/sdk/metric/controller/push"
    "go.opentelemetry.io/otel/sdk/resource/resourcekeys"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

var (
    fooKey     = key.New("ex.com/foo")
    barKey     = key.New("ex.com/bar")
    lemonsKey  = key.New("ex.com/lemons")
    anotherKey = key.New("ex.com/another")
)

// initTracer creates and registers trace provider instance.
func initTracer() {
    var err error
    exp, err := otlp.NewExporter(otlp.WithInsecure(), otlp.WithAddress("localhost:9090"))
    if err != nil {
        log.Panicf("failed to initialize trace stdout exporter %v", err)
        return
    }
    tp, err := sdktrace.NewProvider(
        sdktrace.WithSyncer(exp),
        sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
        sdktrace.WithResourceAttributes(key.String(resourcekeys.ServiceKeyName, "trace-demo"), key.String(resourcekeys.ServiceKeyInstanceID, "trace-demo")))
    if err != nil {
        log.Panicf("failed to initialize trace provider %v", err)
    }
    global.SetTraceProvider(tp)
}

func initMeter() *push.Controller {
    pusher, err := metricstdout.InstallNewPipeline(metricstdout.Config{
        Quantiles:   []float64{0.5, 0.9, 0.99},
        PrettyPrint: false,
    })
    if err != nil {
        log.Panicf("failed to initialize metric stdout exporter %v", err)
    }
    return pusher
}

func main() {
    defer initMeter().Stop()
    initTracer()

    tracer := global.Tracer("ex.com/basic")
    meter := global.Meter("ex.com/basic")

    commonLabels := []core.KeyValue{lemonsKey.Int(10), key.String("A", "1"), key.String("B", "2"), key.String("C", "3")}

    oneMetricCB := func(result metric.Float64ObserverResult) {
        result.Observe(1, commonLabels...)
    }
    _ = metric.Must(meter).RegisterFloat64Observer("ex.com.one", oneMetricCB,
        metric.WithDescription("An observer set to 1.0"),
    )

    measureTwo := metric.Must(meter).NewFloat64Measure("ex.com.two")

    ctx := context.Background()

    ctx = correlation.NewContext(ctx,
        fooKey.String("foo1"),
        barKey.String("bar1"),
    )

    measure := measureTwo.Bind(commonLabels...)
    defer measure.Unbind()

    err := tracer.WithSpan(ctx, "operation", func(ctx context.Context) error {

        trace.SpanFromContext(ctx).AddEvent(ctx, "Nice operation!", key.New("bogons").Int(100))

        trace.SpanFromContext(ctx).SetAttributes(anotherKey.String("yes"))

        meter.RecordBatch(
            // Note: call-site variables added as context Entries:
            correlation.NewContext(ctx, anotherKey.String("xyz")),
            commonLabels,

            measureTwo.Measurement(2.0),
        )

        return tracer.WithSpan(
            ctx,
            "Sub operation...",
            func(ctx context.Context) error {
                trace.SpanFromContext(ctx).SetAttributes(lemonsKey.String("five"))

                trace.SpanFromContext(ctx).AddEvent(ctx, "Sub span event")

                measure.Record(ctx, 1.3)

                return nil
            },
        )
    })
    if err != nil {
        panic(err)
    }

    time.Sleep(time.Minute * 3)
}
bogdandrutu commented 4 years ago

This issue seems to be completed.