Merge pull request #315 from meysamhadeli/feat/add-full-observibility-with-otel-collector

feat/add full observability with otel collector
This commit is contained in:
Meysam Hadeli 2025-02-14 02:09:53 +03:30 committed by GitHub
commit abe4860a1c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
59 changed files with 14141 additions and 1538 deletions

View File

@ -0,0 +1,8 @@
# Dashboards
- [Introducing ASP.NET Core metrics and Grafana dashboards in .NET 8](https://devblogs.microsoft.com/dotnet/introducing-aspnetcore-metrics-and-grafana-dashboards-in-dotnet-8/)
- [ASP.NET Core](https://grafana.com/grafana/dashboards/19924-asp-net-core/)
- [ASP.NET Core Endpoint](https://grafana.com/grafana/dashboards/19925-asp-net-core-endpoint/)
- [Node Exporter Quickstart and Dashboard](https://grafana.com/grafana/dashboards/13978-node-exporter-quickstart-and-dashboard/)
- [PostgreSQL Exporter Quickstart and Dashboard](https://grafana.com/grafana/dashboards/14114-postgres-overview/)
- [RabbitMQ-Overview](https://grafana.com/grafana/dashboards/10991-rabbitmq-overview/)

View File

@ -0,0 +1,892 @@
{
"__inputs": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.4.3"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"annotations": {
"list": []
},
"editable": false,
"gnetId": 13978,
"graphTooltip": 0,
"hideControls": false,
"id": null,
"links": [],
"refresh": "",
"rows": [
{
"collapse": false,
"collapsed": false,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
"id": 2,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "(\n (1 - rate(node_cpu_seconds_total{job=\"node\", mode=\"idle\", instance=\"$instance\"}[$__interval]))\n/ ignoring(cpu) group_left\n count without (cpu)( node_cpu_seconds_total{job=\"node\", mode=\"idle\", instance=\"$instance\"})\n)\n",
"format": "time_series",
"interval": "1m",
"intervalFactor": 5,
"legendFormat": "{{cpu}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "CPU Usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": 1,
"min": 0,
"show": true
},
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": 1,
"min": 0,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {},
"id": 3,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "node_load1{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "1m load average",
"refId": "A"
},
{
"expr": "node_load5{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "5m load average",
"refId": "B"
},
{
"expr": "node_load15{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "15m load average",
"refId": "C"
},
{
"expr": "count(node_cpu_seconds_total{job=\"node\", instance=\"$instance\", mode=\"idle\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "logical cores",
"refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Load Average",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
}
]
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6",
"type": "row"
},
{
"collapse": false,
"collapsed": false,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
"id": 4,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"span": 9,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "(\n node_memory_MemTotal_bytes{job=\"node\", instance=\"$instance\"}\n-\n node_memory_MemFree_bytes{job=\"node\", instance=\"$instance\"}\n-\n node_memory_Buffers_bytes{job=\"node\", instance=\"$instance\"}\n-\n node_memory_Cached_bytes{job=\"node\", instance=\"$instance\"}\n)\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory used",
"refId": "A"
},
{
"expr": "node_memory_Buffers_bytes{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory buffers",
"refId": "B"
},
{
"expr": "node_memory_Cached_bytes{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory cached",
"refId": "C"
},
{
"expr": "node_memory_MemFree_bytes{job=\"node\", instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory free",
"refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Memory Usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
}
]
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "$datasource",
"format": "percent",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": true,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {},
"id": 5,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"span": 3,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"tableColumn": "",
"targets": [
{
"expr": "100 -\n(\n avg(node_memory_MemAvailable_bytes{job=\"node\", instance=\"$instance\"})\n/\n avg(node_memory_MemTotal_bytes{job=\"node\", instance=\"$instance\"})\n* 100\n)\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "80, 90",
"title": "Memory Usage",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6",
"type": "row"
},
{
"collapse": false,
"collapsed": false,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {},
"id": 6,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [
{
"alias": "/ read| written/",
"yaxis": 1
},
{
"alias": "/ io time/",
"yaxis": 2
}
],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "rate(node_disk_read_bytes_total{job=\"node\", instance=\"$instance\", device!=\"\"}[$__interval])",
"format": "time_series",
"interval": "1m",
"intervalFactor": 2,
"legendFormat": "{{device}} read",
"refId": "A"
},
{
"expr": "rate(node_disk_written_bytes_total{job=\"node\", instance=\"$instance\", device!=\"\"}[$__interval])",
"format": "time_series",
"interval": "1m",
"intervalFactor": 2,
"legendFormat": "{{device}} written",
"refId": "B"
},
{
"expr": "rate(node_disk_io_time_seconds_total{job=\"node\", instance=\"$instance\", device!=\"\"}[$__interval])",
"format": "time_series",
"interval": "1m",
"intervalFactor": 2,
"legendFormat": "{{device}} io time",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Disk I/O",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {},
"id": 7,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [
{
"alias": "used",
"color": "#E0B400"
},
{
"alias": "available",
"color": "#73BF69"
}
],
"spaceLength": 10,
"span": 6,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "sum(\n max by (device) (\n node_filesystem_size_bytes{job=\"node\", instance=\"$instance\", fstype!=\"\"}\n -\n node_filesystem_avail_bytes{job=\"node\", instance=\"$instance\", fstype!=\"\"}\n )\n)\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "used",
"refId": "A"
},
{
"expr": "sum(\n max by (device) (\n node_filesystem_avail_bytes{job=\"node\", instance=\"$instance\", fstype!=\"\"}\n )\n)\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "available",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Disk Space Usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
}
]
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6",
"type": "row"
},
{
"collapse": false,
"collapsed": false,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {},
"id": 8,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "rate(node_network_receive_bytes_total{job=\"node\", instance=\"$instance\", device!=\"lo\"}[$__interval])",
"format": "time_series",
"interval": "1m",
"intervalFactor": 2,
"legendFormat": "{{device}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Network Received",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {},
"id": 9,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"sideWidth": null,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "rate(node_network_transmit_bytes_total{job=\"node\", instance=\"$instance\", device!=\"lo\"}[$__interval])",
"format": "time_series",
"interval": "1m",
"intervalFactor": 2,
"legendFormat": "{{device}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Network Transmitted",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
}
]
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": false,
"title": "Dashboard Row",
"titleSize": "h6",
"type": "row"
}
],
"schemaVersion": 14,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"current": {
"text": "Prometheus",
"value": "Prometheus"
},
"hide": 0,
"label": null,
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"allValue": null,
"current": {},
"datasource": "$datasource",
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "instance",
"options": [],
"query": "label_values(node_exporter_build_info{job=\"node\"}, instance)",
"refresh": 2,
"regex": "",
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "browser",
"title": "Node Exporter Quickstart and Dashboard",
"version": 0,
"description": "A quickstart to setup Prometheus Node Exporter with preconfigured dashboards, alerting rules, and recording rules."
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,14 @@
# https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
apiVersion: 1
providers:
- name: "default"
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
allowUiUpdates: true
updateIntervalSeconds: 5 # how often Grafana will scan for changed dashboards
options:
path: /var/lib/grafana/dashboards # path to dashboards on disk

View File

@ -0,0 +1,88 @@
# https://grafana.com/docs/grafana/latest/administration/provisioning/
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared/grafana-datasources.yaml
# https://github.com/grafana/intro-to-mltp/blob/main/grafana/provisioning/datasources/datasources.yaml
apiVersion: 1
datasources:
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared/grafana-datasources.yaml
- name: Prometheus
type: prometheus
typeName: Prometheus
uid: prometheus-uid
access: proxy
orgId: 1
url: http://prometheus:9090
basicAuth: false
isDefault: true
readOnly: false
user: ''
database: ''
version: 1
editable: false
jsonData:
httpMethod: GET
- name: Jaeger
type: jaeger
access: proxy
url: http://jaeger-all-in-one:16686
editable: false
uid: jaeger-uid
- name: Zipkin
type: zipkin
access: proxy
url: http://zipkin-all-in-one:9411
editable: false
uid: zipkin-uid
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared/grafana-datasources.yaml
- name: Tempo
type: tempo
access: proxy
orgId: 1
url: http://tempo:3200
basicAuth: false
isDefault: false
version: 1
editable: false
apiVersion: 1
uid: tempo-uid
jsonData:
httpMethod: GET
serviceMap:
datasourceUid: prometheus-uid
streamingEnabled:
search: true
#https://github.com/grafana/intro-to-mltp/blob/main/grafana/provisioning/datasources/datasources.yaml
- name: Loki
type: loki
access: proxy
uid: loki-uid
url: http://loki:3100
user: ''
database: ''
readOnly: false
jsonData:
derivedFields:
- datasourceUid: tempo-uid
matcherRegex: "^.*?traceI[d|D]=(\\w+).*$"
name: traceId
url: '$${__value.raw}'
- name: Kibana
type: elasticsearch
url: http://elasticsearch:9200
access: proxy
isDefault: false
uid: kibana-uid
jsonData:
esVersion: 7
timeField: "@timestamp"
maxConcurrentShardRequests: 256
interval: Daily
logMessageField: "message" # Optional: Field for log messages
logLevelField: "level" # Optional: Field for log levels
editable: true

View File

@ -0,0 +1,44 @@
# https://grafana.com/docs/loki/latest/configure/examples/configuration-examples/
# https://github.com/grafana/loki/issues/2018#issuecomment-970789233
# https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/
# https://github.com/grafana/loki/blob/main/examples/getting-started/loki-config.yaml
# https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml
# https://grafana.com/docs/loki/latest/configure/examples/configuration-examples/#1-local-configuration-exampleyaml
---
# https://grafana.com/docs/loki/latest/configure/examples/configuration-examples/#1-local-configuration-exampleyaml
auth_enabled: false
# This is a complete configuration to deploy Loki backed by the filesystem.
# The index will be shipped to the storage via tsdb-shipper.
server:
http_listen_port: 3100
common:
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
replication_factor: 1
path_prefix: /tmp/loki
schema_config:
configs:
- from: 2020-05-15
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
storage_config:
filesystem:
directory: /tmp/loki/chunks
# https://grafana.com/docs/loki/latest/send-data/otel/
# https://grafana.com/docs/loki/latest/send-data/otel/#changing-the-default-mapping-of-otlp-to-loki-format
limits_config:
# this attribute should be `true` when we use `otlphttp/loki`, but if we want to use `loki component` from `opentelemetry-collector-contrib` it should be false.
allow_structured_metadata: true

View File

@ -0,0 +1,131 @@
# ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/examples/demo/otel-collector-config.yaml
# https://opentelemetry.io/docs/collector/configuration/
# https://opentelemetry.io/docs/collector/architecture/
# https://betterstack.com/community/guides/observability/opentelemetry-collector/
# https://signoz.io/blog/opentelemetry-collector-complete-guide/
# This configuration sets up an OpenTelemetry Collector that receives trace data via the OTLP protocol over HTTP on port 4318, applies batch processing, and then exports the processed traces
# to exporter components like `Jaeger` endpoint located at `jaeger-all-in-one:4317`. It also includes a health_check extension for monitoring the collector's status.
# Receivers in the OpenTelemetry Collector are components that collect telemetry data (traces, metrics, and logs) from various sources, such as instrumented applications or agents.
# They act as entry points, converting incoming data into OpenTelemetry's internal format for processing and export.
# https://betterstack.com/community/guides/observability/opentelemetry-collector/#exploring-the-opentelemetry-collector-components
# https://opentelemetry.io/docs/collector/architecture/#receivers
# https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md
# https://opentelemetry.io/docs/collector/configuration/#receivers
receivers:
# supported receivers
# https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver
# instead of specifying details explicitly we can just use `otlp` and it uses both grpc and http
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
# prometheus:
# config:
# scrape_configs:
# - job_name: 'node-exporter'
# scrape_interval: 10s
# static_configs:
# - targets: [ 'node-exporter:9100' ]
# Processors in the OpenTelemetry Collector modify and enhance telemetry data by filtering, transforming, enriching, or batching it to prepare it for export.
# https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor
processors:
batch: # Batches logs for better performance
# - Exporters in the OpenTelemetry Collector send processed telemetry data to backend systems like observability platforms, databases, or cloud services for storage, visualization, and analysis.
# - The `key` follows the `type/name` format, where `type` specifies the exporter `type` (e.g., otlp, kafka, prometheus), and `name` (optional) can be appended to provide a unique name for multiple instances of the same type
# https://betterstack.com/community/guides/observability/opentelemetry-collector/#exploring-the-opentelemetry-collector-components
# https://opentelemetry.io/docs/collector/architecture/#exporters
# https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter
# https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter
# https://opentelemetry.io/docs/collector/configuration/#exporters
exporters:
# valid values: [prometheusremotewrite zipkin otlphttp file kafka prometheus debug nop otlp opencensus]
# Prometheus exporter metrics
prometheus:
endpoint: "0.0.0.0:8889"
prometheusremotewrite:
endpoint: "http://prometheus:9090/api/v1/write"
# https://grafana.com/docs/loki/latest/send-data/otel/
# https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/lokiexporter/README.md
otlphttp/loki:
endpoint: "http://loki:3100/otlp"
tls:
insecure: true
# # we can also use `loki component` from `opentelemetry-collector-contrib` if we don't want to use builtin `otlphttp` exporter type and `http://loki:3100/otlp` loki endpoint
# loki:
# endpoint: "http://loki:3100/loki/api/v1/push"
# tls:
# insecure: true
debug:
# https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter
# using `elasticsearch` from `opentelemetry-collector-contrib` components because it doesn't exist in `opentelemetry-collector`
elasticsearch:
endpoint: "http://elasticsearch:9200"
zipkin:
endpoint: "http://zipkin-all-in-one:9411/api/v2/spans"
format: proto
# export collected telemetry traces to jaeger OTLP grpc port, we can send data to other available endpoints and ports on jaeger as well
otlp/jaeger:
endpoint: "http://jaeger-all-in-one:4317"
tls:
insecure: true
otlp/tempo:
endpoint: "http://tempo:4317"
tls:
insecure: true
# seq-otlp:
# endpoint: "http://seq:5341/ingest/otlp"
# https://opentelemetry.io/docs/collector/configuration/#extensions
# https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md
extensions:
pprof:
endpoint: 0.0.0.0:1888
zpages:
endpoint: 0.0.0.0:55679
health_check:
endpoint: 0.0.0.0:13133
# - The service section is used to configure what components are enabled in the Collector based on the configuration found in the receivers, processors, exporters, and extensions sections.
# - If a component is configured, but not defined within the service section, then it's not enabled.
# https://betterstack.com/community/guides/observability/opentelemetry-collector/#exploring-the-opentelemetry-collector-components
# https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md
# https://opentelemetry.io/docs/collector/architecture/
# https://opentelemetry.io/docs/collector/configuration/#service
service:
# The `service.extensions` subsection determines which of the configured extensions will be enabled
extensions: [pprof, zpages, health_check]
# The `service.pipeline` Each pipeline starts with one or more receivers collecting data, which is then processed sequentially by processors (applying transformations, filtering, or sampling).
# The processed data is finally sent to all configured exporters, ensuring each receives a copy. Components must be pre-configured in their respective sections before being used in a pipeline.
  # pipelines activate predefined components; defined components are disabled by default
pipelines:
traces:
receivers: [otlp]
processors: [batch]
# https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#exporter-selection
exporters: [debug, zipkin, otlp/jaeger, otlp/tempo]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [debug, prometheusremotewrite, prometheus]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlphttp/loki, elasticsearch]

View File

@ -0,0 +1,48 @@
# ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/examples/demo/prometheus.yaml
# https://prometheus.io/docs/introduction/first_steps/
# https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
global:
scrape_interval: 5s
scrape_configs:
  # when we use otel-collector we should comment out the other jobs in the prometheus config, and we read configs from `otel-collector-config`
- job_name: "otel-collector"
scrape_interval: 10s
static_configs:
# otel-collector Prometheus exporter metrics
- targets: [ 'otel-collector:8889' ]
- targets: [ 'otel-collector:8888' ]
- job_name: "prometheus"
static_configs:
- targets: ["prometheus:9090"]
# # https://prometheus.io/docs/guides/node-exporter/
# # https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
# - job_name: "node-exporter"
# static_configs:
# - targets: [ 'node-exporter:9100' ]
# # if we don't use otel collector we should uncomment this
#  # scrape application metrics
# # http://localhost:4000/metrics by AddPrometheusExporter()
# - job_name: vertical-slice-template-api
# scrape_interval: 10s
# metrics_path: /metrics
# static_configs:
# - targets: ['host.docker.internal:4000']
#
# # if we don't use otel collector we should uncomment this
#  # scrape application health metrics
# # http://localhost:4000/health/metrics by AddPrometheusExporter()
# - job_name: vertical-slice-template-api-healthchecks
# scrape_interval: 10s
# metrics_path: /health/metrics
# static_configs:
# - targets: ['host.docker.internal:4000']
## https://github.com/grafana/tempo/blob/main/example/docker-compose/shared/prometheus.yaml
# - job_name: 'tempo'
# static_configs:
# - targets: [ 'tempo:3200' ]

View File

@ -0,0 +1,49 @@
# https://grafana.com/docs/tempo/latest/configuration/
# https://github.com/grafana/tempo/blob/main/example/docker-compose/local/tempo.yaml
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared/tempo.yaml
stream_over_http_enabled: true
server:
http_listen_port: 3200
log_level: info
distributor:
receivers:
otlp:
protocols:
grpc:
endpoint: "tempo:4317"
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
compactor:
compaction:
block_retention: 1h # overall Tempo trace retention. set for demo purposes
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /var/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
traces_storage:
path: /var/tempo/generator/traces
storage:
trace:
backend: local # backend configuration to use
wal:
path: /var/tempo/wal # where to store the wal locally
local:
path: /var/tempo/blocks
overrides:
defaults:
metrics_generator:
processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
generate_native_histograms: both

View File

@ -0,0 +1,362 @@
# ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/examples/demo/docker-compose.yaml
# ref: https://github.com/joaofbantunes/DotNetMicroservicesObservabilitySample/blob/main/docker-compose.yml
# ref: https://github.com/oskardudycz/EventSourcing.NetCore/blob/main/docker-compose.yml
# https://github.com/grafana/intro-to-mltp
# https://stackoverflow.com/questions/65272764/ports-are-not-available-listen-tcp-0-0-0-0-50070-bind-an-attempt-was-made-to
name: booking-microservices
services:
#######################################################
# rabbitmq
#######################################################
rabbitmq:
image: rabbitmq:management
container_name: rabbitmq
restart: unless-stopped
ports:
- "5672:5672"
- "15672:15672"
# volumes:
# - rabbitmq:/var/lib/rabbitmq
networks:
- infrastructure
#######################################################
# postgres
#######################################################
postgres:
image: postgres:latest
container_name: postgres
restart: unless-stopped
ports:
- '5432:5432'
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
command:
- "postgres"
- "-c"
- "wal_level=logical"
- "-c"
- "max_prepared_transactions=10"
volumes:
- postgres-data:/var/lib/postgresql/data
networks:
- infrastructure
#######################################################
# EventStoreDB
#######################################################
eventstore:
container_name: eventstore
image: eventstore/eventstore:latest
restart: unless-stopped
environment:
- EVENTSTORE_CLUSTER_SIZE=1
- EVENTSTORE_RUN_PROJECTIONS=All
- EVENTSTORE_START_STANDARD_PROJECTIONS=True
- EVENTSTORE_HTTP_PORT=2113
- EVENTSTORE_INSECURE=True
- EVENTSTORE_ENABLE_ATOM_PUB_OVER_HTTP=True
ports:
- "2113:2113"
networks:
- infrastructure
#######################################################
# Mongo
#######################################################
mongo:
image: mongo:latest
container_name: mongo
restart: unless-stopped
# environment:
# - MONGO_INITDB_ROOT_USERNAME=root
# - MONGO_INITDB_ROOT_PASSWORD=secret
ports:
- 27017:27017
networks:
- infrastructure
#######################################################
# Redis
#######################################################
redis:
image: redis
container_name: redis
restart: unless-stopped
ports:
- 6379:6379
networks:
- infrastructure
#######################################################
# jaeger
# https://www.jaegertracing.io/docs/1.64/deployment/
# https://www.jaegertracing.io/docs/1.6/deployment/
#######################################################
jaeger-all-in-one:
image: jaegertracing/all-in-one:latest
container_name: jaeger-all-in-one
restart: unless-stopped
ports:
- "6831:6831/udp" # UDP port for Jaeger agent
- "16686:16686" # endpoints and Jaeger UI
- "14268:14268" # HTTP port to accept trace spans directly from clients
- "14317:4317" # OTLP gRPC receiver for jaeger
- "14318:4318" # OTLP http receiver for jaeger
# - "9411" # Accepts Zipkin spans - /api/v2/spans
networks:
- infrastructure
#######################################################
# zipkin
# https://zipkin.io/pages/quickstart
#######################################################
zipkin-all-in-one:
image: openzipkin/zipkin:latest
container_name: zipkin-all-in-one
restart: unless-stopped
ports:
- "9411:9411"
networks:
- infrastructure
#######################################################
# otel-collector
# https://opentelemetry.io/docs/collector/installation/
# https://github.com/open-telemetry/opentelemetry-collector
# https://github.com/open-telemetry/opentelemetry-collector-contrib
# we can use the non-contrib `otel/opentelemetry-collector` docker image from the `https://github.com/open-telemetry/opentelemetry-collector` repository but,
# if we need more components like `elasticsearch` we should use the `otel/opentelemetry-collector-contrib` image from the `https://github.com/open-telemetry/opentelemetry-collector-contrib` repository.
#######################################################
otel-collector:
image: otel/opentelemetry-collector-contrib:latest
container_name: otel-collector
restart: unless-stopped
command: ["--config=/etc/otelcol-contrib/config.yaml"]
volumes:
- ./../configs/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
ports:
- "11888:1888" # pprof extension
- "8888:8888" # Prometheus metrics exposed by the Collector
- "8889:8889" # Prometheus exporter metrics
- "13133:13133" # health_check extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP http receiver
- "55679:55679" # zpages extension
networks:
- infrastructure
#######################################################
# prometheus
# https://prometheus.io/docs/introduction/first_steps/
# https://prometheus.io/docs/prometheus/3.1/installation/
# https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
#######################################################
prometheus:
image: prom/prometheus:latest
restart: unless-stopped
ports:
- "9090:9090"
volumes:
- ./../configs/prometheus.yaml:/etc/prometheus/prometheus.yml
# to pass one flag, such as "--log.level=debug" or "--web.enable-remote-write-receiver", we need to override the whole command, as we can't just pass one extra argument
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
- "--web.console.templates=/usr/share/prometheus/consoles"
# need this for the OpenTelemetry collector to be able to put metrics into Prometheus
- "--web.enable-remote-write-receiver"
# - "--log.level=debug"
networks:
- infrastructure
#######################################################
# node-exporter
# https://prometheus.io/docs/guides/node-exporter/
# https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
#######################################################
node-exporter:
image: prom/node-exporter:latest
container_name: node-exporter
restart: unless-stopped
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
ports:
- "9101:9100"
networks:
- infrastructure
#######################################################
# grafana
# https://grafana.com/docs/grafana/latest/administration/provisioning/
# https://grafana.com/docs/grafana/latest/setup-grafana/installation/docker/
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-docker/
# https://github.com/grafana/intro-to-mltp/blob/main/grafana/provisioning/datasources/datasources.yaml
#######################################################
grafana:
image: grafana/grafana:latest
container_name: grafana
restart: unless-stopped
environment:
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
# - GF_AUTH_ANONYMOUS_ENABLED=true
# - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
# - GF_AUTH_DISABLE_LOGIN_FORM=true
depends_on:
- prometheus
ports:
- "3000:3000"
volumes:
- ./../configs/grafana/provisioning:/etc/grafana/provisioning
- ./../configs/grafana/dashboards:/var/lib/grafana/dashboards
## https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/
# - ./../configs/grafana/grafana.ini:/etc/grafana/grafana.ini
networks:
- infrastructure
#######################################################
# tempo
# https://github.com/grafana/tempo/blob/main/example/docker-compose/otel-collector/docker-compose.yaml
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared
# https://github.com/grafana/tempo/blob/main/example/docker-compose/local
# https://github.com/grafana/tempo/tree/main/example/docker-compose
#######################################################
tempo:
image: grafana/tempo:latest
container_name: tempo
restart: unless-stopped
command: [ "-config.file=/etc/tempo.yaml" ]
volumes:
- ./../configs/tempo.yaml:/etc/tempo.yaml
ports:
- "3200" # tempo
- "24317:4317" # otlp grpc
- "24318:4318" # otlp http
networks:
- infrastructure
#######################################################
# loki
# https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/
# https://github.com/grafana/loki/blob/main/production/docker-compose.yaml
# https://github.com/grafana/loki/blob/main/examples/getting-started/docker-compose.yaml
#######################################################
loki:
image: grafana/loki:latest
hostname: loki
container_name: loki
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
volumes:
- ./../configs/loki-config.yaml:/etc/loki/local-config.yaml
networks:
- infrastructure
#######################################################
# elasticsearch
# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/docker.html#docker-compose-file
#######################################################
elasticsearch:
container_name: elasticsearch
restart: unless-stopped
image: docker.elastic.co/elasticsearch/elasticsearch:8.17.0
environment:
- discovery.type=single-node
- cluster.name=docker-cluster
- node.name=docker-node
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.security.enabled=false
- xpack.security.http.ssl.enabled=false
- xpack.security.transport.ssl.enabled=false
- network.host=0.0.0.0
- http.port=9200
- transport.host=localhost
- bootstrap.memory_lock=true
- cluster.routing.allocation.disk.threshold_enabled=false
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- elastic-data:/usr/share/elasticsearch/data
ports:
- ${ELASTIC_HOST_PORT:-9200}:${ELASTIC_PORT:-9200}
- 9300:9300
networks:
- infrastructure
#######################################################
# kibana
# https://www.elastic.co/guide/en/kibana/current/docker.html
#######################################################
kibana:
image: docker.elastic.co/kibana/kibana:8.17.0
container_name: kibana
restart: unless-stopped
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
ports:
- ${KIBANA_HOST_PORT:-5601}:${KIBANA_PORT:-5601}
depends_on:
- elasticsearch
networks:
- infrastructure
#######################################################
# cadvisor
#######################################################
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
container_name: cadvisor
restart: unless-stopped
ports:
- "8080:8080"
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
devices:
- /dev/kmsg
networks:
- infrastructure
networks:
infrastructure:
name: infrastructure
driver: bridge
volumes:
elastic-data:
postgres-data:

View File

@ -1,79 +1,49 @@
version: "3.3"
name: booking-microservices
services:
#######################################################
# Postgres
######################################################
postgres:
image: postgres:latest
container_name: postgres
restart: unless-stopped
ports:
- '5432:5432'
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
command:
- "postgres"
- "-c"
- "wal_level=logical"
- "-c"
- "max_prepared_transactions=10"
networks:
- booking
#######################################################
# SqlServer
#######################################################
# sql:
# container_name: sql
# image: mcr.microsoft.com/mssql/server
# restart: unless-stopped
# ports:
# - "1433:1433"
# environment:
# SA_PASSWORD: "Password@1234"
# ACCEPT_EULA: "Y"
#######################################################
# Rabbitmq
#######################################################
#######################################################
# rabbitmq
#######################################################
rabbitmq:
container_name: rabbitmq
image: rabbitmq:management
container_name: rabbitmq
restart: unless-stopped
ports:
- 5672:5672
- 15672:15672
- "5672:5672"
- "15672:15672"
# volumes:
# - rabbitmq:/var/lib/rabbitmq
networks:
- booking
#######################################################
# postgres
#######################################################
postgres:
image: postgres:latest
container_name: postgres
restart: unless-stopped
ports:
- '5432:5432'
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
command:
- "postgres"
- "-c"
- "wal_level=logical"
- "-c"
- "max_prepared_transactions=10"
volumes:
- postgres-data:/var/lib/postgresql/data
networks:
- booking
#######################################################
# Jaeger
#######################################################
jaeger:
image: jaegertracing/all-in-one
container_name: jaeger
restart: unless-stopped
networks:
- booking
ports:
- 5775:5775/udp
- 5778:5778
- 6831:6831/udp
- 6832:6832/udp
- 9411:9411
- 14268:14268
- 16686:16686
#######################################################
# EventStoreDB
#######################################################
#######################################################
# EventStoreDB
#######################################################
eventstore:
container_name: eventstore
image: eventstore/eventstore:latest
@ -88,12 +58,12 @@ services:
ports:
- "2113:2113"
networks:
- booking
- booking
#######################################################
# Mongo
#######################################################
#######################################################
# Mongo
#######################################################
mongo:
image: mongo:latest
container_name: mongo
@ -101,93 +71,260 @@ services:
# environment:
# - MONGO_INITDB_ROOT_USERNAME=root
# - MONGO_INITDB_ROOT_PASSWORD=secret
networks:
- booking
ports:
- 27017:27017
networks:
- booking
#######################################################
# Elastic Search
#######################################################
elasticsearch:
container_name: elasticsearch
image: elasticsearch:7.17.9
#######################################################
# Redis
#######################################################
redis:
image: redis
container_name: redis
restart: unless-stopped
ports:
- 9200:9200
- 6379:6379
networks:
- booking
#######################################################
# jaeger
# https://www.jaegertracing.io/docs/1.64/deployment/
# https://www.jaegertracing.io/docs/1.6/deployment/
#######################################################
jaeger-all-in-one:
image: jaegertracing/all-in-one:latest
container_name: jaeger-all-in-one
restart: unless-stopped
ports:
- "6831:6831/udp" # UDP port for Jaeger agent
- "16686:16686" # endpoints and Jaeger UI
- "14268:14268" # HTTP port to accept trace spans directly from clients
- "14317:4317" # OTLP gRPC receiver for jaeger
- "14318:4318" # OTLP http receiver for jaeger
# - "9411" # Accepts Zipkin spans - /api/v2/spans
networks:
- booking
#######################################################
# zipkin
# https://zipkin.io/pages/quickstart
#######################################################
zipkin-all-in-one:
image: openzipkin/zipkin:latest
container_name: zipkin-all-in-one
restart: unless-stopped
ports:
- "9411:9411"
networks:
- booking
#######################################################
# otel-collector
# https://opentelemetry.io/docs/collector/installation/
# https://github.com/open-telemetry/opentelemetry-collector
# https://github.com/open-telemetry/opentelemetry-collector-contrib
# we can use the non-contrib `otel/opentelemetry-collector` docker image from the `https://github.com/open-telemetry/opentelemetry-collector` repository but,
# if we need more components like `elasticsearch` we should use the `otel/opentelemetry-collector-contrib` image from the `https://github.com/open-telemetry/opentelemetry-collector-contrib` repository.
#######################################################
otel-collector:
image: otel/opentelemetry-collector-contrib:latest
container_name: otel-collector
restart: unless-stopped
command: ["--config=/etc/otelcol-contrib/config.yaml"]
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
environment:
- xpack.monitoring.enabled=true
- xpack.watcher.enabled=false
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- discovery.type=single-node
networks:
- booking
#######################################################
# Kibana
#######################################################
kibana:
container_name: kibana
image: kibana:7.17.9
restart: unless-stopped
- ./../configs/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
ports:
- 5601:5601
depends_on:
- elasticsearch
environment:
- ELASTICSEARCH_URL=elasticsearch:9200
- "11888:1888" # pprof extension
- "8888:8888" # Prometheus metrics exposed by the Collector
- "8889:8889" # Prometheus exporter metrics
- "13133:13133" # health_check extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP http receiver
- "55679:55679" # zpages extension
networks:
- booking
#######################################################
# prometheus
#######################################################
#######################################################
# prometheus
# https://prometheus.io/docs/introduction/first_steps/
# https://prometheus.io/docs/prometheus/3.1/installation/
# https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
#######################################################
prometheus:
image: prom/prometheus:latest
container_name: prometheus
restart: unless-stopped
ports:
- "9090:9090"
environment:
- TZ=UTC
volumes:
- ./monitoring/prom/prometheus.yml:/etc/prometheus/prometheus.yml
networks:
- booking
#######################################################
# grafana
#######################################################
grafana:
image: grafana/grafana
container_name: grafana
restart: unless-stopped
ports:
- "3000:3000"
volumes:
- ./monitoring/grafana-data/data:/var/lib/grafana
networks:
- booking
#######################################################
# node_exporter
#######################################################
node_exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
restart: unless-stopped
- ./../configs/prometheus.yaml:/etc/prometheus/prometheus.yml
# to pass one flag, such as "--log.level=debug" or "--web.enable-remote-write-receiver", we need to override the whole command, as we can't just pass one extra argument
command:
- '--path.rootfs=/host'
pid: host
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
- "--web.console.templates=/usr/share/prometheus/consoles"
# need this for the OpenTelemetry collector to be able to put metrics into Prometheus
- "--web.enable-remote-write-receiver"
# - "--log.level=debug"
networks:
- booking
#######################################################
# node-exporter
# https://prometheus.io/docs/guides/node-exporter/
# https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/
#######################################################
node-exporter:
image: prom/node-exporter:latest
container_name: node-exporter
restart: unless-stopped
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
ports:
- "9101:9100"
networks:
- booking
#######################################################
# grafana
# https://grafana.com/docs/grafana/latest/administration/provisioning/
# https://grafana.com/docs/grafana/latest/setup-grafana/installation/docker/
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-docker/
# https://github.com/grafana/intro-to-mltp/blob/main/grafana/provisioning/datasources/datasources.yaml
#######################################################
grafana:
image: grafana/grafana:latest
container_name: grafana
restart: unless-stopped
environment:
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
# - GF_AUTH_ANONYMOUS_ENABLED=true
# - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
# - GF_AUTH_DISABLE_LOGIN_FORM=true
depends_on:
- prometheus
ports:
- "3000:3000"
volumes:
- ./../configs/grafana/provisioning:/etc/grafana/provisioning
- ./../configs/grafana/dashboards:/var/lib/grafana/dashboards
## https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/
# - ./../configs/grafana/grafana.ini:/etc/grafana/grafana.ini
networks:
- booking
#######################################################
# tempo
# https://github.com/grafana/tempo/blob/main/example/docker-compose/otel-collector/docker-compose.yaml
# https://github.com/grafana/tempo/blob/main/example/docker-compose/shared
# https://github.com/grafana/tempo/blob/main/example/docker-compose/local
# https://github.com/grafana/tempo/tree/main/example/docker-compose
#######################################################
tempo:
image: grafana/tempo:latest
container_name: tempo
restart: unless-stopped
command: [ "-config.file=/etc/tempo.yaml" ]
volumes:
- ./../configs/tempo.yaml:/etc/tempo.yaml
ports:
- "3200" # tempo
- "24317:4317" # otlp grpc
- "24318:4318" # otlp http
networks:
- booking
#######################################################
# loki
# https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/
# https://github.com/grafana/loki/blob/main/production/docker-compose.yaml
# https://github.com/grafana/loki/blob/main/examples/getting-started/docker-compose.yaml
#######################################################
loki:
image: grafana/loki:latest
hostname: loki
container_name: loki
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
volumes:
- ./../configs/loki-config.yaml:/etc/loki/local-config.yaml
networks:
- booking
#######################################################
# elasticsearch
# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/docker.html#docker-compose-file
#######################################################
elasticsearch:
container_name: elasticsearch
restart: unless-stopped
image: docker.elastic.co/elasticsearch/elasticsearch:8.17.0
environment:
- discovery.type=single-node
- cluster.name=docker-cluster
- node.name=docker-node
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.security.enabled=false
- xpack.security.http.ssl.enabled=false
- xpack.security.transport.ssl.enabled=false
- network.host=0.0.0.0
- http.port=9200
- transport.host=localhost
- bootstrap.memory_lock=true
- cluster.routing.allocation.disk.threshold_enabled=false
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- elastic-data:/usr/share/elasticsearch/data
ports:
- ${ELASTIC_HOST_PORT:-9200}:${ELASTIC_PORT:-9200}
- 9300:9300
networks:
- booking
#######################################################
# kibana
# https://www.elastic.co/guide/en/kibana/current/docker.html
#######################################################
kibana:
image: docker.elastic.co/kibana/kibana:8.17.0
container_name: kibana
restart: unless-stopped
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
ports:
- ${KIBANA_HOST_PORT:-5601}:${KIBANA_PORT:-5601}
depends_on:
- elasticsearch
networks:
- booking
#######################################################
# cadvisor
#######################################################
@ -208,6 +345,7 @@ services:
networks:
- booking
######################################################
# Gateway
######################################################
@ -344,9 +482,11 @@ services:
networks:
booking:
name: booking
driver: bridge
volumes:
elasticsearch-data:
elastic-data:
postgres-data:

View File

@ -1,234 +0,0 @@
version: "3.3"
services:
#######################################################
# Rabbitmq
#######################################################
rabbitmq:
container_name: rabbitmq
image: rabbitmq:management
restart: unless-stopped
ports:
- 5672:5672
- 15672:15672
networks:
- booking
#######################################################
# Postgres
######################################################
postgres:
image: postgres:latest
container_name: postgres
restart: unless-stopped
ports:
- '5432:5432'
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
command:
- "postgres"
- "-c"
- "wal_level=logical"
- "-c"
- "max_prepared_transactions=10"
networks:
- booking
#######################################################
# SqlServer
#######################################################
# sql:
# container_name: sql
# image: mcr.microsoft.com/mssql/server
# restart: unless-stopped
# ports:
# - "1433:1433"
# environment:
# SA_PASSWORD: "Password@1234"
# ACCEPT_EULA: "Y"
#######################################################
# Jaeger
#######################################################
jaeger:
container_name: jaeger
image: jaegertracing/all-in-one
restart: unless-stopped
networks:
- booking
ports:
- 5775:5775/udp
- 5778:5778
- 6831:6831/udp
- 6832:6832/udp
- 9411:9411
- 14268:14268
- 16686:16686
# #######################################################
# # EventStoreDB
# #######################################################
# #https://stackoverflow.com/questions/65272764/ports-are-not-available-listen-tcp-0-0-0-0-50070-bind-an-attempt-was-made-to
eventstore:
container_name: eventstore
image: eventstore/eventstore:latest
restart: unless-stopped
environment:
- EVENTSTORE_CLUSTER_SIZE=1
- EVENTSTORE_RUN_PROJECTIONS=All
- EVENTSTORE_START_STANDARD_PROJECTIONS=True
- EVENTSTORE_HTTP_PORT=2113
- EVENTSTORE_INSECURE=True
- EVENTSTORE_ENABLE_ATOM_PUB_OVER_HTTP=True
ports:
- "2113:2113"
networks:
- booking
#######################################################
# Mongo
#######################################################
mongo:
image: mongo:latest
container_name: mongo
restart: unless-stopped
# environment:
# - MONGO_INITDB_ROOT_USERNAME=root
# - MONGO_INITDB_ROOT_PASSWORD=secret
networks:
- booking
ports:
- 27017:27017
#######################################################
# Elastic Search
#######################################################
elasticsearch:
container_name: elasticsearch
image: elasticsearch:7.17.9
restart: unless-stopped
ports:
- 9200:9200
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
environment:
- xpack.monitoring.enabled=true
- xpack.watcher.enabled=false
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- discovery.type=single-node
networks:
- booking
#######################################################
# Kibana
#######################################################
kibana:
container_name: kibana
image: kibana:7.17.9
restart: unless-stopped
ports:
- 5601:5601
depends_on:
- elasticsearch
environment:
- ELASTICSEARCH_URL=http://localhost:9200
networks:
- booking
#######################################################
# Redis
#######################################################
redis:
image: redis
container_name: redis
restart: unless-stopped
networks:
- booking
ports:
- 6379:6379
#######################################################
# prometheus
#######################################################
prometheus:
image: prom/prometheus:latest
container_name: prometheus
restart: unless-stopped
ports:
- "9090:9090"
environment:
- TZ=UTC
volumes:
- ./monitoring/prom/prometheus.yml:/etc/prometheus/prometheus.yml
networks:
- booking
#######################################################
# grafana
#######################################################
grafana:
image: grafana/grafana
container_name: grafana
restart: unless-stopped
ports:
- "3000:3000"
volumes:
- ./monitoring/grafana-data/data:/var/lib/grafana
networks:
- booking
#######################################################
# node_exporter
#######################################################
node_exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
restart: unless-stopped
command:
- '--path.rootfs=/host'
pid: host
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
networks:
- booking
#######################################################
# cadvisor
#######################################################
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
container_name: cadvisor
restart: unless-stopped
ports:
- "8080:8080"
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
devices:
- /dev/kmsg
networks:
- booking
networks:
booking:
volumes:
elasticsearch-data:

View File

@ -1,69 +0,0 @@
global:
scrape_interval: 15s
scrape_timeout: 10s
evaluation_interval: 15s
alerting:
alertmanagers:
- scheme: http
timeout: 10s
api_version: v1
static_configs:
- targets: []
scrape_configs:
- job_name: prometheus
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- localhost:9090
- job_name: cadvisor
static_configs:
- targets:
- cadvisor:8080
- job_name: node_exporter
static_configs:
- targets:
- node_exporter:9100
- job_name: flight
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- host.docker.internal:5004
- job_name: identity
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- host.docker.internal:6005
- job_name: passenger
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- host.docker.internal:6012
- job_name: booking
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- host.docker.internal:6010
- job_name: gateway
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: https
static_configs:
- targets:
- host.docker.internal:5000

File diff suppressed because it is too large Load Diff

View File

@ -29,8 +29,6 @@
<PackageReference Include="Microsoft.Extensions.DependencyModel" Version="9.0.0" />
<PackageReference Include="Npgsql" Version="9.0.1" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="OpenTelemetry.Exporter.Prometheus.AspNetCore" Version="1.10.0-beta.1" />
<PackageReference Include="OpenTelemetry.Instrumentation.GrpcNetClient" Version="1.9.0-beta.1" />
<PackageReference Include="Polly" Version="8.5.0" />
<PackageReference Include="Humanizer.Core" Version="2.14.1" />
<PackageReference Include="IdGen" Version="3.0.7" />
@ -50,10 +48,8 @@
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0" />
<PackageReference Include="MongoDB.Driver" Version="3.1.0" />
<PackageReference Include="Newtonsoft.Json" Version="13.0.3" />
<PackageReference Include="OpenTelemetry.Contrib.Instrumentation.MassTransit" Version="1.0.0-beta2" />
<PackageReference Include="Scalar.AspNetCore" Version="1.2.64" />
<PackageReference Include="Scrutor" Version="5.0.2" />
<PackageReference Include="Sentry.Serilog" Version="4.13.0" />
<PackageReference Include="Serilog" Version="4.2.0" />
<PackageReference Include="Serilog.AspNetCore" Version="8.0.3" />
<PackageReference Include="Serilog.Enrichers.Span" Version="3.1.0" />
@ -68,8 +64,8 @@
<PackageReference Include="Swashbuckle.AspNetCore" Version="7.1.0" />
<PackageReference Include="Swashbuckle.AspNetCore.SwaggerGen" Version="7.1.0" />
<PackageReference Include="Swashbuckle.AspNetCore.SwaggerUI" Version="7.1.0" />
<PackageReference Include="MassTransit" Version="8.3.3" />
<PackageReference Include="MassTransit.RabbitMQ" Version="8.3.3" />
<PackageReference Include="MassTransit" Version="8.3.6" />
<PackageReference Include="MassTransit.RabbitMQ" Version="8.3.6" />
<PackageReference Include="Duende.IdentityServer" Version="7.0.8" />
<PackageReference Include="Duende.IdentityServer.AspNetIdentity" Version="7.0.8" />
<PackageReference Include="Duende.IdentityServer.EntityFramework" Version="7.0.8" />
@ -85,14 +81,18 @@
<PackageReference Include="Unchase.Swashbuckle.AspNetCore.Extensions" Version="2.7.1" />
<PackageReference Include="Yarp.ReverseProxy" Version="2.2.0" />
<PackageReference Include="prometheus-net" Version="8.2.1" />
<PackageReference Include="prometheus-net.AspNetCore" Version="8.2.1" />
<PackageReference Include="OpenTelemetry" Version="1.10.0" />
<PackageReference Include="OpenTelemetry.Exporter.Jaeger" Version="1.6.0-rc.1" />
<PackageReference Include="OpenTelemetry.Extensions.Hosting" Version="1.10.0" />
<PackageReference Include="OpenTelemetry.Instrumentation.AspNetCore" Version="1.9.0" />
<PackageReference Include="OpenTelemetry.Instrumentation.Http" Version="1.10.0" />
<PackageReference Include="Npgsql.OpenTelemetry" Version="9.0.1" />
<PackageReference Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" Version="1.11.1" />
<PackageReference Include="OpenTelemetry.Extensions.Hosting" Version="1.11.1"/>
<PackageReference Include="OpenTelemetry.Instrumentation.AspNetCore" Version="1.11.0"/>
<PackageReference Include="OpenTelemetry.Instrumentation.GrpcNetClient" Version="1.11.0-beta.1"/>
<PackageReference Include="OpenTelemetry.Instrumentation.Http" Version="1.11.0"/>
<PackageReference Include="OpenTelemetry.Instrumentation.Process" Version="1.11.0-beta.1"/>
<PackageReference Include="OpenTelemetry.Instrumentation.Runtime" Version="1.11.0"/>
<PackageReference Include="OpenTelemetry.Exporter.Prometheus.AspNetCore" Version="1.11.0-beta.1"/>
<PackageReference Include="Grafana.OpenTelemetry" Version="1.2.0"/>
<PackageReference Include="OpenTelemetry.Exporter.Console" Version="1.11.1"/>
<PackageReference Include="OpenTelemetry.Exporter.Zipkin" Version="1.11.1"/>
<PackageReference Include="EventStore.Client.Grpc.Streams" Version="23.3.7" />
<PackageReference Include="Npgsql.EntityFrameworkCore.PostgreSQL" Version="9.0.1" />

View File

@ -2,6 +2,7 @@ using BuildingBlocks.EFCore;
using BuildingBlocks.Logging;
using BuildingBlocks.MassTransit;
using BuildingBlocks.Mongo;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.Web;
using HealthChecks.UI.Client;
using Microsoft.AspNetCore.Builder;
@ -25,13 +26,11 @@ public static class Extensions
var postgresOptions = services.GetOptions<PostgresOptions>(nameof(PostgresOptions));
var rabbitMqOptions = services.GetOptions<RabbitMqOptions>(nameof(RabbitMqOptions));
var mongoOptions = services.GetOptions<MongoOptions>(nameof(MongoOptions));
var logOptions = services.GetOptions<LogOptions>(nameof(LogOptions));
var healthChecksBuilder = services.AddHealthChecks()
.AddRabbitMQ(
rabbitConnectionString:
$"amqp://{rabbitMqOptions.UserName}:{rabbitMqOptions.Password}@{rabbitMqOptions.HostName}")
.AddElasticsearch(logOptions.Elastic.ElasticServiceUrl);
$"amqp://{rabbitMqOptions.UserName}:{rabbitMqOptions.Password}@{rabbitMqOptions.HostName}");
if (mongoOptions.ConnectionString is not null)
healthChecksBuilder.AddMongoDb(mongoOptions.ConnectionString);

View File

@ -1,8 +0,0 @@
namespace BuildingBlocks.Logging;
/// <summary>
/// Options for shipping logs to Elasticsearch (bound from configuration;
/// consumed by the Serilog setup and the Elasticsearch health check).
/// </summary>
public class ElasticOptions
{
    // When false, the Elasticsearch sink is not configured.
    public bool Enabled { get; set; }
    // Base URL of the Elasticsearch service, e.g. "http://localhost:9200".
    public string ElasticServiceUrl { get; set; }
    // Index name used for the log documents.
    public string ElasticSearchIndex { get; set; }
}

View File

@ -7,7 +7,6 @@ using Microsoft.Extensions.Configuration;
using Serilog;
using Serilog.Events;
using Serilog.Exceptions;
using Serilog.Sinks.Elasticsearch;
using Serilog.Sinks.SpectreConsole;
namespace BuildingBlocks.Logging
@ -18,10 +17,7 @@ namespace BuildingBlocks.Logging
{
builder.Host.UseSerilog((context, services, loggerConfiguration) =>
{
var environment = Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");
var logOptions = context.Configuration.GetSection(nameof(LogOptions)).Get<LogOptions>();
var appOptions = context.Configuration.GetSection(nameof(AppOptions)).Get<AppOptions>();
var logLevel = Enum.TryParse<LogEventLevel>(logOptions.Level, true, out var level)
? level
@ -39,35 +35,6 @@ namespace BuildingBlocks.Logging
.Enrich.FromLogContext()
.ReadFrom.Configuration(context.Configuration);
if (logOptions.Elastic is { Enabled: true })
{
loggerConfiguration.WriteTo.Elasticsearch(
new ElasticsearchSinkOptions(new Uri(logOptions.Elastic.ElasticServiceUrl))
{
AutoRegisterTemplate = true,
IndexFormat = $"{appOptions.Name}-{environment?.ToLower(CultureInfo.CurrentCulture)}"
});
}
if (logOptions?.Sentry is { Enabled: true })
{
var minimumBreadcrumbLevel = Enum.TryParse<LogEventLevel>(logOptions.Level, true, out var minBreadcrumbLevel)
? minBreadcrumbLevel
: LogEventLevel.Information;
var minimumEventLevel = Enum.TryParse<LogEventLevel>(logOptions.Sentry.MinimumEventLevel, true, out var minEventLevel)
? minEventLevel
: LogEventLevel.Error;
loggerConfiguration.WriteTo.Sentry(o =>
{
o.Dsn = logOptions.Sentry.Dsn;
o.MinimumBreadcrumbLevel = minimumBreadcrumbLevel;
o.MinimumEventLevel = minimumEventLevel;
});
}
if (logOptions.File is { Enabled: true })
{
var root = env.ContentRootPath;

View File

@ -3,9 +3,6 @@ namespace BuildingBlocks.Logging
/// <summary>
/// Logging options bound from the "LogOptions" configuration section.
/// </summary>
public class LogOptions
{
    // Minimum log level name; parsed into Serilog's LogEventLevel (falls back
    // to a default when the value does not parse).
    public string Level { get; set; }
    // Per-sink sub-options; a sink is only wired up when its Enabled flag is true.
    public ElasticOptions Elastic { get; set; }
    public SentryOptions Sentry { get; set; }
    public FileOptions File { get; set; }
    // Serilog output template applied to the configured sinks.
    public string LogTemplate { get; set; }
}

View File

@ -1,9 +0,0 @@
namespace BuildingBlocks.Logging;
/// <summary>
/// Options for the Sentry Serilog sink (bound from configuration).
/// </summary>
public class SentryOptions
{
    // When false, the Sentry sink is not configured.
    public bool Enabled { get; set; }
    // Sentry project DSN.
    public string Dsn { get; set; }
    // Minimum level captured as a breadcrumb (parsed into LogEventLevel).
    public string MinimumBreadcrumbLevel { get; set; }
    // Minimum level sent to Sentry as an event (parsed into LogEventLevel).
    public string MinimumEventLevel { get; set; }
}

View File

@ -1,44 +0,0 @@
using BuildingBlocks.Utils;
using BuildingBlocks.Web;
using Microsoft.Extensions.DependencyInjection;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
namespace BuildingBlocks.OpenTelemetry;
using global::OpenTelemetry.Metrics;
public static class Extensions
{
    /// <summary>
    /// Registers OpenTelemetry tracing (gRPC client, MassTransit, ASP.NET Core and
    /// HttpClient instrumentation exported to Jaeger) and metrics (ASP.NET Core
    /// hosting/Kestrel meters exported to Prometheus).
    /// </summary>
    /// <param name="services">The service collection to configure.</param>
    /// <returns>The same service collection, for chaining.</returns>
    public static IServiceCollection AddCustomOpenTelemetry(this IServiceCollection services)
    {
        services.AddOpenTelemetry()
            .WithTracing(builder => builder
                .AddGrpcClientInstrumentation()
                .AddMassTransitInstrumentation()
                .AddAspNetCoreInstrumentation()
                .AddHttpClientInstrumentation()
                // Service name for the trace resource comes from the "AppOptions"
                // configuration section.
                .SetResourceBuilder(ResourceBuilder.CreateDefault()
                    .AddService(services.GetOptions<AppOptions>("AppOptions").Name))
                .AddJaegerExporter())
            .WithMetrics(builder =>
            {
                builder.AddPrometheusExporter();
                builder.AddMeter(
                    "Microsoft.AspNetCore.Hosting",
                    "Microsoft.AspNetCore.Server.Kestrel"
                );
                // Explicit histogram bucket boundaries (in seconds) for the
                // HTTP request-duration metric.
                builder.AddView("request-duration",
                    new ExplicitBucketHistogramConfiguration
                    {
                        Boundaries = new[]
                        {
                            0, 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10
                        }
                    });
            }
            );

        return services;
    }
}

View File

@ -0,0 +1,175 @@
using System.Diagnostics;
using System.Globalization;
namespace BuildingBlocks.OpenTelemetryCollector;
internal static class ActivityExtensions
{
    /// <summary>
    /// Retrieves the tags from the parent of the current Activity, if available.
    /// When no in-process parent exists, falls back to the tags of the first
    /// activity link (treated as the parent-like context).
    /// </summary>
    /// <param name="activity">The current Activity.</param>
    /// <returns>A dictionary containing the parent tags, or an empty dictionary if no parent tags are available.</returns>
    public static Dictionary<string, object?> GetParentTags(this Activity activity)
    {
        ArgumentNullException.ThrowIfNull(activity);

        var parentTags = new Dictionary<string, object?>();

        var parentActivity = activity.Parent;
        if (parentActivity != null)
        {
            foreach (var tag in parentActivity.Tags)
            {
                parentTags[tag.Key] = tag.Value;
            }
        }
        else
        {
            // No in-process parent: use the first link's tags instead.
            foreach (var link in activity.Links)
            {
                if (link.Tags != null)
                {
                    foreach (var tag in link.Tags)
                    {
                        parentTags[tag.Key] = tag.Value;
                    }
                }

                // Only the first link is treated as the parent context.
                break;
            }
        }

        return parentTags;
    }

    /// <summary>
    /// Extracts important information from an Activity into an ActivityInfo object.
    /// </summary>
    /// <param name="activity">The Activity from which to extract information.</param>
    /// <returns>An ActivityInfo object containing the extracted information.</returns>
    public static ActivityInfo ExtractImportantInformation(this Activity activity)
    {
        ArgumentNullException.ThrowIfNull(activity);

        var activityInfo = new ActivityInfo
        {
            Name = activity.DisplayName,
            StartTime = activity.StartTimeUtc,
            Duration = activity.Duration,
            // Status is read back from the OTel status tags written by the
            // SetOkStatus / SetUnsetStatus / SetErrorStatus helpers below.
            Status =
                activity.Tags.FirstOrDefault(tag => tag.Key == TelemetryTags.Tracing.Otel.StatusCode).Value
                ?? "Unknown",
            StatusDescription = activity
                .Tags.FirstOrDefault(tag => tag.Key == TelemetryTags.Tracing.Otel.StatusDescription)
                .Value,
            Tags = activity.Tags.ToDictionary(tag => tag.Key, tag => tag.Value),
            Events = activity
                .Events.Select(e => new ActivityEventInfo
                {
                    Name = e.Name,
                    Timestamp = e.Timestamp,
                    Attributes = e.Tags.ToDictionary(tag => tag.Key, tag => tag.Value),
                })
                .ToList(),
            TraceId = activity.TraceId.ToString(),
            SpanId = activity.SpanId.ToString(),
        };

        return activityInfo;
    }

    /// <summary>
    /// Sets an "OK" status on the provided Activity, indicating a successful operation.
    /// </summary>
    /// <param name="activity">The Activity to update.</param>
    /// <param name="description">An optional description of the successful operation.</param>
    /// <returns>The updated Activity with the status and tags set.</returns>
    public static Activity SetOkStatus(this Activity activity, string? description = null)
    {
        ArgumentNullException.ThrowIfNull(activity);

        // Set the native status and mirror it into OTel status tags.
        activity.SetStatus(ActivityStatusCode.Ok, description);
        activity.SetTag(
            TelemetryTags.Tracing.Otel.StatusCode,
            nameof(ActivityStatusCode.Ok).ToUpper(CultureInfo.InvariantCulture)
        );

        if (!string.IsNullOrEmpty(description))
            activity.SetTag(TelemetryTags.Tracing.Otel.StatusDescription, description);

        return activity;
    }

    /// <summary>
    /// Sets an "Unset" status on the provided Activity, indicating no explicit status was applied.
    /// </summary>
    /// <param name="activity">The Activity to update.</param>
    /// <param name="description">An optional description of the unset status.</param>
    /// <returns>The updated Activity with the status and tags set.</returns>
    public static Activity SetUnsetStatus(this Activity activity, string? description = null)
    {
        ArgumentNullException.ThrowIfNull(activity);

        // Set the native status and mirror it into OTel status tags.
        activity.SetStatus(ActivityStatusCode.Unset, description);
        activity.SetTag(
            TelemetryTags.Tracing.Otel.StatusCode,
            nameof(ActivityStatusCode.Unset).ToUpper(CultureInfo.InvariantCulture)
        );

        if (!string.IsNullOrEmpty(description))
            activity.SetTag(TelemetryTags.Tracing.Otel.StatusDescription, description);

        return activity;
    }

    /// <summary>
    /// Sets an "Error" status on the provided Activity, indicating a failed operation.
    /// </summary>
    /// <param name="activity">The Activity to update.</param>
    /// <param name="exception">The exception associated with the error, if available.</param>
    /// <param name="description">An optional description of the error.</param>
    /// <returns>The updated Activity with the status, error details, and tags set.</returns>
    public static Activity SetErrorStatus(this Activity activity, System.Exception? exception, string? description = null)
    {
        ArgumentNullException.ThrowIfNull(activity);

        // Add detailed exception tags first (SetExceptionTags calls
        // SetStatus(Error) without a description, so it must run before the
        // description-carrying SetStatus below or it would erase it).
        activity.SetExceptionTags(exception);

        // BUG FIX: previously the native Activity status was only set inside
        // SetExceptionTags, i.e. only when an exception was supplied — an error
        // without an exception left Status as Unset while the tags said "ERROR".
        activity.SetStatus(ActivityStatusCode.Error, description);

        activity.SetTag(
            TelemetryTags.Tracing.Otel.StatusCode,
            nameof(ActivityStatusCode.Error).ToUpper(CultureInfo.InvariantCulture)
        );

        if (!string.IsNullOrEmpty(description))
            activity.SetTag(TelemetryTags.Tracing.Otel.StatusDescription, description);

        return activity;
    }

    // See https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/exceptions/
    public static Activity SetExceptionTags(this Activity activity, System.Exception? ex)
    {
        if (ex is null)
        {
            return activity;
        }

        activity.SetStatus(ActivityStatusCode.Error);
        activity.AddException(ex);
        activity.AddTag(TelemetryTags.Tracing.Exception.Message, ex.Message);
        activity.AddTag(TelemetryTags.Tracing.Exception.Stacktrace, ex.ToString());
        activity.AddTag(TelemetryTags.Tracing.Exception.Type, ex.GetType().FullName);
        return activity;
    }
}

View File

@ -0,0 +1,29 @@
using System.Diagnostics;
namespace BuildingBlocks.OpenTelemetryCollector;
/// <summary>
/// Immutable-ish snapshot of an Activity, produced by
/// ActivityExtensions.ExtractImportantInformation.
/// </summary>
public class ActivityInfo
{
    // Activity.DisplayName at capture time.
    public string Name { get; set; } = default!;
    public DateTime StartTime { get; set; }
    public TimeSpan Duration { get; set; }
    // OTel status-code string read from the activity's tags; "Unknown" when the tag is absent.
    public string Status { get; set; } = default!;
    public string? StatusDescription { get; set; }
    public IDictionary<string, string?> Tags { get; set; } = new Dictionary<string, string?>();
    public IList<ActivityEventInfo> Events { get; set; } = new List<ActivityEventInfo>();
    public string TraceId { get; set; } = default!;
    public string SpanId { get; set; } = default!;
    // NOTE(review): ParentId, Parent and Kind are never populated by
    // ExtractImportantInformation — confirm they are filled elsewhere or remove them.
    public string? ParentId { get; set; }
    public ActivityContext? Parent { get; set; }
    public ActivityKind Kind { get; set; }
}
/// <summary>
/// Snapshot of a single ActivityEvent (name, timestamp and event tags).
/// </summary>
public class ActivityEventInfo
{
    public string Name { get; set; } = default!;
    public DateTimeOffset Timestamp { get; set; }
    // Event tags, copied from ActivityEvent.Tags.
    public IDictionary<string, object?> Attributes { get; set; } = new Dictionary<string, object?>();
}

View File

@ -0,0 +1,85 @@
using BuildingBlocks.Core.CQRS;
using BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Commands;
using BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Query;
using MediatR;
namespace BuildingBlocks.OpenTelemetryCollector.Behaviors;
/// <summary>
/// MediatR pipeline behavior that wraps command/query handling in diagnostics
/// activities and records handler metrics. Requests that are neither commands
/// nor queries pass straight through to the rest of the pipeline.
/// </summary>
public class ObservabilityPipelineBehavior<TRequest, TResponse>(
    CommandHandlerActivity commandActivity,
    CommandHandlerMetrics commandMetrics,
    QueryHandlerActivity queryActivity,
    QueryHandlerMetrics queryMetrics
) : IPipelineBehavior<TRequest, TResponse>
    where TRequest : IRequest<TResponse>
    where TResponse : notnull
{
    public async Task<TResponse> Handle(TRequest message, RequestHandlerDelegate<TResponse> next, CancellationToken cancellationToken)
    {
        // BUG FIX: these two checks were swapped (isCommand tested
        // IQuery<TResponse> and isQuery tested ICommand<TResponse>), so commands
        // were recorded with query metrics/activities and vice versa.
        var isCommand = message is ICommand<TResponse>;
        var isQuery = message is IQuery<TResponse>;

        if (isCommand)
        {
            commandMetrics.StartExecuting<TRequest>();
        }

        if (isQuery)
        {
            queryMetrics.StartExecuting<TRequest>();
        }

        try
        {
            if (isCommand)
            {
                var commandResult = await commandActivity.Execute<TRequest, TResponse>(
                    async (activity, ct) => await next(),
                    cancellationToken
                );

                commandMetrics.FinishExecuting<TRequest>();
                return commandResult;
            }

            if (isQuery)
            {
                var queryResult = await queryActivity.Execute<TRequest, TResponse>(
                    async (activity, ct) => await next(),
                    cancellationToken
                );

                queryMetrics.FinishExecuting<TRequest>();
                return queryResult;
            }
        }
        catch (System.Exception)
        {
            // Attribute the failure to the matching metrics set, then rethrow.
            if (isCommand)
            {
                commandMetrics.FailedCommand<TRequest>();
            }

            if (isQuery)
            {
                queryMetrics.FailedCommand<TRequest>();
            }

            throw;
        }

        // Not a command or a query: no instrumentation, just continue the pipeline.
        return await next();
    }
}

View File

@ -0,0 +1,86 @@
using System.Diagnostics;
using BuildingBlocks.Core.CQRS;
using BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
namespace BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Commands;
/// <summary>
/// Executes command-handler work inside a diagnostics activity (span) named
/// after the command and its resolved handler type.
/// </summary>
public class CommandHandlerActivity(IDiagnosticsProvider diagnosticsProvider)
{
    /// <summary>
    /// Runs <paramref name="action"/> inside a consumer activity for <typeparamref name="TCommand"/> (no result).
    /// </summary>
    public async Task Execute<TCommand>(
        Func<Activity?, CancellationToken, Task> action,
        CancellationToken cancellationToken
    )
    {
        await diagnosticsProvider.ExecuteActivityAsync(
            BuildActivityInfo<TCommand>(),
            action,
            cancellationToken
        );
    }

    /// <summary>
    /// Runs <paramref name="action"/> inside a consumer activity for <typeparamref name="TCommand"/>,
    /// returning the action's result.
    /// </summary>
    public async Task<TResult> Execute<TCommand, TResult>(
        Func<Activity?, CancellationToken, Task<TResult>> action,
        CancellationToken cancellationToken
    )
    {
        return await diagnosticsProvider.ExecuteActivityAsync(
            BuildActivityInfo<TCommand>(),
            action,
            cancellationToken
        );
    }

    // Builds the span name and tags shared by both Execute overloads
    // (previously duplicated verbatim in each). The handler is located by
    // scanning the command's assembly for an ICommandHandler<TCommand, _>.
    private static CreateActivityInfo BuildActivityInfo<TCommand>()
    {
        var commandName = typeof(TCommand).Name;
        var handlerType = typeof(TCommand)
            .Assembly.GetTypes()
            .FirstOrDefault(t =>
                t.GetInterfaces()
                    .Any(i =>
                        i.IsGenericType
                        && i.GetGenericTypeDefinition() == typeof(ICommandHandler<,>)
                        && i.GetGenericArguments()[0] == typeof(TCommand)
                    )
            );
        var commandHandlerName = handlerType?.Name;

        // usually we use class/methodName
        var activityName = $"{ObservabilityConstant.Components.CommandHandler}.{commandHandlerName}/{commandName}";

        return new CreateActivityInfo
        {
            Name = activityName,
            ActivityKind = ActivityKind.Consumer,
            Tags = new Dictionary<string, object?>
            {
                { TelemetryTags.Tracing.Application.Commands.Command, commandName },
                { TelemetryTags.Tracing.Application.Commands.CommandType, typeof(TCommand).FullName },
                { TelemetryTags.Tracing.Application.Commands.CommandHandler, commandHandlerName },
                { TelemetryTags.Tracing.Application.Commands.CommandHandlerType, handlerType?.FullName },
            },
        };
    }
}

View File

@ -0,0 +1,157 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;
using BuildingBlocks.Core.CQRS;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
namespace BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Commands;
/// <summary>
/// Records command-handler metrics: an active-commands gauge, total/success/failure
/// counters and a handler-duration histogram, all tagged with command metadata.
/// </summary>
public class CommandHandlerMetrics
{
    private readonly UpDownCounter<long> _activeCommandsCounter;
    private readonly Counter<long> _totalCommandsNumber;
    private readonly Counter<long> _successCommandsNumber;
    private readonly Counter<long> _failedCommandsNumber;
    private readonly Histogram<double> _handlerDuration;

    // NOTE(review): one shared Stopwatch means concurrent command executions
    // overwrite each other's timing — confirm this type is registered per-request.
    private Stopwatch? _timer;

    public CommandHandlerMetrics(IDiagnosticsProvider diagnosticsProvider)
    {
        _activeCommandsCounter = diagnosticsProvider.Meter.CreateUpDownCounter<long>(
            TelemetryTags.Metrics.Application.Commands.ActiveCount,
            unit: "{active_commands}",
            description: "Number of commands currently being handled"
        );

        _totalCommandsNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.TotalExecutedCount,
            unit: "{total_commands}",
            description: "Total number of executed command that sent to command handlers"
        );

        _successCommandsNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.SuccessCount,
            unit: "{success_commands}",
            description: "Number commands that handled successfully"
        );

        _failedCommandsNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.FaildCount,
            unit: "{failed_commands}",
            description: "Number commands that handled with errors"
        );

        _handlerDuration = diagnosticsProvider.Meter.CreateHistogram<double>(
            TelemetryTags.Metrics.Application.Commands.HandlerDuration,
            unit: "s",
            description: "Measures the duration of command handler"
        );
    }

    /// <summary>Marks a command as started: bumps the active gauge and total counter, starts timing.</summary>
    public void StartExecuting<TCommand>()
    {
        var tags = BuildTags<TCommand>();

        if (_activeCommandsCounter.Enabled)
        {
            _activeCommandsCounter.Add(1, tags);
        }

        if (_totalCommandsNumber.Enabled)
        {
            _totalCommandsNumber.Add(1, tags);
        }

        _timer = Stopwatch.StartNew();
    }

    /// <summary>Marks a command as completed successfully: drops the active gauge, records duration and success.</summary>
    public void FinishExecuting<TCommand>()
    {
        var tags = BuildTags<TCommand>();

        if (_activeCommandsCounter.Enabled)
        {
            _activeCommandsCounter.Add(-1, tags);
        }

        // BUG FIX: Elapsed.Seconds is the whole-seconds *component* of the
        // TimeSpan (always 0 for sub-second handlers); TotalSeconds is the
        // actual duration in fractional seconds.
        if (_handlerDuration.Enabled && _timer != null)
        {
            _handlerDuration.Record(_timer.Elapsed.TotalSeconds, tags);
        }

        // BUG FIX: the success counter was previously skipped whenever the
        // duration histogram was disabled (early return); the two are unrelated.
        if (_successCommandsNumber.Enabled)
        {
            _successCommandsNumber.Add(1, tags);
        }
    }

    /// <summary>Marks a command as failed: drops the active gauge and bumps the failure counter.</summary>
    public void FailedCommand<TCommand>()
    {
        var tags = BuildTags<TCommand>();

        // BUG FIX: the active-commands gauge was only decremented on success,
        // so every failed command leaked a permanent +1 in the gauge.
        if (_activeCommandsCounter.Enabled)
        {
            _activeCommandsCounter.Add(-1, tags);
        }

        if (_failedCommandsNumber.Enabled)
        {
            _failedCommandsNumber.Add(1, tags);
        }
    }

    // Builds the metric tag set for TCommand (previously triplicated in every
    // public method). The handler type is resolved by scanning the command's
    // assembly for an ICommandHandler<TCommand, _> implementation.
    private static TagList BuildTags<TCommand>()
    {
        var commandName = typeof(TCommand).Name;
        var handlerType = typeof(TCommand)
            .Assembly.GetTypes()
            .FirstOrDefault(t =>
                t.GetInterfaces()
                    .Any(i =>
                        i.IsGenericType
                        && i.GetGenericTypeDefinition() == typeof(ICommandHandler<,>)
                        && i.GetGenericArguments()[0] == typeof(TCommand)
                    )
            );
        var commandHandlerName = handlerType?.Name;

        return new TagList
        {
            { TelemetryTags.Tracing.Application.Commands.Command, commandName },
            { TelemetryTags.Tracing.Application.Commands.CommandType, typeof(TCommand).FullName },
            { TelemetryTags.Tracing.Application.Commands.CommandHandler, commandHandlerName },
            { TelemetryTags.Tracing.Application.Commands.CommandHandlerType, handlerType?.FullName },
        };
    }
}

View File

@ -0,0 +1,47 @@
using System.Diagnostics;
using BuildingBlocks.Core.CQRS;
using BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
namespace BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Query;
/// <summary>
/// Executes query-handler work inside a diagnostics activity (span) named
/// after the query and its resolved handler type.
/// </summary>
public class QueryHandlerActivity(IDiagnosticsProvider diagnosticsProvider)
{
    /// <summary>
    /// Runs <paramref name="action"/> inside a consumer activity for
    /// <typeparamref name="TQuery"/>, returning the action's result.
    /// </summary>
    public async Task<TResult> Execute<TQuery, TResult>(
        Func<Activity?, CancellationToken, Task<TResult>> action,
        CancellationToken cancellationToken
    )
    {
        var queryType = typeof(TQuery);

        // Resolve the concrete IQueryHandler<TQuery, _> implementation by
        // scanning the query's own assembly.
        bool HandlesQuery(Type candidate) =>
            candidate
                .GetInterfaces()
                .Any(contract =>
                    contract.IsGenericType
                    && contract.GetGenericTypeDefinition() == typeof(IQueryHandler<,>)
                    && contract.GetGenericArguments()[0] == queryType
                );

        var resolvedHandler = queryType.Assembly.GetTypes().FirstOrDefault(HandlesQuery);
        var resolvedHandlerName = resolvedHandler?.Name;

        // usually we use class/methodName
        var spanName = $"{ObservabilityConstant.Components.QueryHandler}.{resolvedHandlerName}/{queryType.Name}";

        var activityInfo = new CreateActivityInfo
        {
            Name = spanName,
            ActivityKind = ActivityKind.Consumer,
            Tags = new Dictionary<string, object?>
            {
                { TelemetryTags.Tracing.Application.Queries.Query, queryType.Name },
                { TelemetryTags.Tracing.Application.Queries.QueryType, queryType.FullName },
                { TelemetryTags.Tracing.Application.Queries.QueryHandler, resolvedHandlerName },
                { TelemetryTags.Tracing.Application.Queries.QueryHandlerType, resolvedHandler?.FullName },
            },
        };

        return await diagnosticsProvider.ExecuteActivityAsync(activityInfo, action, cancellationToken);
    }
}

View File

@ -0,0 +1,156 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;
using BuildingBlocks.Core.CQRS;
using BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
namespace BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Query;
/// <summary>
/// Records query-handler metrics: an active-queries gauge, total/success/failure
/// counters and a handler-duration histogram, all tagged with query metadata.
/// </summary>
public class QueryHandlerMetrics
{
    private readonly UpDownCounter<long> _activeQueriesCounter;
    private readonly Counter<long> _totalQueriesNumber;
    private readonly Counter<long> _successQueriesNumber;
    private readonly Counter<long> _failedQueriesNumber;
    private readonly Histogram<double> _handlerDuration;

    // NOTE(review): one shared Stopwatch means concurrent query executions
    // overwrite each other's timing — confirm this type is registered per-request.
    private Stopwatch? _timer;

    public QueryHandlerMetrics(IDiagnosticsProvider diagnosticsProvider)
    {
        // NOTE(review): all instrument names below come from
        // TelemetryTags.Metrics.Application.Commands.* — apparently a copy-paste
        // from CommandHandlerMetrics, so query instruments collide with command
        // instruments. Confirm whether ...Application.Queries.* counterparts
        // exist and switch to them.
        _activeQueriesCounter = diagnosticsProvider.Meter.CreateUpDownCounter<long>(
            TelemetryTags.Metrics.Application.Commands.ActiveCount,
            unit: "{active_queries}",
            description: "Number of queries currently being handled"
        );

        _totalQueriesNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.TotalExecutedCount,
            unit: "{total_queries}",
            description: "Total number of executed query that sent to query handlers"
        );

        _successQueriesNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.SuccessCount,
            unit: "{success_queries}",
            description: "Number queries that handled successfully"
        );

        _failedQueriesNumber = diagnosticsProvider.Meter.CreateCounter<long>(
            TelemetryTags.Metrics.Application.Commands.FaildCount,
            unit: "{failed_queries}",
            description: "Number queries that handled with errors"
        );

        _handlerDuration = diagnosticsProvider.Meter.CreateHistogram<double>(
            TelemetryTags.Metrics.Application.Commands.HandlerDuration,
            unit: "s",
            description: "Measures the duration of query handler"
        );
    }

    /// <summary>Marks a query as started: bumps the active gauge and total counter, starts timing.</summary>
    public void StartExecuting<TQuery>()
    {
        var tags = BuildTags<TQuery>();

        if (_activeQueriesCounter.Enabled)
        {
            _activeQueriesCounter.Add(1, tags);
        }

        if (_totalQueriesNumber.Enabled)
        {
            _totalQueriesNumber.Add(1, tags);
        }

        _timer = Stopwatch.StartNew();
    }

    /// <summary>Marks a query as completed successfully: drops the active gauge, records duration and success.</summary>
    public void FinishExecuting<TQuery>()
    {
        var tags = BuildTags<TQuery>();

        if (_activeQueriesCounter.Enabled)
        {
            _activeQueriesCounter.Add(-1, tags);
        }

        // BUG FIX: Elapsed.Seconds is the whole-seconds *component* of the
        // TimeSpan (always 0 for sub-second handlers); TotalSeconds is the
        // actual duration in fractional seconds.
        if (_handlerDuration.Enabled && _timer != null)
        {
            _handlerDuration.Record(_timer.Elapsed.TotalSeconds, tags);
        }

        // BUG FIX: the success counter was previously skipped whenever the
        // duration histogram was disabled (early return); the two are unrelated.
        if (_successQueriesNumber.Enabled)
        {
            _successQueriesNumber.Add(1, tags);
        }
    }

    /// <summary>
    /// Marks a query as failed: drops the active gauge and bumps the failure
    /// counter. (Name kept as FailedCommand for caller compatibility.)
    /// </summary>
    public void FailedCommand<TQuery>()
    {
        var tags = BuildTags<TQuery>();

        // BUG FIX: the active-queries gauge was only decremented on success,
        // so every failed query leaked a permanent +1 in the gauge.
        if (_activeQueriesCounter.Enabled)
        {
            _activeQueriesCounter.Add(-1, tags);
        }

        if (_failedQueriesNumber.Enabled)
        {
            _failedQueriesNumber.Add(1, tags);
        }
    }

    // Builds the metric tag set for TQuery (previously triplicated in every
    // public method). The handler type is resolved by scanning the query's
    // assembly for an IQueryHandler<TQuery, _> implementation.
    private static TagList BuildTags<TQuery>()
    {
        var queryName = typeof(TQuery).Name;
        var handlerType = typeof(TQuery)
            .Assembly.GetTypes()
            .FirstOrDefault(t =>
                t.GetInterfaces()
                    .Any(i =>
                        i.IsGenericType
                        && i.GetGenericTypeDefinition() == typeof(IQueryHandler<,>)
                        && i.GetGenericArguments()[0] == typeof(TQuery)
                    )
            );
        var queryHandlerName = handlerType?.Name;

        return new TagList
        {
            { TelemetryTags.Tracing.Application.Queries.Query, queryName },
            { TelemetryTags.Tracing.Application.Queries.QueryType, typeof(TQuery).FullName },
            { TelemetryTags.Tracing.Application.Queries.QueryHandler, queryHandlerName },
            { TelemetryTags.Tracing.Application.Queries.QueryHandlerType, handlerType?.FullName },
        };
    }
}

View File

@ -0,0 +1,12 @@
using System.Diagnostics;
namespace BuildingBlocks.OpenTelemetryCollector;
/// <summary>
/// Describes an activity (span) to be created by an IDiagnosticsProvider:
/// display name, tags, optional parent context and kind.
/// </summary>
public class CreateActivityInfo
{
    /// <summary>Display name of the activity to create.</summary>
    public required string Name { get; set; }

    /// <summary>Tags attached to the activity at creation time.</summary>
    public IDictionary<string, object?> Tags { get; set; } = new Dictionary<string, object?>();

    // NOTE(review): ParentId is not consumed by CustomeDiagnosticsProvider
    // (only Parent is) — confirm it is used elsewhere.
    public string? ParentId { get; set; }

    /// <summary>Optional explicit parent context for the new activity.</summary>
    public ActivityContext? Parent { get; set; }

    // BUG FIX: this was `public required ActivityKind ActivityKind = ActivityKind.Internal;`
    // — a public *field* whose `required` modifier forced every object
    // initializer to set it, defeating the Internal default. Exposed as an
    // optional property instead; existing initializers that set it still compile.
    public ActivityKind ActivityKind { get; set; } = ActivityKind.Internal;
}

View File

@ -0,0 +1,132 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Reflection;
using Microsoft.Extensions.Options;
namespace BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
/// <summary>
/// Default IDiagnosticsProvider: lazily creates one ActivitySource and one Meter
/// named after ObservabilityOptions.InstrumentationName, and runs caller-supplied
/// work inside started activities with OK/Error status handling.
/// </summary>
/// <remarks>
/// NOTE(review): class name "Custome" is a typo kept for compatibility with
/// existing registrations. The lazy initialization of ActivitySource/Meter is
/// not thread-safe — confirm this type is registered as a singleton created
/// before concurrent use.
/// </remarks>
public class CustomeDiagnosticsProvider(IMeterFactory meterFactory, IOptions<ObservabilityOptions> options)
    : IDiagnosticsProvider
{
    // Version of the calling assembly, used as the source/meter version.
    private readonly Version? _version = Assembly.GetCallingAssembly().GetName().Version;
    private ActivitySource? _activitySource;
    private ActivityListener? _listener;
    private Meter? _meter;

    // Fails fast at construction when the option is missing.
    public string InstrumentationName { get; } = options.Value.InstrumentationName ?? throw new ArgumentException("InstrumentationName cannot be null or empty.");

    // https://learn.microsoft.com/en-us/dotnet/core/diagnostics/distributed-tracing-instrumentation-walkthroughs
    public ActivitySource ActivitySource
    {
        get
        {
            if (_activitySource != null)
                return _activitySource;

            _activitySource = new(InstrumentationName, _version?.ToString());

            // The listener subscribes to EVERY ActivitySource in the process
            // (ShouldListenTo always true) and forces full recording of samples.
            _listener = new ActivityListener
            {
                ShouldListenTo = x => true,
                Sample = (ref ActivityCreationOptions<ActivityContext> _) => ActivitySamplingResult.AllDataAndRecorded,
            };

            ActivitySource.AddActivityListener(_listener);

            return _activitySource;
        }
    }

    // https://learn.microsoft.com/en-us/dotnet/core/diagnostics/metrics-instrumentation
    public Meter Meter
    {
        get
        {
            if (_meter != null)
                return _meter;

            _meter = meterFactory.Create(InstrumentationName, _version?.ToString());

            return _meter;
        }
    }

    /// <summary>
    /// Runs <paramref name="action"/> inside a started activity (no result).
    /// When tracing is disabled the action runs with a null activity. On success
    /// the activity status is set to OK; on exception to Error, then rethrown.
    /// NOTE(review): CreateActivityInfo.ParentId is ignored — only Parent is used.
    /// </summary>
    public async Task ExecuteActivityAsync(
        CreateActivityInfo createActivityInfo,
        Func<Activity?, CancellationToken, Task> action,
        CancellationToken cancellationToken = default
    )
    {
        if (!options.Value.TracingEnabled)
        {
            await action(null, cancellationToken);
            return;
        }

        // Falls back to Activity.Current when CreateActivity returns null
        // (e.g. no listener sampled the source).
        using var activity =
            ActivitySource
                .CreateActivity(
                    name: $"{InstrumentationName}.{createActivityInfo.Name}",
                    kind: createActivityInfo.ActivityKind,
                    parentContext: createActivityInfo.Parent ?? default,
                    idFormat: ActivityIdFormat.W3C,
                    tags: createActivityInfo.Tags
                )
                ?.Start() ?? Activity.Current;

        try
        {
            await action(activity!, cancellationToken);
            activity?.SetOkStatus();
        }
        catch (System.Exception ex)
        {
            activity?.SetErrorStatus(ex);
            throw;
        }
    }

    /// <summary>
    /// Runs <paramref name="action"/> inside a started activity and returns its
    /// result. Same status semantics as the non-generic overload.
    /// NOTE(review): declared as Task&lt;TResult?&gt; while the interface declares
    /// Task&lt;TResult&gt; — confirm the intended nullability.
    /// </summary>
    public async Task<TResult?> ExecuteActivityAsync<TResult>(
        CreateActivityInfo createActivityInfo,
        Func<Activity?, CancellationToken, Task<TResult>> action,
        CancellationToken cancellationToken = default
    )
    {
        if (!options.Value.TracingEnabled)
        {
            return await action(null, cancellationToken);
        }

        using var activity =
            ActivitySource
                .CreateActivity(
                    name: $"{InstrumentationName}.{createActivityInfo.Name}",
                    kind: createActivityInfo.ActivityKind,
                    parentContext: createActivityInfo.Parent ?? default,
                    idFormat: ActivityIdFormat.W3C,
                    tags: createActivityInfo.Tags
                )
                ?.Start() ?? Activity.Current;

        try
        {
            var result = await action(activity!, cancellationToken);
            activity?.SetOkStatus();
            return result;
        }
        catch (System.Exception ex)
        {
            activity?.SetErrorStatus(ex);
            throw;
        }
    }

    // Disposes the lazily created listener, meter and source (null-safe).
    public void Dispose()
    {
        _listener?.Dispose();
        _meter?.Dispose();
        _activitySource?.Dispose();
    }
}

View File

@ -0,0 +1,23 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;
namespace BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
/// <summary>
/// Provides the process-wide ActivitySource and Meter for custom instrumentation,
/// plus helpers that run work inside a started activity with status handling.
/// </summary>
public interface IDiagnosticsProvider : IDisposable
{
    /// <summary>Name used for both the ActivitySource and the Meter.</summary>
    string InstrumentationName { get; }

    /// <summary>Source used to create tracing activities (spans).</summary>
    ActivitySource ActivitySource { get; }

    /// <summary>Meter used to create metric instruments.</summary>
    Meter Meter { get; }

    /// <summary>
    /// Runs <paramref name="action"/> inside an activity described by
    /// <paramref name="createActivityInfo"/> (no result).
    /// </summary>
    Task ExecuteActivityAsync(
        CreateActivityInfo createActivityInfo,
        Func<Activity?, CancellationToken, Task> action,
        CancellationToken cancellationToken = default
    );

    /// <summary>
    /// Runs <paramref name="action"/> inside an activity described by
    /// <paramref name="createActivityInfo"/> and returns its result.
    /// </summary>
    Task<TResult> ExecuteActivityAsync<TResult>(
        CreateActivityInfo createActivityInfo,
        Func<Activity?, CancellationToken, Task<TResult>> action,
        CancellationToken cancellationToken = default
    );
}

View File

@ -0,0 +1,358 @@
using System.Diagnostics;
using System.Reflection;
using BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Commands;
using BuildingBlocks.OpenTelemetryCollector.CoreDiagnostics.Query;
using BuildingBlocks.OpenTelemetryCollector.DiagnosticsProvider;
using BuildingBlocks.Web;
using Grafana.OpenTelemetry;
using MassTransit.Logging;
using MassTransit.Monitoring;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using OpenTelemetry;
using OpenTelemetry.Exporter;
using OpenTelemetry.Logs;
using OpenTelemetry.Metrics;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
namespace BuildingBlocks.OpenTelemetryCollector;
// https://learn.microsoft.com/en-us/dotnet/core/diagnostics/observability-otlp-example
// https://learn.microsoft.com/en-us/dotnet/core/diagnostics/observability-prgrja-example
// https://learn.microsoft.com/en-us/dotnet/core/diagnostics/observability-prgrja-example
// https://blog.codingmilitia.com/2023/09/05/observing-dotnet-microservices-with-opentelemetry-logs-traces-metrics/
public static class Extensions
{
    /// <summary>
    /// Registers OpenTelemetry logging, metrics and tracing for the application based on
    /// <see cref="ObservabilityOptions"/>. Each signal can be toggled independently and is
    /// exported through the configured exporters (Prometheus, OTLP, Aspire dashboard,
    /// Grafana, Jaeger, Zipkin, console).
    /// </summary>
    /// <param name="builder">The web application builder to configure.</param>
    /// <returns>The same <paramref name="builder"/> for chaining.</returns>
    /// <exception cref="ArgumentException">When <c>InstrumentationName</c> is null or empty.</exception>
    public static WebApplicationBuilder AddCustomObservability(this WebApplicationBuilder builder)
    {
        Activity.DefaultIdFormat = ActivityIdFormat.W3C;

        builder.Services.AddSingleton<IDiagnosticsProvider, CustomeDiagnosticsProvider>();
        builder.AddCoreDiagnostics();

        builder.Services.AddValidateOptions<ObservabilityOptions>();
        var observabilityOptions = builder.Services.GetOptions<ObservabilityOptions>(nameof(ObservabilityOptions));

        // InstrumentationName property option is mandatory and can't be empty
        ArgumentException.ThrowIfNullOrEmpty(observabilityOptions.InstrumentationName);
        ObservabilityConstant.InstrumentationName = observabilityOptions.InstrumentationName;

        // Nothing to wire up when all three signals are disabled.
        if (observabilityOptions is { MetricsEnabled: false, TracingEnabled: false, LoggingEnabled: false })
        {
            return builder;
        }

        // Shared resource metadata (service name/version/instance and environment) applied to all signals.
        void ConfigureResourceBuilder(ResourceBuilder resourceBuilder)
        {
            resourceBuilder.AddAttributes([new("service.environment", builder.Environment.EnvironmentName)]);
            resourceBuilder.AddService(
                serviceName: observabilityOptions.ServiceName ?? builder.Environment.ApplicationName,
                serviceVersion: Assembly.GetCallingAssembly().GetName().Version?.ToString() ?? "unknown",
                serviceInstanceId: Environment.MachineName
            );
        }

        if (observabilityOptions.LoggingEnabled)
        {
            // logging
            // opentelemetry logging works with .net default logging providers and doesn't work for `serilog`; in serilog we should enable `WriteToProviders=true`
            builder.Logging.AddOpenTelemetry(options =>
            {
                // Add service metadata to exported logs. Configured exactly once — the
                // original code called SetResourceBuilder twice, and the second call
                // overwrote the first, silently dropping the `service.environment` attribute.
                var resourceBuilder = ResourceBuilder.CreateDefault();
                ConfigureResourceBuilder(resourceBuilder);
                options.SetResourceBuilder(resourceBuilder);

                options.IncludeScopes = true;

                // this allows the state value passed to the logger.Log method to be parsed, in case it isn't a collection of KeyValuePair<string, object?>, which is the case when we use things like logger.LogInformation.
                options.ParseStateValues = true;

                // without this the exported message would still contain un-replaced placeholders
                options.IncludeFormattedMessage = true;

                options.AddLoggingExporters(observabilityOptions);
            });
        }

        if (observabilityOptions is { MetricsEnabled: false, TracingEnabled: false })
        {
            return builder;
        }

        OpenTelemetryBuilder otel = null!;

        if (observabilityOptions.MetricsEnabled || observabilityOptions.TracingEnabled)
        {
            // metrics and tracing
            otel = builder.Services.AddOpenTelemetry();
            otel.ConfigureResource(ConfigureResourceBuilder);
        }

        if (observabilityOptions.MetricsEnabled)
        {
            otel.WithMetrics(metrics =>
            {
                metrics
                    .AddAspNetCoreInstrumentation()
                    .AddHttpClientInstrumentation()
                    .AddProcessInstrumentation()
                    .AddRuntimeInstrumentation()
                    .AddMeter(InstrumentationOptions.MeterName)
                    .AddMeter(observabilityOptions.InstrumentationName)
                    // metrics provided by ASP.NET Core in .NET 8
                    .AddView(
                        "http.server.request.duration",
                        new ExplicitBucketHistogramConfiguration
                        {
                            Boundaries = [0, 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10],
                        }
                    )
                    .AddMeter("System.Runtime")
                    .AddMeter("Microsoft.AspNetCore.Hosting")
                    .AddMeter("Microsoft.AspNetCore.Server.Kestrel");

                AddMetricsExporter(observabilityOptions, metrics);
            });
        }

        if (observabilityOptions.TracingEnabled)
        {
            otel.WithTracing(tracing =>
            {
                if (builder.Environment.IsDevelopment())
                {
                    // We want to view all traces in development
                    tracing.SetSampler(new AlwaysOnSampler());
                }

                tracing
                    .SetErrorStatusOnException()
                    .AddAspNetCoreInstrumentation(options =>
                    {
                        options.RecordException = true;
                    })
                    .AddGrpcClientInstrumentation()
                    .AddHttpClientInstrumentation(instrumentationOptions =>
                    {
                        instrumentationOptions.RecordException = true;
                    })
                    .AddEntityFrameworkCoreInstrumentation(instrumentationOptions =>
                    {
                        instrumentationOptions.SetDbStatementForText = true;
                    })
                    .AddSource(DiagnosticHeaders.DefaultListenerName)
                    .AddNpgsql()
                    // `AddSource` for adding custom activity sources
                    .AddSource(observabilityOptions.InstrumentationName)
                    // activity sources provided by ASP.NET Core in .NET 8
                    .AddSource("Microsoft.AspNetCore.Hosting")
                    .AddSource("Microsoft.AspNetCore.Server.Kestrel");

                AddTracingExporter(observabilityOptions, tracing);
            });
        }

        return builder;
    }

    /// <summary>
    /// Adds the observability middleware: suppresses HTTP metrics for the `/metrics` and
    /// `/health` endpoints and, when enabled, exposes the Prometheus scraping endpoint.
    /// </summary>
    public static WebApplication UseCustomObservability(this WebApplication app)
    {
        var options = app.Services.GetRequiredService<IOptions<ObservabilityOptions>>().Value;

        app.Use(
            async (context, next) =>
            {
                // Don't record request metrics for scraping/health probes — they are noise.
                var metricsFeature = context.Features.Get<IHttpMetricsTagsFeature>();
                if (metricsFeature != null && context.Request.Path is { Value: "/metrics" or "/health" })
                {
                    metricsFeature.MetricsDisabled = true;
                }

                await next(context);
            }
        );

        if (options.UsePrometheusExporter)
        {
            // export application metrics in `/metrics` endpoint and should scrape in the Prometheus config file and `scrape_configs`
            // https://github.com/open-telemetry/opentelemetry-dotnet/tree/e330e57b04fa3e51fe5d63b52bfff891fb5b7961/src/OpenTelemetry.Exporter.Prometheus.AspNetCore
            app.UseOpenTelemetryPrometheusScrapingEndpoint(); // http://localhost:4000/metrics
        }

        return app;
    }

    // Configures trace exporters according to the enabled flags in the options.
    private static void AddTracingExporter(ObservabilityOptions observabilityOptions, TracerProviderBuilder tracing)
    {
        if (observabilityOptions.UseJaegerExporter)
        {
            ArgumentNullException.ThrowIfNull(observabilityOptions.JaegerOptions);
            // https://github.com/open-telemetry/opentelemetry-dotnet/tree/e330e57b04fa3e51fe5d63b52bfff891fb5b7961/docs/trace/getting-started-jaeger
            // `OpenTelemetry.Exporter.Jaeger` package and `AddJaegerExporter` to use Http endpoint (http://localhost:14268/api/traces) is deprecated, and we should use `OpenTelemetry.Exporter.OpenTelemetryProtocol` and `AddOtlpExporter` with OTLP port `4317` on Jaeger
            // tracing.AddJaegerExporter(
            //     x => x.Endpoint = new Uri(observabilityOptions.JaegerOptions.HttpExporterEndpoint)); // http://localhost:14268/api/traces
            tracing.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.JaegerOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseZipkinExporter)
        {
            ArgumentNullException.ThrowIfNull(observabilityOptions.ZipkinOptions);
            // https://github.com/open-telemetry/opentelemetry-dotnet/tree/e330e57b04fa3e51fe5d63b52bfff891fb5b7961/src/OpenTelemetry.Exporter.Zipkin
            tracing.AddZipkinExporter(x =>
                x.Endpoint = new Uri(observabilityOptions.ZipkinOptions.HttpExporterEndpoint)
            ); // "http://localhost:9411/api/v2/spans"
        }

        if (observabilityOptions.UseConsoleExporter)
        {
            tracing.AddConsoleExporter();
        }

        if (observabilityOptions.UseOTLPExporter)
        {
            ArgumentNullException.ThrowIfNull(observabilityOptions.OTLPOptions);
            tracing.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.OTLPOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseAspireOTLPExporter)
        {
            // we can use just one `AddOtlpExporter` and in development use `aspire-dashboard` OTLP endpoint address as `OTLPExporterEndpoint` and in production we can use `otel-collector` OTLP endpoint address
            // BUGFIX: validate the options object that is actually dereferenced below (was OTLPOptions).
            ArgumentNullException.ThrowIfNull(observabilityOptions.AspireDashboardOTLPOptions);
            tracing.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.AspireDashboardOTLPOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseGrafanaExporter)
        {
            // https://github.com/grafana/grafana-opentelemetry-dotnet/blob/main/docs/configuration.md#aspnet-core
            // https://github.com/grafana/grafana-opentelemetry-dotnet/
            // https://github.com/grafana/grafana-opentelemetry-dotnet/blob/main/docs/configuration.md#sending-to-an-agent-or-collector-via-otlp
            // https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/instrument/dotnet/
            tracing.UseGrafana();
        }
    }

    // Configures metric exporters according to the enabled flags in the options.
    private static void AddMetricsExporter(ObservabilityOptions observabilityOptions, MeterProviderBuilder metrics)
    {
        if (observabilityOptions.UsePrometheusExporter)
        {
            // https://github.com/open-telemetry/opentelemetry-dotnet/tree/e330e57b04fa3e51fe5d63b52bfff891fb5b7961/src/OpenTelemetry.Exporter.Prometheus.AspNetCore
            // for exporting app metrics to `/metrics` endpoint
            metrics.AddPrometheusExporter(o => o.DisableTotalNameSuffixForCounters = true); // http://localhost:4000/metrics
        }

        if (observabilityOptions.UseConsoleExporter)
        {
            metrics.AddConsoleExporter();
        }

        if (observabilityOptions.UseOTLPExporter)
        {
            ArgumentNullException.ThrowIfNull(observabilityOptions.OTLPOptions);
            metrics.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.OTLPOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseAspireOTLPExporter)
        {
            // we can use just one `AddOtlpExporter` and in development use `aspire-dashboard` OTLP endpoint address as `OTLPExporterEndpoint` and in production we can use `otel-collector` OTLP endpoint address
            // BUGFIX: validate the options object that is actually dereferenced below (was OTLPOptions).
            ArgumentNullException.ThrowIfNull(observabilityOptions.AspireDashboardOTLPOptions);
            metrics.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.AspireDashboardOTLPOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseGrafanaExporter)
        {
            // https://github.com/grafana/grafana-opentelemetry-dotnet/blob/main/docs/configuration.md#aspnet-core
            // https://github.com/grafana/grafana-opentelemetry-dotnet/
            // https://github.com/grafana/grafana-opentelemetry-dotnet/blob/main/docs/configuration.md#sending-to-an-agent-or-collector-via-otlp
            // https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/instrument/dotnet/
            metrics.UseGrafana();
        }
    }

    // Configures log exporters according to the enabled flags in the options.
    private static void AddLoggingExporters(
        this OpenTelemetryLoggerOptions openTelemetryLoggerOptions,
        ObservabilityOptions observabilityOptions
    )
    {
        if (observabilityOptions.UseOTLPExporter)
        {
            ArgumentNullException.ThrowIfNull(observabilityOptions.OTLPOptions);
            openTelemetryLoggerOptions.AddOtlpExporter(options =>
            {
                options.Endpoint = new Uri(observabilityOptions.OTLPOptions.OTLPGrpcExporterEndpoint);
                options.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseAspireOTLPExporter)
        {
            // we can use just one `AddOtlpExporter` and in development use `aspire-dashboard` OTLP endpoint address as `OTLPExporterEndpoint` and in production we can use `otel-collector` OTLP endpoint address
            // BUGFIX: validate the options object that is actually dereferenced below (was OTLPOptions).
            ArgumentNullException.ThrowIfNull(observabilityOptions.AspireDashboardOTLPOptions);
            openTelemetryLoggerOptions.AddOtlpExporter(x =>
            {
                x.Endpoint = new Uri(observabilityOptions.AspireDashboardOTLPOptions.OTLPGrpcExporterEndpoint);
                x.Protocol = OtlpExportProtocol.Grpc;
            });
        }

        if (observabilityOptions.UseGrafanaExporter)
        {
            // https://github.com/grafana/grafana-opentelemetry-dotnet/
            // https://github.com/grafana/grafana-opentelemetry-dotnet/blob/main/docs/configuration.md#aspnet-core
            // https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/instrument/dotnet/
            openTelemetryLoggerOptions.UseGrafana();
        }

        if (observabilityOptions.UseConsoleExporter)
        {
            openTelemetryLoggerOptions.AddConsoleExporter();
        }
    }

    // Registers activity/metrics helpers for command, query and event handlers.
    private static WebApplicationBuilder AddCoreDiagnostics(this WebApplicationBuilder builder)
    {
        builder.Services.AddTransient<CommandHandlerActivity>();
        builder.Services.AddTransient<CommandHandlerMetrics>();
        builder.Services.AddTransient<QueryHandlerActivity>();
        builder.Services.AddTransient<QueryHandlerMetrics>();

        return builder;
    }
}

View File

@ -0,0 +1,16 @@
namespace BuildingBlocks.OpenTelemetryCollector;
/// <summary>
/// Process-wide observability constants shared by the tag/metric name builders.
/// </summary>
public static class ObservabilityConstant
{
    // Root instrumentation name for this service. Assigned once at startup from
    // ObservabilityOptions.InstrumentationName (see AddCustomObservability) and then
    // used as the prefix for custom activity, meter and tag names.
    public static string InstrumentationName = default!;

    // Logical component names used when tagging activities and metrics.
    public static class Components
    {
        public const string CommandHandler = "CommandHandler";
        public const string QueryHandler = "QueryHandler";
        public const string EventStore = "EventStore";
        public const string Producer = "Producer";
        public const string Consumer = "Consumer";
        public const string EventHandler = "EventHandler";
    }
}

View File

@ -0,0 +1,47 @@
namespace BuildingBlocks.OpenTelemetryCollector;
/// <summary>
/// Options controlling which telemetry signals are collected and which exporters are used.
/// Bound from the "ObservabilityOptions" configuration section.
/// </summary>
public class ObservabilityOptions
{
    /// <summary>Mandatory root name for custom activity sources and meters.</summary>
    public string InstrumentationName { get; set; } = default!;

    /// <summary>Logical service name; falls back to the application name when null.</summary>
    public string? ServiceName { get; set; }

    // Signal toggles — all signals are enabled by default.
    public bool MetricsEnabled { get; set; } = true;
    public bool TracingEnabled { get; set; } = true;
    public bool LoggingEnabled { get; set; } = true;

    // Exporter toggles.
    public bool UsePrometheusExporter { get; set; } = true;
    public bool UseOTLPExporter { get; set; } = true;
    public bool UseAspireOTLPExporter { get; set; } = true;
    public bool UseGrafanaExporter { get; set; }
    public bool UseConsoleExporter { get; set; }
    public bool UseJaegerExporter { get; set; }
    public bool UseZipkinExporter { get; set; }

    // Per-exporter endpoint settings (only read when the matching toggle is enabled).
    public ZipkinOptions ZipkinOptions { get; set; } = default!;
    public JaegerOptions JaegerOptions { get; set; } = default!;
    public OTLPOptions OTLPOptions { get; set; } = default!;
    public AspireDashboardOTLPOptions AspireDashboardOTLPOptions { get; set; } = default!;
}
// https://github.com/open-telemetry/opentelemetry-dotnet/blob/main/src/OpenTelemetry.Exporter.Zipkin/README.md
public class ZipkinOptions
{
    /// <summary>
    /// Gets or sets endpoint address to receive telemetry
    /// </summary>
    public string HttpExporterEndpoint { get; set; } = "http://localhost:9411/api/v2/spans";
}

/// <summary>
/// Jaeger exporter endpoints. Only the OTLP gRPC endpoint is used by AddTracingExporter;
/// the HTTP endpoint corresponds to the deprecated AddJaegerExporter path.
/// </summary>
public class JaegerOptions
{
    public string OTLPGrpcExporterEndpoint { get; set; } = "http://localhost:14317";
    public string HttpExporterEndpoint { get; set; } = "http://localhost:14268/api/traces";
}

/// <summary>OTLP collector endpoints (gRPC and HTTP protocol variants).</summary>
public class OTLPOptions
{
    public string OTLPGrpcExporterEndpoint { get; set; } = "http://localhost:4317";
    public string OTLPHttpExporterEndpoint { get; set; } = "http://localhost:4318";
}

/// <summary>OTLP gRPC endpoint of the Aspire dashboard.</summary>
public class AspireDashboardOTLPOptions
{
    public string OTLPGrpcExporterEndpoint { get; set; } = "http://localhost:4319";
}

View File

@ -0,0 +1,265 @@
namespace BuildingBlocks.OpenTelemetryCollector;
/// <summary>
/// Telemetry tags use for adding tags to activities as tag name
/// </summary>
/// <summary>
/// Telemetry tags used for adding tags to activities/metrics as tag names.
/// Tag keys follow the OpenTelemetry semantic conventions where one exists.
/// </summary>
public static class TelemetryTags
{
    // https://opentelemetry.io/docs/specs/semconv/general/trace/
    // https://opentelemetry.io/docs/specs/semconv/general/attribute-naming/
    public static class Tracing
    {
        // https://opentelemetry.io/docs/specs/semconv/resource/#service
        // https://opentelemetry.io/docs/specs/semconv/attributes-registry/peer/#peer-attributes
        public static class Service
        {
            public const string PeerService = "peer.service";
            public const string Name = "service.name";
            public const string InstanceId = "service.instance.id";
            public const string Version = "service.version";
            public const string NameSpace = "service.namespace";
        }

        // https://opentelemetry.io/docs/specs/semconv/attributes-registry/messaging/#general-messaging-attributes
        // https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/
        public static class Messaging
        {
            // https://opentelemetry.io/docs/specs/semconv/attributes-registry/messaging/#messaging-operation-type
            public static class OperationType
            {
                public const string Key = "messaging.operation.type";
                public const string Receive = "receive";
                public const string Send = "send";
                public const string Process = "process";
            }

            // https://opentelemetry.io/docs/specs/semconv/attributes-registry/messaging/#messaging-system
            public static class System
            {
                public const string Key = "messaging.system";
                public const string ActiveMQ = "activemq";
                public const string RabbitMQ = "rabbitmq";
                public const string AwsSqs = "aws_sqs";
                public const string EventGrid = "eventgrid";
                public const string EventHubs = "eventhubs";
                public const string GcpPubSub = "gcp_pubsub";
                public const string Kafka = "kafka";
                public const string Pulsar = "pulsar";
                public const string ServiceBus = "servicebus";
            }

            public const string Destination = "messaging.destination";
            public const string DestinationKind = "messaging.destination_kind";
            public const string Url = "messaging.url";
            public const string MessageId = "messaging.message_id";
            public const string ConversationId = "messaging.conversation_id";
            public const string CorrelationId = "messaging.correlation_id";
            public const string CausationId = "messaging.causation_id";
            public const string Operation = "messaging.operation";
            public const string OperationName = "messaging.operation.name";
            public const string DestinationName = "messaging.destination.name";
            public const string ConsumerGroup = "messaging.consumer.group.name";
            public const string DestinationPartition = "messaging.destination.partition.id";

            // https://opentelemetry.io/docs/specs/semconv/attributes-registry/messaging/#rabbitmq-attributes
            // https://opentelemetry.io/docs/specs/semconv/messaging/rabbitmq/
            public static class RabbitMQ
            {
                public const string RoutingKey = "messaging.rabbitmq.destination.routing_key";
                public const string DeliveryTag = "messaging.rabbitmq.message.delivery_tag";

                /// <summary>Standard tag set for a RabbitMQ producer (send) span.</summary>
                public static IDictionary<string, object?> ProducerTags(
                    string serviceName,
                    string topicName,
                    string routingKey,
                    string? deliveryTag = null
                ) =>
                    new Dictionary<string, object?>
                    {
                        // BUGFIX: was tagged System.Kafka — these are RabbitMQ spans.
                        { System.Key, System.RabbitMQ },
                        { DeliveryTag, deliveryTag },
                        { Destination, topicName },
                        { OperationType.Key, OperationType.Send },
                        { Service.Name, serviceName },
                        { RoutingKey, routingKey },
                    };

                /// <summary>Standard tag set for a RabbitMQ consumer (receive) span.</summary>
                public static IDictionary<string, object?> ConsumerTags(
                    string serviceName,
                    string topicName,
                    string routingKey,
                    string? consumerGroup = null
                ) =>
                    new Dictionary<string, object?>
                    {
                        // BUGFIX: was tagged System.Kafka — these are RabbitMQ spans.
                        { System.Key, System.RabbitMQ },
                        { Destination, topicName },
                        { OperationType.Key, OperationType.Receive },
                        { Service.Name, serviceName },
                        { ConsumerGroup, consumerGroup },
                        { RoutingKey, routingKey },
                    };
            }

            // https://opentelemetry.io/docs/specs/semconv/attributes-registry/messaging/#kafka-attributes
            // https://opentelemetry.io/docs/specs/semconv/messaging/kafka/
            public static class Kafka
            {
                public const string MessageKey = "messaging.kafka.message.key";
                public const string Tombstone = "messaging.kafka.message.tombstone";
                public const string Offset = "messaging.kafka.offset";

                /// <summary>Standard tag set for a Kafka producer (send) span.</summary>
                public static IDictionary<string, object?> ProducerTags(
                    string serviceName,
                    string topicName,
                    string messageKey
                ) =>
                    new Dictionary<string, object?>
                    {
                        { System.Key, System.Kafka },
                        { Destination, topicName },
                        { OperationType.Key, OperationType.Send },
                        { Service.Name, serviceName },
                        { MessageKey, messageKey },
                    };

                /// <summary>Standard tag set for a Kafka consumer (receive) span.</summary>
                public static IDictionary<string, object?> ConsumerTags(
                    string serviceName,
                    string topicName,
                    string messageKey,
                    string partitionName,
                    string consumerGroup
                ) =>
                    new Dictionary<string, object?>
                    {
                        { System.Key, System.Kafka },
                        { Destination, topicName },
                        { OperationType.Key, OperationType.Receive },
                        { Service.Name, serviceName },
                        { MessageKey, messageKey },
                        { DestinationPartition, partitionName },
                        { ConsumerGroup, consumerGroup },
                    };
            }
        }

        // https://opentelemetry.io/docs/specs/semconv/database/database-spans/#common-attributes
        // https://opentelemetry.io/docs/specs/semconv/database/postgresql/#attributes
        public static class Db
        {
            public const string System = "db.system";
            public const string ConnectionString = "db.connection_string";
            public const string User = "db.user";
            public const string MsSqlInstanceName = "db.mssql.instance_name";
            public const string Name = "db.name";
            public const string Statement = "db.statement";
            public const string Operation = "db.operation";
            public const string Instance = "db.instance";
            public const string Url = "db.url";
            public const string CassandraKeyspace = "db.cassandra.keyspace";
            public const string RedisDatabaseIndex = "db.redis.database_index";
            public const string MongoDbCollection = "db.mongodb.collection";
        }

        // https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#exception-event
        public static class Exception
        {
            public const string EventName = "exception";
            public const string Type = "exception.type";
            public const string Message = "exception.message";
            public const string Stacktrace = "exception.stacktrace";
        }

        // https://opentelemetry.io/docs/specs/semconv/attributes-registry/otel/#otel-attributes
        public static class Otel
        {
            public const string StatusCode = "otel.status_code";
            public const string StatusDescription = "otel.status_description";
        }

        public static class Message
        {
            public const string Type = "message.type";
            public const string Id = "message.id";
        }

        // Application-specific tag names, prefixed with the service's instrumentation name.
        // NOTE: these read ObservabilityConstant.InstrumentationName at type initialization,
        // so AddCustomObservability must run before the first use of these tags.
        public static class Application
        {
            public static string AppService = $"{ObservabilityConstant.InstrumentationName}.appservice";
            public static string Consumer = $"{ObservabilityConstant.InstrumentationName}.consumer";
            public static string Producer = $"{ObservabilityConstant.InstrumentationName}.producer";

            public static class Commands
            {
                public static string Command = $"{ObservabilityConstant.InstrumentationName}.command";
                public static string CommandType = $"{Command}.type";
                public static string CommandHandler = $"{Command}.handler";
                public static string CommandHandlerType = $"{CommandHandler}.type";
            }

            public static class Queries
            {
                public static string Query = $"{ObservabilityConstant.InstrumentationName}.query";
                public static string QueryType = $"{Query}.type";
                public static string QueryHandler = $"{Query}.handler";
                public static string QueryHandlerType = $"{QueryHandler}.type";
            }

            public static class Events
            {
                public static string Event = $"{ObservabilityConstant.InstrumentationName}.event";
                public static string EventType = $"{Event}.type";
                public static string EventHandler = $"{Event}.handler";
                public static string EventHandlerType = $"{EventHandler}.type";
            }
        }
    }

    // https://opentelemetry.io/docs/specs/semconv/general/metrics/
    // https://opentelemetry.io/docs/specs/semconv/general/attribute-naming/
    public static class Metrics
    {
        // Application-specific metric instrument names, prefixed with the instrumentation name.
        // NOTE: the `FaildCount` member name is a typo kept for source compatibility with callers.
        public static class Application
        {
            public static string AppService = $"{ObservabilityConstant.InstrumentationName}.appservice";
            public static string Consumer = $"{ObservabilityConstant.InstrumentationName}.consumer";
            public static string Producer = $"{ObservabilityConstant.InstrumentationName}.producer";

            public static class Commands
            {
                public static string Command = $"{ObservabilityConstant.InstrumentationName}.command";
                public static string CommandType = $"{Command}.type";
                public static string CommandHandler = $"{Command}.handler";
                public static string SuccessCount = $"{CommandHandler}.success.count";
                public static string FaildCount = $"{CommandHandler}.failed.count";
                public static string ActiveCount = $"{CommandHandler}.active.count";
                public static string TotalExecutedCount = $"{CommandHandler}.total.count";
                public static string HandlerDuration = $"{CommandHandler}.duration";
            }

            public static class Queries
            {
                public static string Query = $"{ObservabilityConstant.InstrumentationName}.query";
                public static string QueryType = $"{Query}.type";
                public static string QueryHandler = $"{Query}.handler";
                public static string SuccessCount = $"{QueryHandler}.success.count";
                public static string FaildCount = $"{QueryHandler}.failed.count";
                public static string ActiveCount = $"{QueryHandler}.active.count";
                public static string TotalExecutedCount = $"{QueryHandler}.total.count";
                public static string HandlerDuration = $"{QueryHandler}.duration";
            }

            public static class Events
            {
                public static string Event = $"{ObservabilityConstant.InstrumentationName}.event";
                public static string EventType = $"{Event}.type";
                public static string EventHandler = $"{Event}.handler";
                public static string SuccessCount = $"{EventHandler}.success.count";
                public static string FaildCount = $"{EventHandler}.failed.count";
                public static string ActiveCount = $"{EventHandler}.active.count";
                public static string TotalExecutedCount = $"{EventHandler}.total.count";
                public static string HandlerDuration = $"{EventHandler}.duration";
            }
        }
    }
}

View File

@ -38,21 +38,11 @@
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "elasticsearch:9200"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"AllowedHosts": "*"
}

View File

@ -1,67 +1,78 @@
{
"AppOptions": {
"Name": "Booking-Service"
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "http://localhost:9200"
"AppOptions": {
"Name": "Booking-Service"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
},
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "booking-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "booking",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"Grpc": {
"FlightAddress": "https://localhost:5003",
"PassengerAddress": "https://localhost:5012"
},
"PolicyOptions": {
"Retry": {
"RetryCount": 3,
"SleepDuration": 1
},
"CircuitBreaker": {
"RetryCount": 5,
"BreakDuration" : 30
}
},
"EventStoreOptions": {
"ConnectionString": "esdb://localhost:2113?tls=false"
},
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "booking-db"
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"AllowedHosts": "*"
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "booking-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "booking",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"Grpc": {
"FlightAddress": "https://localhost:5003",
"PassengerAddress": "https://localhost:5012"
},
"PolicyOptions": {
"Retry": {
"RetryCount": 3,
"SleepDuration": 1
},
"CircuitBreaker": {
"RetryCount": 5,
"BreakDuration": 30
}
},
"EventStoreOptions": {
"ConnectionString": "esdb://localhost:2113?tls=false"
},
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "booking-db"
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"ObservabilityOptions": {
"InstrumentationName": "booking_service",
"OTLPOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:4317"
},
"AspireDashboardOTLPOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:4319"
},
"ZipkinOptions": {
"HttpExporterEndpoint": "http://localhost:9411/api/v2/spans"
},
"JaegerOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:14317",
"HttpExporterEndpoint": "http://localhost:14268/api/traces"
},
"UsePrometheusExporter": true,
"UseOTLPExporter": true,
"UseAspireOTLPExporter": true,
"UseGrafanaExporter": false,
"ServiceName": "Booking Service"
},
"AllowedHosts": "*"
}

View File

@ -9,7 +9,7 @@ using BuildingBlocks.Mapster;
using BuildingBlocks.MassTransit;
using BuildingBlocks.Mongo;
using BuildingBlocks.OpenApi;
using BuildingBlocks.OpenTelemetry;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.PersistMessageProcessor;
using BuildingBlocks.ProblemDetails;
using BuildingBlocks.Web;
@ -73,7 +73,7 @@ public static class InfrastructureExtensions
builder.Services.AddCustomMapster(typeof(BookingRoot).Assembly);
builder.Services.AddCustomHealthCheck();
builder.Services.AddCustomMassTransit(env, typeof(BookingRoot).Assembly);
builder.Services.AddCustomOpenTelemetry();
builder.AddCustomObservability();
builder.Services.AddTransient<AuthHeaderHandler>();
// ref: https://github.com/oskardudycz/EventSourcing.NetCore/tree/main/Sample/EventStoreDB/ECommerce
@ -91,7 +91,7 @@ public static class InfrastructureExtensions
var env = app.Environment;
var appOptions = app.GetOptions<AppOptions>(nameof(AppOptions));
app.MapPrometheusScrapingEndpoint();
app.UseCustomObservability();
app.UseCustomProblemDetails();
app.UseSerilogRequestLogging(options =>

View File

@ -3,20 +3,10 @@
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "elasticsearch:9200"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel": "error"
}
},
"PostgresOptions": {

View File

@ -1,53 +1,64 @@
{
"AppOptions": {
"Name": "Flight-Service"
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "http://localhost:9200"
"AppOptions": {
"Name": "Flight-Service"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
}
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel": "error"
}
},
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=flight;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "flight-db"
},
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "flight-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "flight",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"HealthOptions": {
"Enabled": false
},
"AllowedHosts": "*"
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=flight;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "flight-db"
},
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "flight-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "flight",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"HealthOptions": {
"Enabled": false
},
"ObservabilityOptions": {
"InstrumentationName": "flight_service",
"OTLPOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:4317"
},
"AspireDashboardOTLPOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:4319"
},
"ZipkinOptions": {
"HttpExporterEndpoint": "http://localhost:9411/api/v2/spans"
},
"JaegerOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:14317",
"HttpExporterEndpoint": "http://localhost:14268/api/traces"
},
"UsePrometheusExporter": true,
"UseOTLPExporter": true,
"UseAspireOTLPExporter": true,
"UseGrafanaExporter": false,
"ServiceName": "Flight Service"
},
"AllowedHosts": "*"
}

View File

@ -9,7 +9,7 @@ using BuildingBlocks.Mapster;
using BuildingBlocks.MassTransit;
using BuildingBlocks.Mongo;
using BuildingBlocks.OpenApi;
using BuildingBlocks.OpenTelemetry;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.PersistMessageProcessor;
using BuildingBlocks.ProblemDetails;
using BuildingBlocks.Web;
@ -78,7 +78,7 @@ public static class InfrastructureExtensions
builder.Services.AddCustomMapster(typeof(FlightRoot).Assembly);
builder.Services.AddHttpContextAccessor();
builder.Services.AddCustomMassTransit(env, typeof(FlightRoot).Assembly);
builder.Services.AddCustomOpenTelemetry();
builder.AddCustomObservability();
builder.Services.AddCustomHealthCheck();
builder.Services.AddGrpc(options =>
@ -97,7 +97,7 @@ public static class InfrastructureExtensions
var env = app.Environment;
var appOptions = app.GetOptions<AppOptions>(nameof(AppOptions));
app.MapPrometheusScrapingEndpoint();
app.UseCustomObservability();
app.UseCustomProblemDetails();
app.UseSerilogRequestLogging(options =>

View File

@ -9,9 +9,7 @@ using Xunit;
namespace Integration.Test.Flight.Features;
using global::Flight.Flights.Features.CreatingFlight.V1;
using global::Flight.Flights.Features.GettingFlightById.V1;
using Thrift.Protocol;
public class GetFlightByIdTests : FlightIntegrationTestBase
{

View File

@ -1,6 +1,3 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Unit.Test.Common;
using Unit.Test.Fakes;
@ -8,7 +5,6 @@ using Xunit;
namespace Unit.Test.Aircraft.Features.CreateAircraftTests;
using global::Flight.Aircrafts.Dtos;
using global::Flight.Aircrafts.Features.CreatingAircraft.V1;
[Collection(nameof(UnitTestFixture))]

View File

@ -1,6 +1,3 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Unit.Test.Common;
using Unit.Test.Fakes;

View File

@ -1,5 +1,3 @@
using System;
using System.Collections.Generic;
using Flight.Data;
using Flight.Flights.Enums;
using Flight.Seats.Enums;

View File

@ -1,4 +1,3 @@
using System;
using Flight.Data;
using MapsterMapper;
using Xunit;

View File

@ -1,5 +1,3 @@
using System;
using System.Collections.Generic;
using Flight.Flights.Dtos;
using MapsterMapper;
using Unit.Test.Common;
@ -10,12 +8,10 @@ namespace Unit.Test.Flight;
[Collection(nameof(UnitTestFixture))]
public class FlightMappingTests
{
private readonly UnitTestFixture _fixture;
private readonly IMapper _mapper;
public FlightMappingTests(UnitTestFixture fixture)
{
_fixture = fixture;
_mapper = fixture.Mapper;
}

View File

@ -1,6 +1,3 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Unit.Test.Common;
using Unit.Test.Fakes;

View File

@ -1,5 +1,3 @@
using System;
using System.Collections.Generic;
using Flight.Seats.Dtos;
using MapsterMapper;
using Unit.Test.Common;

View File

@ -21,20 +21,10 @@
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "elasticsearch:9200"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"AllowedHosts": "*"

View File

@ -1,46 +1,57 @@
{
"AppOptions": {
"Name": "Identity-Service"
},
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=identity;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"AuthOptions": {
"IssuerUri": "http://localhost:6005"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "identity",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "http://localhost:9200"
"AppOptions": {
"Name": "Identity-Service"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=identity;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"AllowedHosts": "*"
"AuthOptions": {
"IssuerUri": "http://localhost:6005"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "identity",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
}
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"ObservabilityOptions": {
"InstrumentationName": "identity_service",
"OTLPOptions": {
"OTLPGrpExporterEndpoint": "http://localhost:4317"
},
"AspireDashboardOTLPOptions": {
"OTLPGrpExporterEndpoint": "http://localhost:4319"
},
"ZipkinOptions": {
"HttpExporterEndpoint": "http://localhost:9411/api/v2/spans"
},
"JaegerOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:14317",
"HttpExporterEndpoint": "http://localhost:14268/api/traces"
},
"UsePrometheusExporter": true,
"UseOTLPExporter": true,
"UseAspireOTLPExporter": true,
"UseGrafanaExporter": false,
"ServiceName": "Identity Service"
},
"AllowedHosts": "*"
}

View File

@ -6,7 +6,7 @@ using BuildingBlocks.Logging;
using BuildingBlocks.Mapster;
using BuildingBlocks.MassTransit;
using BuildingBlocks.OpenApi;
using BuildingBlocks.OpenTelemetry;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.PersistMessageProcessor;
using BuildingBlocks.ProblemDetails;
using BuildingBlocks.Web;
@ -74,7 +74,7 @@ public static class InfrastructureExtensions
builder.Services.AddCustomHealthCheck();
builder.Services.AddCustomMassTransit(env, typeof(IdentityRoot).Assembly);
builder.Services.AddCustomOpenTelemetry();
builder.AddCustomObservability();
builder.AddCustomIdentityServer();
@ -93,7 +93,7 @@ public static class InfrastructureExtensions
var env = app.Environment;
var appOptions = app.GetOptions<AppOptions>(nameof(AppOptions));
app.MapPrometheusScrapingEndpoint();
app.UseCustomObservability();
app.UseForwardedHeaders();

View File

@ -28,20 +28,10 @@
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "elasticsearch:9200"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"AllowedHosts": "*"

View File

@ -1,53 +1,64 @@
{
"AppOptions": {
"Name": "Passenger-Service"
},
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=passenger;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "passenger-db"
},
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "passenger-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "passenger",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"Elastic": {
"Enabled": true,
"ElasticServiceUrl": "http://localhost:9200"
"AppOptions": {
"Name": "Passenger-Service"
},
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
"PostgresOptions": {
"ConnectionString": "Server=localhost;Port=5432;Database=passenger;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"Sentry": {
"Enabled": false,
"Dsn": "",
"MinimumBreadcrumbLevel": "information",
"MinimumEventLevel":"error"
}
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"AllowedHosts": "*"
"MongoOptions": {
"ConnectionString": "mongodb://localhost:27017",
"DatabaseName": "passenger-db"
},
"Jwt": {
"Authority": "http://localhost:6005",
"Audience": "passenger-api",
"RequireHttpsMetadata": false,
"MetadataAddress": "http://localhost:6005/.well-known/openid-configuration"
},
"RabbitMqOptions": {
"HostName": "localhost",
"ExchangeName": "passenger",
"UserName": "guest",
"Password": "guest",
"Port": 5672
},
"LogOptions": {
"Level": "information",
"LogTemplate": "{Timestamp:HH:mm:ss} [{Level:u4}] {Message:lj}{NewLine}{Exception}",
"File": {
"Enabled": false,
"Path": "logs/logs.txt",
"Interval": "day"
}
},
"HealthOptions": {
"Enabled": false
},
"PersistMessageOptions": {
"Interval": 30,
"Enabled": true,
"ConnectionString": "Server=localhost;Port=5432;Database=persist_message;User Id=postgres;Password=postgres;Include Error Detail=true"
},
"ObservabilityOptions": {
"InstrumentationName": "passenger_service",
"OTLPOptions": {
"OTLPGrpExporterEndpoint": "http://localhost:4317"
},
"AspireDashboardOTLPOptions": {
"OTLPGrpExporterEndpoint": "http://localhost:4319"
},
"ZipkinOptions": {
"HttpExporterEndpoint": "http://localhost:9411/api/v2/spans"
},
"JaegerOptions": {
"OTLPGrpcExporterEndpoint": "http://localhost:14317",
"HttpExporterEndpoint": "http://localhost:14268/api/traces"
},
"UsePrometheusExporter": true,
"UseOTLPExporter": true,
"UseAspireOTLPExporter": true,
"UseGrafanaExporter": false,
"ServiceName": "Passenger Service"
},
"AllowedHosts": "*"
}

View File

@ -9,7 +9,7 @@ using BuildingBlocks.Mapster;
using BuildingBlocks.MassTransit;
using BuildingBlocks.Mongo;
using BuildingBlocks.OpenApi;
using BuildingBlocks.OpenTelemetry;
using BuildingBlocks.OpenTelemetryCollector;
using BuildingBlocks.PersistMessageProcessor;
using BuildingBlocks.ProblemDetails;
using BuildingBlocks.Web;
@ -26,7 +26,6 @@ using Serilog;
namespace Passenger.Extensions.Infrastructure;
public static class InfrastructureExtensions
{
public static WebApplicationBuilder AddInfrastructure(this WebApplicationBuilder builder)
@ -38,27 +37,31 @@ public static class InfrastructureExtensions
builder.Services.AddScoped<IEventMapper, EventMapper>();
builder.Services.AddScoped<IEventDispatcher, EventDispatcher>();
builder.Services.Configure<ApiBehaviorOptions>(options =>
{
options.SuppressModelStateInvalidFilter = true;
});
builder.Services.Configure<ApiBehaviorOptions>(
options =>
{
options.SuppressModelStateInvalidFilter = true;
});
var appOptions = builder.Services.GetOptions<AppOptions>(nameof(AppOptions));
Console.WriteLine(FiggleFonts.Standard.Render(appOptions.Name));
builder.Services.AddRateLimiter(options =>
{
options.GlobalLimiter = PartitionedRateLimiter.Create<HttpContext, string>(httpContext =>
RateLimitPartition.GetFixedWindowLimiter(
partitionKey: httpContext.User.Identity?.Name ?? httpContext.Request.Headers.Host.ToString(),
factory: partition => new FixedWindowRateLimiterOptions
{
AutoReplenishment = true,
PermitLimit = 10,
QueueLimit = 0,
Window = TimeSpan.FromMinutes(1)
}));
});
builder.Services.AddRateLimiter(
options =>
{
options.GlobalLimiter = PartitionedRateLimiter.Create<HttpContext, string>(
httpContext =>
RateLimitPartition.GetFixedWindowLimiter(
partitionKey: httpContext.User.Identity?.Name ??
httpContext.Request.Headers.Host.ToString(),
factory: partition => new FixedWindowRateLimiterOptions
{
AutoReplenishment = true,
PermitLimit = 10,
QueueLimit = 0,
Window = TimeSpan.FromMinutes(1)
}));
});
builder.Services.AddPersistMessageProcessor();
builder.Services.AddCustomDbContext<PassengerDbContext>();
@ -76,11 +79,13 @@ public static class InfrastructureExtensions
builder.Services.AddHttpContextAccessor();
builder.Services.AddCustomHealthCheck();
builder.Services.AddCustomMassTransit(env, typeof(PassengerRoot).Assembly);
builder.Services.AddCustomOpenTelemetry();
builder.Services.AddGrpc(options =>
{
options.Interceptors.Add<GrpcExceptionInterceptor>();
});
builder.AddCustomObservability();
builder.Services.AddGrpc(
options =>
{
options.Interceptors.Add<GrpcExceptionInterceptor>();
});
return builder;
}
@ -91,13 +96,16 @@ public static class InfrastructureExtensions
var env = app.Environment;
var appOptions = app.GetOptions<AppOptions>(nameof(AppOptions));
app.MapPrometheusScrapingEndpoint();
app.UseCustomObservability();
app.UseCustomProblemDetails();
app.UseSerilogRequestLogging(options =>
{
options.EnrichDiagnosticContext = LogEnrichHelper.EnrichFromRequest;
});
app.UseSerilogRequestLogging(
options =>
{
options.EnrichDiagnosticContext = LogEnrichHelper.EnrichFromRequest;
});
app.UseCorrelationId();
app.UseMigration<PassengerDbContext>();
app.UseCustomHealthCheck();

View File

@ -10,8 +10,6 @@ using Xunit;
namespace Integration.Test.Passenger.Features;
using global::Passenger.Passengers.Features.GettingPassengerById.V1;
using Humanizer;
using Thrift.Protocol;
public class GetPassengerByIdTests : PassengerIntegrationTestBase
{