diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..de00a1c5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +sandbox/.docker \ No newline at end of file diff --git a/.gitignore b/.gitignore index 763b5bae..9736c07e 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,5 @@ bin/ .env /logs/ /infrastructure/src/main/resources/logs/ +sandbox/.docker +sandbox/app/.env diff --git a/Dockerfile b/Dockerfile index 46692df3..215a0558 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # To build and run: # build stage -FROM gradle:8.0.2-jdk17-alpine AS builder +FROM gradle:8.5-jdk21-alpine AS builder WORKDIR /app @@ -10,13 +10,19 @@ COPY . . RUN gradle bootJar # build runtime -FROM eclipse-temurin:17.0.6_10-jre-alpine +FROM eclipse-temurin:21-jre-alpine ARG JAR_FILE=/app/build/libs/app*.jar COPY --from=builder $JAR_FILE /app.jar -COPY --from=docker.elastic.co/observability/apm-agent-java:1.38.0 /usr/agent/elastic-apm-agent.jar /apm-agent.jar -COPY opentelemetry-javaagent.jar /opentelemetry-javaagent.jar + +# Download do OpenTelemetry Java Agent (version pinned for reproducible builds) +RUN wget -O /opentelemetry-javaagent.jar https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.31.0/opentelemetry-javaagent.jar +#RUN wget -O /opentelemetry-javaagent.jar https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar + +# Download do Elastic APM Java Agent (version pinned for reproducible builds) +#RUN wget -O /apm-agent.jar https://repo1.maven.org/maven2/co/elastic/apm/elastic-apm-agent/1.46.0/elastic-apm-agent-1.46.0.jar +COPY --from=docker.elastic.co/observability/apm-agent-java:1.46.0 /usr/agent/elastic-apm-agent.jar /apm-agent.jar RUN addgroup -S spring && adduser -S spring -G spring USER spring:spring @@ -24,15 +30,12 @@ USER spring:spring ENTRYPOINT [ "java", \ "-javaagent:/apm-agent.jar", \ "-Delastic.apm.service_name=admin-do-catalogo", \ - "-Delastic.apm.server_url=http://apm-admin-do-catalogo:8200", \ + 
"-Delastic.apm.server_url=http://apm-codeflix:8200", \ + "-Delastic.apm.environment=codeflix", \ "-Delastic.apm.application_packages=com.lukinhasssss", \ "-javaagent:/opentelemetry-javaagent.jar", \ "-Dotel.service.name=admin-do-catalogo", \ - "-Dotel.traces.exporter=otlp", \ - "-Dotel.metrics.exporter=otlp", \ - "-Dotel.integration.jdbc.datasource.enabled=true", \ - "-Dotel.instrumentation.jdbc.datasource.enabled=true", \ - "-Dotel.exporter.otlp.endpoint=http://collector-admin-do-catalogo:4318", \ + "-Dotel.exporter.otlp.endpoint=http://otel-collector-codeflix:4318", \ "-Dotel.exporter.otlp.protocol=http/protobuf", \ "-jar", "/app.jar" \ ] \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml.old similarity index 100% rename from docker-compose.yaml rename to docker-compose.yaml.old diff --git a/infrastructure/src/main/kotlin/com/lukinhasssss/admin/catalogo/infrastructure/configuration/EventConfig.kt b/infrastructure/src/main/kotlin/com/lukinhasssss/admin/catalogo/infrastructure/configuration/EventConfig.kt index 99074d31..c9c03225 100644 --- a/infrastructure/src/main/kotlin/com/lukinhasssss/admin/catalogo/infrastructure/configuration/EventConfig.kt +++ b/infrastructure/src/main/kotlin/com/lukinhasssss/admin/catalogo/infrastructure/configuration/EventConfig.kt @@ -17,7 +17,7 @@ class EventConfig { @Bean @VideoCreatedQueue @Profile(value = ["development"]) - fun inMemoryVideoCreatedEventService(): EventService = InMemoryEventService() + fun videoCreatedInMemoryEventService(): EventService = InMemoryEventService() @Bean @VideoCreatedQueue diff --git a/infrastructure/src/main/resources/application.yaml b/infrastructure/src/main/resources/application.yaml index a74a346e..2b948d2e 100644 --- a/infrastructure/src/main/resources/application.yaml +++ b/infrastructure/src/main/resources/application.yaml @@ -56,6 +56,7 @@ spring: hikari: # É responsável por gerenciar o pool de conexões auto-commit: false connection-timeout: 250 # É uma 
configuração em milliseconds. O ideal é manter baixo para que estoure timeout logo e não prenda as threads. + idle-timeout: 60000 max-lifetime: 600000 # Tempo máximo que uma conexão pode ficar aberta (10 min) - security. maximum-pool-size: 20 # Mantemos até no máx 20 conexões com o banco de dados. O ideal é manter baixo mesmo, pois é algo custoso para o banco gerenciar. https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing minimum-idle: 10 @@ -70,7 +71,7 @@ spring: persistence: sharedCache: mode: UNSPECIFIED # https://github.com/spring-projects/spring-data-jpa/issues/2717 - "[hibernate.dialect]": org.hibernate.dialect.PostgreSQLDialect +# "[hibernate.dialect]": org.hibernate.dialect.PostgreSQLDialect "[hibernate.generate_statistics]": false "[hibernate.connection.provider_disables_autocommit]": true # Para aumentar a performance ao máximo, desabilitamos o auto-commit e o open-in-view. diff --git a/infrastructure/src/main/resources/collector/otel-collector-config.yaml b/infrastructure/src/main/resources/collector/otel-collector-config.yaml deleted file mode 100644 index 327ef377..00000000 --- a/infrastructure/src/main/resources/collector/otel-collector-config.yaml +++ /dev/null @@ -1,21 +0,0 @@ -extensions: - health_check: - -receivers: - otlp: - protocols: - grpc: - http: - -exporters: - jaeger: - endpoint: jaeger-admin-do-catalogo:14250 - tls: - insecure: true - -service: - pipelines: - traces: - receivers: [otlp] - exporters: [jaeger] - extensions: [health_check] \ No newline at end of file diff --git a/opentelemetry-javaagent.jar b/opentelemetry-javaagent.jar deleted file mode 100644 index 0ca2f585..00000000 Binary files a/opentelemetry-javaagent.jar and /dev/null differ diff --git a/sandbox/app/docker-compose.yml b/sandbox/app/docker-compose.yml new file mode 100644 index 00000000..77252bc0 --- /dev/null +++ b/sandbox/app/docker-compose.yml @@ -0,0 +1,51 @@ +version: '3.9' + +services: + admin-do-catalogo: + container_name: admin-do-catalogo + 
build: + context: ../../ + dockerfile: Dockerfile +# image: lukinhasssss/admin-do-catalogo + ports: + - "8080:8080" + env_file: + - ../../.env + restart: on-failure + networks: + - admin-do-catalogo + - admin-do-catalogo-services +# - open-telemetry-codeflix +# - elasticsearch-codeflix + labels: + filebeat_collector: true + + filebeat-admin-do-catalogo: + container_name: filebeat-admin-do-catalogo + image: docker.elastic.co/beats/filebeat:8.12.0 + volumes: + - /var/lib/docker/containers:/var/lib/docker/containers:ro # Docker logs + - /var/run/docker.sock:/var/run/docker.sock:ro # Additional information about containers + - ./filebeat/filebeat.yaml:/usr/share/filebeat/filebeat.yml:ro # Configuration file + - ../.docker/filebeat:/usr/share/filebeat/data:rw # Persistence data + user: root # Allow access to log files and docker.sock + environment: + - ELASTIC_HOSTS=http://elasticsearch-codeflix:9200 + - KIBANA_HOSTS=http://kibana-codeflix:5601 + - LOGSTASH_HOSTS=http://logstash-codeflix:9600 + restart: on-failure + command: + - '-strict.perms=false' + networks: + - admin-do-catalogo + - elasticsearch-codeflix + +networks: + admin-do-catalogo: + external: true + admin-do-catalogo-services: + external: true + open-telemetry-codeflix: + external: true + elasticsearch-codeflix: + external: true \ No newline at end of file diff --git a/sandbox/app/filebeat/filebeat.yaml b/sandbox/app/filebeat/filebeat.yaml new file mode 100644 index 00000000..14e15af3 --- /dev/null +++ b/sandbox/app/filebeat/filebeat.yaml @@ -0,0 +1,36 @@ +filebeat: + autodiscover: + providers: + - type: docker + labels.dedot: true + templates: + - condition: + contains: + container.labels.filebeat_collector: "true" + config: + - type: container + format: docker + paths: + - "/var/lib/docker/containers/${data.docker.container.id}/*.log" + processors: + - decode_json_fields: + when.equals: + docker.container.labels.decode_log_event_to_json_object: "true" + fields: ["message"] + target: "" + overwrite_keys: 
true + +output: + logstash: + hosts: logstash-codeflix:5044 + +#output.elasticsearch: +# hosts: ["elasticsearch-codeflix:9200"] + +setup: + kibana: + host: "kibana-codeflix:5601" + dashboards: + enabled: true + +logging.metrics.enabled: false \ No newline at end of file diff --git a/sandbox/elk/apm/apm-server.yaml b/sandbox/elk/apm/apm-server.yaml new file mode 100644 index 00000000..37eb3946 --- /dev/null +++ b/sandbox/elk/apm/apm-server.yaml @@ -0,0 +1,23 @@ +apm-server: + host: "apm-codeflix:8200" + rum: + enabled: true + source_mapping: + enabled: true + elasticsearch: + hosts: ["elasticsearch-codeflix:9200"] + username: "elastic" + password: "changeme" + expiration: 5m + index_pattern: "apm-*-sourcemap*" + kibana: + enabled: true + host: "kibana-codeflix:5601" +output.elasticsearch: + hosts: ["elasticsearch-codeflix:9200"] + username: 'elastic' + password: 'changeme' + enabled: true +output.logstash: + hosts: ["logstash-codeflix:5044"] + enabled: false \ No newline at end of file diff --git a/sandbox/elk/beats/heartbeat.yaml b/sandbox/elk/beats/heartbeat.yaml new file mode 100644 index 00000000..721197e6 --- /dev/null +++ b/sandbox/elk/beats/heartbeat.yaml @@ -0,0 +1,27 @@ +heartbeat.monitors: + - type: http + schedule: '@every 5s' + urls: + - http://elasticsearch-codeflix:9200 + - http://kibana-codeflix:5601 + - http://admin-do-catalogo:8080/api + - http://catalogo-de-videos:8081/api + + - type: icmp + schedule: '@every 5s' + hosts: + - elasticsearch-codeflix + - kibana-codeflix + - apm-codeflix + - metricbeat-codeflix + +processors: + - add_cloud_metadata: ~ + +#output.logstash: +# hosts: 'logstash-codeflix:5044' + +output.elasticsearch: + hosts: 'elasticsearch-codeflix:9200' + username: 'elastic' # elasticsearch default user + password: 'changeme' # elasticsearch default password \ No newline at end of file diff --git a/sandbox/elk/beats/metricbeat.yaml b/sandbox/elk/beats/metricbeat.yaml new file mode 100755 index 00000000..9194a209 --- /dev/null +++ 
b/sandbox/elk/beats/metricbeat.yaml @@ -0,0 +1,32 @@ +metricbeat.modules: + - module: docker + metricsets: [ "container", "cpu", "diskio", "event", "healthcheck", "image", "info", "memory", "network" ] + hosts: [ "unix:///var/run/docker.sock" ] + period: 10s + + - module: elasticsearch + metricsets: [ "node", "node_stats", "cluster_stats", "index" ] + period: 10s + hosts: ["elasticsearch-codeflix:9200"] + + - module: prometheus + period: 10s + metricsets: [ "collector" ] + hosts: [ "prometheus-codeflix:9090" ] + metrics_path: /metrics + + - module: postgresql + metricsets: [ "database", "bgwriter", "activity" ] + period: 10s + hosts: [ "postgres://postgres-admin-do-catalogo:5432/adm_videos?sslmode=disable" ] + +output.elasticsearch: + hosts: ["elasticsearch-codeflix:9200"] + +#output.logstash: +# hosts: ["logstash-codeflix:5044"] + +setup.kibana: + host: "kibana-codeflix:5601" + +setup.dashboards.enabled: true \ No newline at end of file diff --git a/sandbox/elk/docker-compose.yml b/sandbox/elk/docker-compose.yml new file mode 100644 index 00000000..0385bb5c --- /dev/null +++ b/sandbox/elk/docker-compose.yml @@ -0,0 +1,147 @@ +version: '3.9' + +services: + elasticsearch-codeflix: + container_name: elasticsearch-admin-do-catalogo + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 +# image: docker.elastic.co/elasticsearch/elasticsearch:8.7.0 + ports: + - "9200:9200" + environment: + - node.name=elasticsearch-admin-do-catalogo + - cluster.name=es-docker-cluster + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - http.cors.enabled=true + - http.cors.allow-origin="*" + - discovery.type=single-node + - xpack.security.enabled=false + - xpack.license.self_generated.type=basic + ulimits: + memlock: + soft: -1 + hard: -1 + volumes: + - ../.docker/elasticsearch-admin-do-catalogo:/usr/share/elasticsearch/data + healthcheck: + test: [ "CMD-SHELL", "curl
--silent --fail http://localhost:9200/_cat/health?h=status" ] + interval: 5s + timeout: 10s + retries: 5 + networks: + - elasticsearch-codeflix + - admin-do-catalogo + + kibana-codeflix: + container_name: kibana-codeflix + image: docker.elastic.co/kibana/kibana:8.12.0 +# image: docker.elastic.co/kibana/kibana:8.7.0 + ports: + - "5601:5601" + environment: + ELASTICSEARCH_URL: http://elasticsearch-codeflix:9200 + ELASTICSEARCH_HOSTS: '["http://elasticsearch-codeflix:9200"]' + depends_on: + elasticsearch-codeflix: + condition: service_healthy + healthcheck: + test: [ "CMD-SHELL", "curl --silent --fail http://localhost:5601/api/status" ] + interval: 5s + timeout: 10s + retries: 5 + networks: + - elasticsearch-codeflix + + apm-codeflix: + container_name: apm-codeflix +# image: docker.elastic.co/apm/apm-server:8.12.0 + image: docker.elastic.co/apm/apm-server:7.17.9 + ports: + - "8200:8200" + command: + - '-strict.perms=false' + restart: on-failure + volumes: + - ./apm/apm-server.yaml:/usr/share/apm-server/apm-server.yml + healthcheck: + test: [ "CMD-SHELL", "curl --silent --fail http://localhost:8200/healthcheck" ] + interval: 5s + timeout: 10s + retries: 5 + networks: + - elasticsearch-codeflix + - admin-do-catalogo + + metricbeat-codeflix: + container_name: metricbeat-codeflix + image: docker.elastic.co/beats/metricbeat:8.12.0 +# image: docker.elastic.co/beats/metricbeat:8.7.0 + ports: + - "5066:5066" + command: + - '-strict.perms=false' + user: root + restart: on-failure + environment: + - ELASTIC_HOSTS=http://elasticsearch-codeflix:9200 + - KIBANA_HOSTS=http://kibana-codeflix:5601 + - LOGSTASH_HOSTS=http://logstash-codeflix:9600 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./beats/metricbeat.yaml:/usr/share/metricbeat/metricbeat.yml + depends_on: + kibana-codeflix: + condition: service_healthy + networks: + - elasticsearch-codeflix + - admin-do-catalogo-services + - monitoramento-codeflix + + heartbeat-codeflix: + container_name: heartbeat-codeflix + 
image: docker.elastic.co/beats/heartbeat:8.12.0 + environment: + - ELASTIC_HOSTS=http://elasticsearch-codeflix:9200 + - KIBANA_HOSTS=http://kibana-codeflix:5601 + - LOGSTASH_HOSTS=http://logstash-codeflix:9600 + command: + - '-strict.perms=false' + volumes: + - ./beats/heartbeat.yaml:/usr/share/heartbeat/heartbeat.yml + depends_on: + kibana-codeflix: + condition: service_healthy + networks: + - elasticsearch-codeflix + - admin-do-catalogo + - catalogo-de-videos + + logstash-codeflix: + container_name: logstash-codeflix + image: docker.elastic.co/logstash/logstash:8.12.0 + volumes: + - ./logstash/pipeline:/usr/share/logstash/pipeline:ro + environment: + - "LS_JAVA_OPTS=-Xms256m -Xmx256m" + - ELASTIC_HOSTS=http://elasticsearch-codeflix:9200 + - xpack.monitoring.enabled=false + depends_on: + elasticsearch-codeflix: + condition: service_healthy + ports: + - "5044:5044" + - "25826:25826" + networks: + - elasticsearch-codeflix + +networks: + elasticsearch-codeflix: + external: true + admin-do-catalogo: + external: true + admin-do-catalogo-services: + external: true + monitoramento-codeflix: + external: true + catalogo-de-videos: + external: true diff --git a/infrastructure/src/main/resources/elasticstack/apm/apm-server.yaml b/sandbox/elk/elasticstack_bkp/apm/apm-server.yaml similarity index 100% rename from infrastructure/src/main/resources/elasticstack/apm/apm-server.yaml rename to sandbox/elk/elasticstack_bkp/apm/apm-server.yaml diff --git a/infrastructure/src/main/resources/elasticstack/apm/apm-server.yaml.old b/sandbox/elk/elasticstack_bkp/apm/apm-server.yaml.old similarity index 100% rename from infrastructure/src/main/resources/elasticstack/apm/apm-server.yaml.old rename to sandbox/elk/elasticstack_bkp/apm/apm-server.yaml.old diff --git a/infrastructure/src/main/resources/elasticstack/beats/heartbeat.yaml b/sandbox/elk/elasticstack_bkp/beats/heartbeat.yaml similarity index 100% rename from infrastructure/src/main/resources/elasticstack/beats/heartbeat.yaml rename 
to sandbox/elk/elasticstack_bkp/beats/heartbeat.yaml diff --git a/infrastructure/src/main/resources/elasticstack/beats/metricbeat.yaml b/sandbox/elk/elasticstack_bkp/beats/metricbeat.yaml similarity index 100% rename from infrastructure/src/main/resources/elasticstack/beats/metricbeat.yaml rename to sandbox/elk/elasticstack_bkp/beats/metricbeat.yaml diff --git a/infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml b/sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml similarity index 100% rename from infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml rename to sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml diff --git a/infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml.bkp b/sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml.bkp similarity index 100% rename from infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml.bkp rename to sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml.bkp diff --git a/infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml.old b/sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml.old similarity index 100% rename from infrastructure/src/main/resources/elasticstack/filebeat/filebeat.yaml.old rename to sandbox/elk/elasticstack_bkp/filebeat/filebeat.yaml.old diff --git a/infrastructure/src/main/resources/elasticstack/logstash/pipeline/logstash.conf b/sandbox/elk/elasticstack_bkp/logstash/pipeline/logstash.conf similarity index 100% rename from infrastructure/src/main/resources/elasticstack/logstash/pipeline/logstash.conf rename to sandbox/elk/elasticstack_bkp/logstash/pipeline/logstash.conf diff --git a/sandbox/elk/filebeat/filebeat.yaml b/sandbox/elk/filebeat/filebeat.yaml new file mode 100644 index 00000000..e40b0acf --- /dev/null +++ b/sandbox/elk/filebeat/filebeat.yaml @@ -0,0 +1,84 @@ +filebeat: + autodiscover: + providers: + - type: docker + labels.dedot: true + templates: + - condition: + contains: + container.labels.filebeat_collector: "true" 
+ config: + - type: container + format: docker + paths: + - "/var/lib/docker/containers/${data.docker.container.id}/*.log" + processors: + - decode_json_fields: + when.equals: + docker.container.labels.decode_log_event_to_json_object: "true" + fields: ["message"] + target: "" + overwrite_keys: true + +output.elasticsearch: + hosts: ["elasticsearch-codeflix:9200"] + +setup: + kibana: + host: "kibana-codeflix:5601" + dashboards: + enabled: true + +logging.metrics.enabled: false + + + +#filebeat: +# autodiscover: +# providers: +# - type: docker +# labels.dedot: true +# templates: +# - condition: +# contains: +# container.labels.filebeat_collector: "true" +# config: +# - type: container +# format: docker +# paths: +# - "/var/lib/docker/containers/${data.docker.container.id}/*.log" +# processors: +# - decode_json_fields: +# when.equals: +# docker.container.labels.decode_log_event_to_json_object: "true" +# fields: ["message"] +# target: "" +# overwrite_keys: true +# modules: +# - module: postgresql +# log: +# enabled: true +#setup: +# kibana: +# host: "kibana-admin-do-catalogo:5601" +# dashboards: +# enabled: true +# +#output: +# logstash: +# hosts: ["logstash-admin-do-catalogo:5044"] +## elasticsearch: +## hosts: [ "elasticsearch-admin-do-catalogo:9200" ] +## username: "elastic" +## password: "changeme" +# +#logging: +# metrics: +# enabled: false +# +#processors: +# - add_host_metadata: +# when.not.contains.tags: forwarded +# - add_cloud_metadata: ~ +# - add_docker_metadata: ~ +# - add_kubernetes_metadata: ~ \ No newline at end of file diff --git a/sandbox/elk/filebeat/filebeat.yaml.bkp b/sandbox/elk/filebeat/filebeat.yaml.bkp new file mode 100644 index 00000000..ead635af --- /dev/null +++ b/sandbox/elk/filebeat/filebeat.yaml.bkp @@ -0,0 +1,271 @@ +###################### Filebeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. 
The filebeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +# For more available modules and options, please see the filebeat.reference.yml sample +# configuration file. + +# ============================== Filebeat inputs =============================== + +filebeat.inputs: + + # Each - is an input. Most options can be set at the input level, so + # you can use different inputs for various configurations. + # Below are the input specific configurations. + + - type: log + + # Change to true to enable this input configuration. + enabled: true + + # Paths that should be crawled and fetched. Glob based paths. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. 
Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + # filestream is an experimental input. It is going to replace log input in the future. + - type: filestream + + # Change to true to enable this input configuration. + enabled: false + + # Paths that should be crawled and fetched. Glob based paths. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #prospector.scanner.exclude_files: ['.gz$'] + + # Optional additional fields. 
These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + +# ============================== Filebeat modules ============================== + +filebeat.config.modules: + # Glob pattern for configuration loading + path: ${path.config}/modules.d/*.yml + + # Set to true to enable config reloading + reload.enabled: true + + # Period on which files under path should be checked for changes + #reload.period: 10s + +# ======================= Elasticsearch template setting ======================= + +setup.template.settings: + index.number_of_shards: 1 + #index.codec: best_compression + #_source.enabled: false + + +# ================================== General =================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. +#fields: +# env: staging + +# ================================= Dashboards ================================= +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here or by using the `setup` command. +#setup.dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#setup.dashboards.url: + +# =================================== Kibana =================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. 
+# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + host: "kibana-admin-do-catalogo:5601" + + # Kibana Space ID + # ID of the Kibana Space into which the dashboards should be loaded. By default, + # the Default Space will be used. + #space.id: + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: +# cloud.id: "observability-deployment:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ4MzE2OGQ1MGUwN2U0ZmFhYTk1MjgxNzQxMTUwMzA2NSQzMGRiOWRjOTk4YTc0MGEwYjg0OThmODZhZmRjNTBiMw==" +# cloud.auth: "elastic:PoFpWBdAoU9Y7kbAU64xXyyh" + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Array of hosts to connect to. + hosts: ["elasticsearch-admin-do-catalogo:9200"] + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. 
+ #api_key: "id:api_key" + username: "elastic" + password: "changeme" + + # ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # The Logstash hosts +# hosts: ["logstash-admin-do-catalogo:5044"] + + # Optional SSL. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + +# ================================= Processors ================================= +processors: + - add_host_metadata: + when.not.contains.tags: forwarded + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + - add_kubernetes_metadata: ~ + + # ================================== Logging =================================== + + # Sets log level. The default log level is info. + # Available log levels are: error, warning, info, debug + #logging.level: debug + + # At debug level, you can selectively enable logging only for some components. + # To enable all selectors use ["*"]. Examples of other selectors are "beat", + # "publisher", "service". + #logging.selectors: ["*"] + + # ============================= X-Pack Monitoring ============================== + # Filebeat can export internal metrics to a central Elasticsearch monitoring + # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The + # reporting is disabled by default. + + # Set to true to enable the monitoring reporter. + #monitoring.enabled: false + + # Sets the UUID of the Elasticsearch cluster under which monitoring data for this + # Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch + # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. + #monitoring.cluster_uuid: + + # Uncomment to send the metrics to Elasticsearch. 
Most settings from the + # Elasticsearch output are accepted here as well. + # Note that the settings should point to your Elasticsearch *monitoring* cluster. + # Any setting that is not set is automatically inherited from the Elasticsearch + # output configuration, so if you have the Elasticsearch output configured such + # that it is pointing to your Elasticsearch monitoring cluster, you can simply + # uncomment the following line. + #monitoring.elasticsearch: + + # ============================== Instrumentation =============================== + + # Instrumentation support for the filebeat. + #instrumentation: + # Set to true to enable instrumentation of filebeat. + #enabled: false + + # Environment in which filebeat is running on (eg: staging, production, etc.) + #environment: "" + + # APM Server hosts to report instrumentation results to. + #hosts: + # - http://localhost:8200 + + # API Key for the APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the APM Server(s). 
+ #secret_token: + + +# ================================= Migration ================================== + +# This allows to enable 6.7 migration aliases +#migration.6_to_7.enabled: true diff --git a/sandbox/elk/filebeat/filebeat.yaml.old b/sandbox/elk/filebeat/filebeat.yaml.old new file mode 100644 index 00000000..0570f2ea --- /dev/null +++ b/sandbox/elk/filebeat/filebeat.yaml.old @@ -0,0 +1,86 @@ +# ============================== Filebeat inputs =============================== + +#filebeat.inputs: +# - type: log +# enabled: true +# paths: +# - /var/log/*.log +# - type: filestream +# enabled: false +# paths: +# - /var/log/*.log + +# ============================== Filebeat modules ============================== + +# REFERENCE: https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html + +filebeat.config.modules: + # Glob pattern for configuration loading + path: ${path.config}/modules.d/*.yml + + # Set to true to enable config reloading + reload.enabled: false + + # Period on which files under path should be checked for changes + #reload.period: 10s + +filebeat.modules: +# - module: elasticsearch +# server: +# enabled: false +# gc: +# enabled: false +# audit: +# enabled: false +# slowlog: +# enabled: false +# deprecation: +# enabled: false + + - module: logstash + log: + enabled: true + var.paths: + - /var/log/*.log + + - module: postgresql + log: + enabled: true + +# ======================= Elasticsearch template setting ======================= + +setup.template.settings: + index.number_of_shards: 1 + #index.codec: best_compression + #_source.enabled: false + +# =================================== Kibana =================================== + +setup.kibana: + host: "kibana-admin-do-catalogo:5601" + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. 
+ +# ---------------------------- Elasticsearch Output ---------------------------- + +output.elasticsearch: + hosts: ["elasticsearch-admin-do-catalogo:9200"] + username: "elastic" + password: "changeme" + +# ------------------------------ Logstash Output ------------------------------- + +#output.logstash: + # The Logstash hosts +# hosts: ["logstash-admin-do-catalogo:5044"] + +# ================================= Processors ================================= + +processors: + - add_host_metadata: + when.not.contains.tags: forwarded + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + - add_kubernetes_metadata: ~ diff --git a/sandbox/elk/logstash/pipeline/logstash.conf b/sandbox/elk/logstash/pipeline/logstash.conf new file mode 100644 index 00000000..9e1e4ccb --- /dev/null +++ b/sandbox/elk/logstash/pipeline/logstash.conf @@ -0,0 +1,25 @@ +input { + beats { + port => 5044 + } +} + +filter { + ruby { + code => 'event.set("kv_tags", event.get("message").scan(/\[(?:[^\]\[]+|\[(?:[^\]\[]+|\[[^\]\[]*\])*\])*\]/))' + } + + kv { + source => "kv_tags" + field_split_pattern => "(?:^\[|\]$)" + value_split => ":" + trim_key => " " + trim_value => " " + } +} + +output { + elasticsearch { + hosts => "elasticsearch-codeflix:9200" + } +} \ No newline at end of file diff --git a/infrastructure/src/main/resources/alertmanager/alertmanager.yaml b/sandbox/monitoramento/alertmanager/alertmanager.yaml similarity index 97% rename from infrastructure/src/main/resources/alertmanager/alertmanager.yaml rename to sandbox/monitoramento/alertmanager/alertmanager.yaml index cf6a38a6..fc73096b 100644 --- a/infrastructure/src/main/resources/alertmanager/alertmanager.yaml +++ b/sandbox/monitoramento/alertmanager/alertmanager.yaml @@ -14,7 +14,7 @@ route: receivers: - name: 'ops-codeflix' slack_configs: - - channel: '#admin-do-catalogo' + - channel: '#codeflix' send_resolved: true icon_url: https://avatars3.githubusercontent.com/u/3380462 title: |- diff --git 
a/sandbox/monitoramento/docker-compose.yml b/sandbox/monitoramento/docker-compose.yml new file mode 100644 index 00000000..78db507a --- /dev/null +++ b/sandbox/monitoramento/docker-compose.yml @@ -0,0 +1,60 @@ +version: '3.9' + +services: + prometheus-codeflix: + container_name: prometheus-codeflix + image: prom/prometheus + ports: + - "9090:9090" + restart: always + volumes: + - ./prometheus/prometheus.yaml:/etc/prometheus/prometheus.yml + - ./prometheus/alert_rules.yaml:/etc/prometheus/alert_rules.yaml + - prometheus-data-codeflix:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + networks: + - monitoramento-codeflix + - admin-do-catalogo + + alertmanager-codeflix: + container_name: alertmanager-codeflix + image: prom/alertmanager + ports: + - "9093:9093" + depends_on: + - prometheus-codeflix + restart: unless-stopped + volumes: + - ./alertmanager/:/etc/alertmanager/:ro + command: + - '--config.file=/etc/alertmanager/alertmanager.yaml' + - '--storage.path=/alertmanager' + networks: + - monitoramento-codeflix + + grafana-codeflix: + container_name: grafana-codeflix + image: grafana/grafana + ports: + - "3000:3000" + depends_on: + - prometheus-codeflix + restart: always + volumes: + - grafana-data-codeflix:/var/lib/grafana + networks: + - monitoramento-codeflix + +networks: + monitoramento-codeflix: + external: true + admin-do-catalogo: + external: true + +volumes: + prometheus-data-codeflix: + grafana-data-codeflix: diff --git a/infrastructure/src/main/resources/grafana/dashboard.json b/sandbox/monitoramento/grafana/dashboard.json similarity index 100% rename from infrastructure/src/main/resources/grafana/dashboard.json rename to sandbox/monitoramento/grafana/dashboard.json diff --git a/infrastructure/src/main/resources/prometheus/alert_rules.yaml 
b/sandbox/monitoramento/prometheus/alert_rules.yaml similarity index 50% rename from infrastructure/src/main/resources/prometheus/alert_rules.yaml rename to sandbox/monitoramento/prometheus/alert_rules.yaml index 2ba014da..dcb8e176 100644 --- a/infrastructure/src/main/resources/prometheus/alert_rules.yaml +++ b/sandbox/monitoramento/prometheus/alert_rules.yaml @@ -22,6 +22,32 @@ groups: severity: 'critical' group: 'ops-codeflix' env: 'production' + annotations: + title: 'ERRO 500' + summary: 'Erro 500 acima de 1% no último minuto.' + description: 'API de administração do catálogo está com taxa de erros 500 acima de 1% no último minuto.' + + - alert: 'QUEBRA DE SLO' + expr: (histogram_quantile(0.90, sum(rate(http_server_requests_seconds_bucket{job="app-catalogo-de-videos", uri!="/actuator/prometheus"}[1m])) by (le))) >= 0.5 + for: 1m + labels: + app: 'api-catalogo-de-videos' + severity: 'critical' + group: 'ops-codeflix' + env: 'production' + annotations: + title: 'QUEBRA DE SLO' + summary: '90% das requisições estão sendo atendidas a 500ms ou mais.' + description: 'API de administração do catálogo está quebrando o SLO, 90% das requisições estão sendo atendidas em 500ms ou mais no último minuto.' + + - alert: 'ERRO 500' + expr: (sum(rate(http_server_requests_seconds_count{job="app-catalogo-de-videos", status=~"5..", uri!="/actuator/prometheus"}[1m])) / sum(rate(http_server_requests_seconds_count{job="app-catalogo-de-videos", uri!="/actuator/prometheus"}[1m]))) >= 0.01 + for: 1m + labels: + app: 'api-catalogo-de-videos' + severity: 'critical' + group: 'ops-codeflix' + env: 'production' annotations: title: 'ERRO 500' summary: 'Erro 500 acima de 1% no último minuto.' 
diff --git a/infrastructure/src/main/resources/prometheus/prometheus.yaml b/sandbox/monitoramento/prometheus/prometheus.yaml similarity index 65% rename from infrastructure/src/main/resources/prometheus/prometheus.yaml rename to sandbox/monitoramento/prometheus/prometheus.yaml index 49c67eec..918e8ee8 100644 --- a/infrastructure/src/main/resources/prometheus/prometheus.yaml +++ b/sandbox/monitoramento/prometheus/prometheus.yaml @@ -11,20 +11,32 @@ alerting: alertmanagers: - static_configs: - targets: - - alertmanager-admin-do-catalogo:9093 + - alertmanager-codeflix:9093 # A scrape configuration containing exactly one endpoint to scrape: # Here it's Prometheus itself. scrape_configs: # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'prometheus-admin-do-catalogo' + - job_name: 'prometheus-codeflix' # metrics_path defaults to '/metrics' # scheme defaults to 'http'. static_configs: - - targets: ['prometheus-admin-do-catalogo:9090'] + - targets: ['prometheus-codeflix:9090'] + + - job_name: 'otel-collector-codeflix' + scrape_interval: 5s + static_configs: + - targets: ['otel-collector-codeflix:8888'] + - targets: ['otel-collector-codeflix:8889'] - job_name: 'app-admin-do-catalogo' metrics_path: '/api/actuator/prometheus' scrape_interval: 5s static_configs: - - targets: [ 'admin-do-catalogo:8080' ] # 192.168.15.127:8080 \ No newline at end of file + - targets: [ 'admin-do-catalogo:8080' ] # 192.168.15.127:8080 + + - job_name: 'app-catalogo-de-videos' + metrics_path: '/api/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: [ 'catalogo-de-videos:8081' ] # 192.168.15.127:8081 \ No newline at end of file diff --git a/sandbox/open-telemetry/collector/otel-collector-config.yaml b/sandbox/open-telemetry/collector/otel-collector-config.yaml new file mode 100644 index 00000000..9e255509 --- /dev/null +++ b/sandbox/open-telemetry/collector/otel-collector-config.yaml @@ -0,0 +1,41 @@ +receivers: + 
otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +exporters: + prometheus: + endpoint: "0.0.0.0:8889" + namespace: prometheus-codeflix + + otlp/jaeger: + endpoint: jaeger-codeflix:14250 + tls: + insecure: true + +# Alternatively, use jaeger_thrift_http with the settings below. In this case +# update the list of exporters on the traces pipeline. +# +# jaeger_thrift_http: +# url: http://jaeger-all-in-one:14268/api/traces + +processors: + batch: + +extensions: + health_check: + +service: + extensions: [health_check] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp/jaeger] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [prometheus] \ No newline at end of file diff --git a/sandbox/open-telemetry/docker-compose.yml b/sandbox/open-telemetry/docker-compose.yml new file mode 100644 index 00000000..46c76535 --- /dev/null +++ b/sandbox/open-telemetry/docker-compose.yml @@ -0,0 +1,57 @@ +version: '3.9' + +services: + jaeger-codeflix: + container_name: jaeger-codeflix + image: jaegertracing/opentelemetry-all-in-one + ports: + - "14250:14250" + - "14268:14268" + - "6831:6831/udp" + - "16686:16686" + - "16685:16685" + environment: + - COLLECTOR_OTLP_ENABLED=true + - METRICS_STORAGE_TYPE=prometheus + - PROMETHEUS_SERVER_URL=http://prometheus-codeflix:9090 +# volumes: +# - ./jaeger/jaeger-ui.json:/etc/jaeger/jaeger-ui.json +# command: --query.ui-config /etc/jaeger/jaeger-ui.json + networks: + - open-telemetry-codeflix + - admin-do-catalogo + - admin-do-catalogo-services + - monitoramento-codeflix + + otel-collector-codeflix: + container_name: otel-collector-codeflix + image: otel/opentelemetry-collector + ports: + - "1888:1888" # pprof extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP http receiver + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "13133:13133" # health_check extension + - "55679:55679" # 
zpages extension + volumes: + - ./collector/otel-collector-config.yaml:/etc/otel-collector-config.yaml + command: + - "--config=/etc/otel-collector-config.yaml" + depends_on: + - jaeger-codeflix + networks: + - open-telemetry-codeflix + - monitoramento-codeflix + - admin-do-catalogo + - admin-do-catalogo-services + +networks: + open-telemetry-codeflix: + external: true + monitoramento-codeflix: + external: true + admin-do-catalogo: + external: true + admin-do-catalogo-services: + external: true diff --git a/sandbox/open-telemetry/jaeger/jaeger-ui.json b/sandbox/open-telemetry/jaeger/jaeger-ui.json new file mode 100644 index 00000000..ce95752c --- /dev/null +++ b/sandbox/open-telemetry/jaeger/jaeger-ui.json @@ -0,0 +1,8 @@ +{ + "monitor": { + "menuEnabled": true + }, + "dependencies": { + "menuEnabled": true + } +} \ No newline at end of file diff --git a/sandbox/run.sh b/sandbox/run.sh new file mode 100755 index 00000000..721b1e4b --- /dev/null +++ b/sandbox/run.sh @@ -0,0 +1,29 @@ +printf "Criando as pastas com permissões...\n" +sudo chown root app/filebeat/filebeat.yaml +mkdir -m 777 .docker +mkdir -m 777 .docker/postgresql +mkdir -m 777 .docker/rabbitmq +mkdir -m 777 .docker/keycloak +mkdir -m 777 .docker/filebeat +mkdir -m 777 .docker/elasticsearch-admin-do-catalogo + +# ---------------------------------------------------- + +printf "Criando as docker networks...\n" +docker network create admin-do-catalogo +docker network create admin-do-catalogo-services +docker network create monitoramento-codeflix +docker network create open-telemetry-codeflix +docker network create elasticsearch-codeflix +docker network create catalogo-de-videos + + +# ---------------------------------------------------- + +printf "Inicializando os container...\n" +docker compose -f services/docker-compose.yml up -d +docker compose -f elk/docker-compose.yml up -d +docker compose -f monitoramento/docker-compose.yml up -d +docker compose -f open-telemetry/docker-compose.yml up -d +docker 
compose -f app/docker-compose.yml up -d +#docker compose -f app/docker-compose.yml up -d --build --force-recreate diff --git a/sandbox/services/docker-compose.yml b/sandbox/services/docker-compose.yml new file mode 100644 index 00000000..82dba0bf --- /dev/null +++ b/sandbox/services/docker-compose.yml @@ -0,0 +1,68 @@ +version: '3.9' + +services: + postgres-admin-do-catalogo: + container_name: postgres-admin-do-catalogo + image: postgres:latest + restart: unless-stopped + ports: + - "5432:5432" + environment: + POSTGRES_USER: ${POSTGRES_USER-username} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD-password} + POSTGRES_DB: ${POSTGRES_DB-adm_videos} + security_opt: + - seccomp:unconfined + volumes: + - ../.docker/postgresql:/var/lib/postgresql/data + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U lukinhasssss -d adm_videos" ] + timeout: 45s + interval: 10s + retries: 10 + networks: + - admin-do-catalogo-services + + rabbitmq-admin-do-catalogo: + container_name: rabbitmq-admin-do-catalogo + image: rabbitmq:3-management + ports: + - "15672:15672" + - "5672:5672" + environment: + - RABBITMQ_ERLANG_COOKIE=SWQOKODSQALRPCLNMEQG + - RABBITMQ_DEFAULT_USER=adm_videos + - RABBITMQ_DEFAULT_PASS=123456 + - RABBITMQ_DEFAULT_VHOST=/ + volumes: + - ../.docker/rabbitmq:/var/lib/rabbitmq:rw + healthcheck: + test: [ "CMD-SHELL", "rabbitmq-diagnostics -q ping" ] + interval: 5s + timeout: 10s + retries: 5 + networks: + - admin-do-catalogo-services + + keycloak-admin-do-catalogo: + container_name: keycloak-admin-do-catalogo + image: quay.io/keycloak/keycloak:21.0.1 + ports: + - "8443:8080" +# - "18443:8443" +# - "19990:9990" + environment: + - KEYCLOAK_ADMIN=admin + - KEYCLOAK_ADMIN_PASSWORD=admin + volumes: + # - keycloak_data:/opt/keycloak/data/ + - ../.docker/keycloak:/opt/keycloak/data/h2:rw + command: +# - start-dev # Deve ser utilizado quando a aplicação estiver rodando fora de um container + - start-dev --hostname-url=http://keycloak-admin-do-catalogo:8080 # Deve ser utilizado quando a 
aplicação estiver rodando dentro de um container + networks: + - admin-do-catalogo-services + +networks: + admin-do-catalogo-services: + external: true \ No newline at end of file diff --git a/infrastructure/src/main/resources/nginx/Dockerfile b/sandbox/services/nginx/Dockerfile similarity index 100% rename from infrastructure/src/main/resources/nginx/Dockerfile rename to sandbox/services/nginx/Dockerfile diff --git a/infrastructure/src/main/resources/nginx/default.conf b/sandbox/services/nginx/default.conf similarity index 100% rename from infrastructure/src/main/resources/nginx/default.conf rename to sandbox/services/nginx/default.conf diff --git a/infrastructure/src/main/resources/nginx/nginx.conf.old b/sandbox/services/nginx/nginx.conf.old similarity index 100% rename from infrastructure/src/main/resources/nginx/nginx.conf.old rename to sandbox/services/nginx/nginx.conf.old diff --git a/infrastructure/src/main/resources/nginx/opentelemetry_module.conf b/sandbox/services/nginx/opentelemetry_module.conf similarity index 100% rename from infrastructure/src/main/resources/nginx/opentelemetry_module.conf rename to sandbox/services/nginx/opentelemetry_module.conf diff --git a/infrastructure/src/main/resources/nginx/proxy.conf.old b/sandbox/services/nginx/proxy.conf.old similarity index 100% rename from infrastructure/src/main/resources/nginx/proxy.conf.old rename to sandbox/services/nginx/proxy.conf.old