update 06/28/24 12:42:47
Qovery committed Jun 28, 2024
1 parent c608c1e commit 0ec0018
Showing 2 changed files with 34 additions and 4 deletions.
19 changes: 17 additions & 2 deletions charts/qovery/values-aws.yaml
@@ -122,8 +122,13 @@ ingress-nginx:
default-ssl-certificate: "qovery/letsencrypt-acme-qovery-cert"
updateStrategy:
rollingUpdate:
# set the minimum acceptable number of unavailable pods during a rolling update
maxUnavailable: 1
# The AWS LB is slow to catch up with topology changes, so we roll pods one by one to avoid any downtime
maxSurge: 1
maxUnavailable: 0
# The AWS LB is slow to catch up with topology changes, so we go slowly and let AWS register the change before moving on to the next instance
# The LB healthcheck interval is 6 seconds and it takes 2 rounds to mark an instance (un)healthy; double that time to be safe
readinessProbe:
initialDelaySeconds: 30
# enable autoscaling if you want to scale the number of replicas based on CPU usage
autoscaling:
enabled: false # set-by-customer
@@ -144,6 +149,16 @@ ingress-nginx:
externalTrafficPolicy: "Local"
sessionAffinity: ""
healthCheckNodePort: 0
# Hold a connection open for 30 seconds before shutting down, to avoid exiting too early
# and give the AWS LB time to catch up with the topology change.
# When /wait-shutdown is called, the LB healthcheck /healthz endpoint returns an error, but nginx keeps processing requests
lifecycle:
preStop:
exec:
command:
- sh
- -c
- (sleep 30 | nc localhost 80)& sleep 1 ; /wait-shutdown
topologySpreadConstraints:
- labelSelector:
matchLabels:
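The preStop hook added above is the densest part of this change. A rough unpacking of what it does, assuming the controller image ships nc and the /wait-shutdown helper, as the committed command implies:

(sleep 30 | nc localhost 80) &   # hold one TCP connection to nginx open for ~30 seconds
sleep 1                          # give nc a moment to establish the connection
/wait-shutdown                   # begin the controller's graceful shutdown: /healthz starts
                                 # failing, so the AWS LB marks the target unhealthy and
                                 # drains it, while nginx keeps serving requests until it exits

The 30-second window lines up with the readinessProbe delay above: with a 6-second LB healthcheck interval and 2 rounds needed to flip an instance's health status, roughly 12 seconds are required, doubled for safety.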
19 changes: 17 additions & 2 deletions charts/qovery/values-demo-aws.yaml
@@ -122,8 +122,13 @@ ingress-nginx:
default-ssl-certificate: "qovery/letsencrypt-acme-qovery-cert"
updateStrategy:
rollingUpdate:
# set the minimum acceptable number of unavailable pods during a rolling update
maxUnavailable: 1
# The AWS LB is slow to catch up with topology changes, so we roll pods one by one to avoid any downtime
maxSurge: 1
maxUnavailable: 0
# The AWS LB is slow to catch up with topology changes, so we go slowly and let AWS register the change before moving on to the next instance
# The LB healthcheck interval is 6 seconds and it takes 2 rounds to mark an instance (un)healthy; double that time to be safe
readinessProbe:
initialDelaySeconds: 30
# enable autoscaling if you want to scale the number of replicas based on CPU usage
autoscaling:
enabled: false # set-by-customer
@@ -144,6 +149,16 @@ ingress-nginx:
externalTrafficPolicy: "Local"
sessionAffinity: ""
healthCheckNodePort: 0
# Hold a connection open for 30 seconds before shutting down, to avoid exiting too early
# and give the AWS LB time to catch up with the topology change.
# When /wait-shutdown is called, the LB healthcheck /healthz endpoint returns an error, but nginx keeps processing requests
lifecycle:
preStop:
exec:
command:
- sh
- -c
- (sleep 30 | nc localhost 80)& sleep 1 ; /wait-shutdown
topologySpreadConstraints:
- labelSelector:
matchLabels:
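A quick, hypothetical way to check that the one-by-one roll really is downtime-free. The Deployment name ingress-nginx-controller, the qovery namespace, and https://my-app.example.com are placeholders for illustration, not values from this commit:

# Restart the controller pods one at a time (maxSurge: 1, maxUnavailable: 0)
kubectl -n qovery rollout restart deployment/ingress-nginx-controller
kubectl -n qovery rollout status deployment/ingress-nginx-controller &
rollout_pid=$!

# Hammer a host routed through the ingress while the rollout runs; log any failed request
while kill -0 "$rollout_pid" 2>/dev/null; do
  curl -sf -o /dev/null https://my-app.example.com || echo "request failed at $(date)"
  sleep 0.5
done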
