Merge pull request #67 from ministryofjustice/NIT-1120-alfresco-recover-deleted-documents

Nit 1120 alfresco recover deleted documents
pbasumatary authored Jun 12, 2024
2 parents 4572091 + c0bd561 commit e766dd6
Showing 1 changed file with 31 additions and 51 deletions.
.github/workflows/restore-docs.yaml (31 additions, 51 deletions)
@@ -23,7 +23,8 @@ permissions:
 jobs:
   restore-docs-worker:
     runs-on: ubuntu-latest
-
+    environment:
+      name: ${{ github.event.inputs.which_env }}
     steps:
       - name: Checkout current repo
         uses: actions/checkout@v3
@@ -40,30 +41,47 @@ jobs:
         KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}

       - name: Restore from Glacier by executing in the service pod
+        env:
+          JOB_TIER: ${{ github.event.inputs.job_tier }}
+          S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }}
         run: |
           #!/bin/bash
-          set -xe
+          set -e
-          local SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
-          local SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}")
-          local S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d)
+          SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
+          SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}")
+          S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d)
           # Exec into the service pod and execute the script
-          kubectl exec $SERVICE_POD_NAME -- /bin/sh -c '
+          kubectl exec $SERVICE_POD_NAME -- env S3_BUCKET_NAME=${S3_BUCKET_NAME} S3_OBJECT_KEY=${S3_OBJECT_KEY} JOB_TIER=${JOB_TIER} /bin/sh -c '
+            # check if object is present or not
+            object_versions=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "${S3_OBJECT_KEY}")
+            if [[ -z "$object_versions" ]]; then
+              echo "Object not found in the bucket."
+              exit 1
+            fi
             # Delete the delete marker versions
-            local version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "${S3_OBJECT_KEY}" --query "Versions[?IsLatest==\`true\`].[VersionId]" --output text | jq -r ".[0]")
+            version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "${S3_OBJECT_KEY}" --query "DeleteMarkers[0].[VersionId]" --output text)
+            if [[ "$version_id" = "None" ]]; then
+              echo "There is no Delete Marker present for the object, so no version id found, exiting normally."
+              exit 0
+            fi
             aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --version-id "$version_id"
             echo "Deleted marker version: $version_id"
             # Restore from Glacier
-            aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"'$JOB_TIER'\"}}"
+            aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"$JOB_TIER\"}}"
             # Wait for restoration to complete
-            local wait_interval=20
-            local restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r '.Restore')
-            if [[ "$restore_status" == *"ongoing-request=\"true\""* ]]; then
+            wait_interval=30
+            result=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY")
+            # Check if the Restore field contains the substring "ongoing-request=\"false\"", that means restore complete.
+            restore_status=$(echo "$result" | grep -q 'ongoing-request=\\"false\\"' && echo true || echo false)
+            if ! $restore_status; then
+              # restore in progress
               echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!"
               sleep "$wait_interval"
@@ -73,42 +91,4 @@ jobs:
             aws s3 cp "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" --storage-class STANDARD
             echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete."
-          ' - "$JOB_TIER" "${{ github.event.inputs.s3_object_key }}"
-        env:
-          JOB_TIER: ${{ github.event.inputs.job_tier }}
-          S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }}
-
-  # restore-docs-worker:
-  #   name: Restore Docs from Glacier
-  #   runs-on: ubuntu-22.04
-  #   environment:
-  #     name: ${{ github.event.inputs.which_env }}
-  #   steps:
-  #     - name: Check out code
-  #       uses: actions/checkout@v4.1.1
-
-  #     - name: Configure kubectl
-  #       run: |
-  #         echo "${{ secrets.KUBE_CERT }}" > ca.crt
-  #         kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER}
-  #         kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }}
-  #         kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE}
-  #         kubectl config use-context ${KUBE_CLUSTER}
-  #       env:
-  #         KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }}
-  #         KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}
-
-  #     - name: Create ConfigMap using the restore-docs-worker.sh script
-  #       run: |
-  #         kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh
-
-  #     - name: Start Restore Docs Job
-  #       run: |
-  #         kubectl apply -f jobs/restore-docs-worker.yaml
-  #         kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker
-
-  #     - name: Delete Restore Docs Job
-  #       run: kubectl delete job restore-docs-worker
-
-  #     - name: Delete configmap
-  #       run: kubectl delete cm restore-docs-worker-cm
+          '
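
For context on why the rewritten script works: in a versioned S3 bucket, deleting an object only stacks a delete marker on top of it, and removing that marker makes the newest real version visible again. Below is a minimal standalone sketch of that undelete step, assuming AWS CLI v2 against a versioned bucket; the bucket and key are hypothetical placeholders for the values the workflow reads from the s3-bucket-output secret and the s3_object_key input.

#!/bin/bash
# Hypothetical stand-ins; the workflow derives these at runtime.
BUCKET="my-versioned-bucket"
KEY="documents/example.pdf"

# Listing versions for the key exposes the delete marker's version id
# ("None" is printed when no marker exists).
marker_id=$(aws s3api list-object-versions \
  --bucket "$BUCKET" --prefix "$KEY" \
  --query "DeleteMarkers[0].VersionId" --output text)

if [[ "$marker_id" == "None" ]]; then
  echo "No delete marker found; nothing to undelete."
else
  # Deleting the marker itself (not the object) restores visibility
  # of the most recent real version.
  aws s3api delete-object --bucket "$BUCKET" --key "$KEY" --version-id "$marker_id"
  echo "Removed delete marker $marker_id for s3://$BUCKET/$KEY"
fi

One caveat the sketch shares with the workflow script: --prefix is a prefix match, so DeleteMarkers[0] assumes the key is unique under that prefix.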

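The second half of the flow, the Glacier restore plus the wait loop, sketched under the same assumptions. Where the workflow script greps head-object's raw JSON (hence the escaped quotes in its pattern), this sketch lets --query unescape the Restore field before matching:

#!/bin/bash
BUCKET="my-versioned-bucket"   # hypothetical placeholder
KEY="documents/example.pdf"    # hypothetical placeholder
TIER="Bulk"                    # the job_tier input: Expedited | Standard | Bulk

# Ask S3 to stage a temporary (1-day) copy of the archived object.
aws s3api restore-object --bucket "$BUCKET" --key "$KEY" \
  --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"$TIER\"}}"

# head-object reports Restore: ongoing-request="true" while S3 is still
# working and ongoing-request="false" once the staged copy is ready.
until aws s3api head-object --bucket "$BUCKET" --key "$KEY" \
    --query 'Restore' --output text | grep -q 'ongoing-request="false"'; do
  echo "Restore of s3://$BUCKET/$KEY in progress..."
  sleep 30
done

# The staged copy expires after the requested Days; copying the object
# onto itself with a new storage class makes the restore permanent.
aws s3 cp "s3://$BUCKET/$KEY" "s3://$BUCKET/$KEY" --storage-class STANDARD
echo "Restore of s3://$BUCKET/$KEY complete."

The final self-copy is the step that actually moves the object out of Glacier: restore-object alone only stages a temporary copy that disappears when the requested number of days elapses, while rewriting the object with --storage-class STANDARD makes the recovery stick.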