events.go
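
// events.go: periodic cleanup of Hasura event and action log tables in the
// event metadata database.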
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"

	libpack_logger "github.com/lukaszraczylo/graphql-monitoring-proxy/logging"
)
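
// Timing for the background cleaner: wait initialDelay after startup before
// the first pass, then repeat every cleanupInterval.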
const (
	initialDelay    = 60 * time.Second
	cleanupInterval = 1 * time.Hour
)
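
// delQueries removes stale rows from the Hasura metadata tables that grow with
// event, action, and cron activity. Each statement takes the retention period
// in days via fmt.Sprintf.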
var delQueries = [...]string{
	"DELETE FROM hdb_catalog.event_invocation_logs WHERE created_at < NOW() - INTERVAL '%d days';",
	"DELETE FROM hdb_catalog.event_log WHERE created_at < NOW() - INTERVAL '%d days';",
	"DELETE FROM hdb_catalog.hdb_action_log WHERE created_at < NOW() - INTERVAL '%d days';",
	"DELETE FROM hdb_catalog.hdb_cron_event_invocation_logs WHERE created_at < NOW() - INTERVAL '%d days';",
	"DELETE FROM hdb_catalog.hdb_scheduled_event_invocation_logs WHERE created_at < NOW() - INTERVAL '%d days';",
}
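
// enableHasuraEventCleaner starts the periodic cleanup of Hasura event logs.
// It is a no-op unless HasuraEventCleaner.Enable is set and an event metadata
// database URL is configured.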
func enableHasuraEventCleaner() {
	cfgMutex.RLock()
	if !cfg.HasuraEventCleaner.Enable {
		cfgMutex.RUnlock()
		return
	}

	eventMetadataDb := cfg.HasuraEventCleaner.EventMetadataDb
	if eventMetadataDb == "" {
		logger := cfg.Logger
		cfgMutex.RUnlock()
		logger.Warning(&libpack_logger.LogMessage{
			Message: "Event metadata db URL not specified, event cleaner not active",
		})
		return
	}

	clearOlderThan := cfg.HasuraEventCleaner.ClearOlderThan
	logger := cfg.Logger
	cfgMutex.RUnlock()

	logger.Info(&libpack_logger.LogMessage{
		Message: "Event cleaner enabled",
		Pairs:   map[string]interface{}{"interval_in_days": clearOlderThan},
	})
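
	// Run the cleanup loop in a background goroutine so startup is not blocked.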
	go func(dbURL string, clearOlderThan int, logger *libpack_logger.Logger) {
		pool, err := pgxpool.New(context.Background(), dbURL)
		if err != nil {
			logger.Error(&libpack_logger.LogMessage{
				Message: "Failed to create connection pool",
				Pairs:   map[string]interface{}{"error": err.Error()},
			})
			return
		}
		defer pool.Close()
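
		// Wait before the first pass so cleanup does not compete with startup,
		// then keep cleaning on a fixed interval.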
		time.Sleep(initialDelay)

		logger.Info(&libpack_logger.LogMessage{
			Message: "Initial cleanup of old events",
		})
		cleanEvents(pool, clearOlderThan, logger)

		ticker := time.NewTicker(cleanupInterval)
		defer ticker.Stop()

		for range ticker.C {
			logger.Info(&libpack_logger.LogMessage{
				Message: "Cleaning up old events",
			})
			cleanEvents(pool, clearOlderThan, logger)
		}
	}(eventMetadataDb, clearOlderThan, logger)
}
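
// cleanEvents runs every retention query against the event metadata database,
// deleting rows older than clearOlderThan days, and reports any failures in a
// single error log entry.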
func cleanEvents(pool *pgxpool.Pool, clearOlderThan int, logger *libpack_logger.Logger) {
	ctx := context.Background()

	var errors []error
	var failedQueries []string
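
	// Substitute the retention period into each statement and execute it.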
	for _, query := range delQueries {
		_, err := pool.Exec(ctx, fmt.Sprintf(query, clearOlderThan))
		if err != nil {
			errors = append(errors, err)
			failedQueries = append(failedQueries, query)
		} else {
			logger.Debug(&libpack_logger.LogMessage{
				Message: "Successfully executed query",
				Pairs:   map[string]interface{}{"query": query},
			})
		}
	}
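
	// Surface all failures at once rather than logging each one separately.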
	if len(errors) > 0 {
		var errMsgs []string
		for _, err := range errors {
			errMsgs = append(errMsgs, err.Error())
		}
		logger.Error(&libpack_logger.LogMessage{
			Message: "Failed to execute some queries",
			Pairs: map[string]interface{}{
				"failed_queries": failedQueries,
				"errors":         errMsgs,
			},
		})
	}
}