author     Julien Dessaux   2020-02-22 11:57:50 +0100
committer  Julien Dessaux   2020-02-22 11:57:50 +0100
commit     bcfaffac240d74cd79bec3c2a9d3c144d215b495 (patch)
tree       edd2c5f1e011afee759970323042fcf35bf68962 /main.go
parent     Improved tests for job package (diff)
Added tests to the state package, and reworked the code around that
Diffstat (limited to 'main.go')
-rw-r--r--  main.go  39
1 file changed, 35 insertions(+), 4 deletions(-)
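
For orientation before reading the diff: the rewritten main.go drives a new state package API, state.ParseHeader and state.ParseJobs. Below is a minimal sketch of what that interface could look like, inferred only from the calls visible in the diff; the Header fields, the Job type and the io.Reader parameters are assumptions, not the repository's actual definitions.

// Hypothetical sketch of the state package surface exercised by the new
// main.go. Everything below is inferred from the diff; field names, types
// and signatures are assumptions, not the real implementation.
package state

import "io"

// Header models the fixed-size header at the start of a Bareos state file.
type Header struct {
	LastJobsAddr uint64 // offset of the serialized job entries, as read by main.go
}

// Job is one job entry recovered from the state file (the real code may
// reuse a type from the job package instead).
type Job struct {
	Name      string
	Timestamp uint64
	Success   bool
}

// ParseHeader reads and validates the header at the beginning of r.
func ParseHeader(r io.Reader) (Header, error) {
	// decode the fixed-size header and fill LastJobsAddr (omitted here)
	return Header{}, nil
}

// ParseJobs reads the job entries starting at r's current offset.
func ParseJobs(r io.Reader) ([]Job, error) {
	// decode job records until the list is exhausted (omitted here)
	return nil, nil
}
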
diff --git a/main.go b/main.go
index 3e206a0..bc5945c 100644
--- a/main.go
+++ b/main.go
@@ -6,6 +6,7 @@ import (
"bareos-zabbix-check/spool"
"bareos-zabbix-check/state"
"fmt"
+ "log"
"os"
"time"
)
@@ -14,22 +15,52 @@ func main() {
var (
config config.Config
spool spool.Spool
- state state.State
errorString string
missingString string
)
config.Init()
- err := state.Parse(&config)
+ // Open the state file
+ stateFile, err := os.Open(config.StateFile())
if err != nil {
- fmt.Print(err)
+ fmt.Printf("INFO Couldn't open state file : %s", err)
os.Exit(0)
}
+ defer stateFile.Close()
+ // parse the state file
+ header, err := state.ParseHeader(stateFile)
+ if err != nil {
+ fmt.Printf("INFO Could not parse state file header : %s", err)
+ os.Exit(0)
+ }
+ if config.Verbose() {
+ log.Printf("Parsed header: %+s\n", header)
+ }
+ // seek to the job entries in the state file
+ offset, err := stateFile.Seek(int64(header.LastJobsAddr), 0)
+ if err != nil {
+ fmt.Printf("INFO Couldn't seek to jobs position in state file : %s", err)
+ }
+ if uint64(offset) != header.LastJobsAddr {
+ fmt.Print("INFO Truncated state file")
+ }
+ // Then parse the jobs in the state file
+ jobs, err := state.ParseJobs(stateFile)
+ if err != nil {
+ fmt.Printf("INFO Could not parse jobs in state file : %s", err)
+ }
+ if config.Verbose() {
+ log.Printf("%d jobs found in state file\n", len(jobs))
+ for i := 0; i < len(jobs); i++ {
+ log.Print(jobs[i])
+ }
+ }
+
// We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
// in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
// avoid false positives during backup bootstrap
err = spool.Load(&config)
- jobs := job.KeepOldestOnly(append(state.Jobs(), spool.Jobs()...))
+ jobs = job.KeepOldestOnly(append(jobs, spool.Jobs()...))
spool.SetJobs(job.KeepSuccessOnly(jobs))
// we write this new spool
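
The calls to job.KeepOldestOnly and job.KeepSuccessOnly near the end of the hunk merge the state file jobs with the spool jobs. As a rough sketch only, inferred from the function names rather than from the repository's job package (the Job type and its fields are assumptions), the helpers could be implemented along these lines:

// Hypothetical sketch of the job package helpers used above. The Job type,
// its fields and the exact semantics are assumptions inferred from the
// function names only, not from the repository's job package.
package job

// Job is one backup job as tracked by the check.
type Job struct {
	Name      string
	Timestamp uint64 // job start time, seconds since the epoch (assumed)
	Success   bool
}

// KeepOldestOnly keeps, for every job name, only the entry with the
// smallest timestamp, dropping the more recent duplicates.
func KeepOldestOnly(jobs []Job) []Job {
	oldest := make(map[string]Job)
	for _, j := range jobs {
		if cur, ok := oldest[j.Name]; !ok || j.Timestamp < cur.Timestamp {
			oldest[j.Name] = j
		}
	}
	result := make([]Job, 0, len(oldest))
	for _, j := range oldest {
		result = append(result, j)
	}
	return result
}

// KeepSuccessOnly drops the jobs that did not terminate successfully.
func KeepSuccessOnly(jobs []Job) []Job {
	var result []Job
	for _, j := range jobs {
		if j.Success {
			result = append(result, j)
		}
	}
	return result
}
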
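Finally, the commit message mentions new tests for the state package; those files are outside this diff, which is limited to main.go. As an illustration only, assuming the hypothetical ParseHeader signature sketched above, such a test might look like this:

// Illustrative sketch only: it assumes the ParseHeader signature from the
// sketch above and that a header shorter than the fixed size is rejected.
// The real tests added to the state package by this commit may differ.
package state_test

import (
	"bytes"
	"testing"

	"bareos-zabbix-check/state"
)

func TestParseHeaderTruncated(t *testing.T) {
	if _, err := state.ParseHeader(bytes.NewReader([]byte{0x42})); err == nil {
		t.Fatal("expected an error when the state file header is truncated")
	}
}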