author     Julien Dessaux  2020-01-30 17:44:42 +0100
committer  Julien Dessaux  2020-01-30 17:44:42 +0100
commit     bea8e5aba8fc84dcb0c980c3948ed6d78719dded (patch)
tree       f84f2722b98f4c2996b64ce24ac322c9709617ed
parent     Document where C structures for state file header and job entry come from (diff)
Big refactoring: code split into several modules and some other best practices applied
-rw-r--r--  config/config.go     21
-rw-r--r--  config/init.go       16
-rw-r--r--  config/statefile.go  39
-rw-r--r--  config/workdir.go    40
-rw-r--r--  job/job.go           14
-rw-r--r--  job/utils.go         27
-rw-r--r--  main.go             151
-rw-r--r--  spool.go             70
-rw-r--r--  spool/load.go        40
-rw-r--r--  spool/save.go        28
-rw-r--r--  spool/spool.go       26
-rw-r--r--  state.go            199
-rw-r--r--  state/header.go      57
-rw-r--r--  state/job.go        103
-rw-r--r--  state/parser.go      40
-rw-r--r--  state/state.go       21
16 files changed, 506 insertions, 386 deletions
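For readers skimming the diffstat, here is a minimal sketch of how the refactored packages are meant to compose, condensed from the new main.go further down in this commit; package, type and method names are taken from the diff, only the control flow is abbreviated.

package main

import (
	"bareos-zabbix-check/config"
	"bareos-zabbix-check/job"
	"bareos-zabbix-check/spool"
	"bareos-zabbix-check/state"
	"fmt"
	"os"
)

func main() {
	var (
		conf config.Config
		st   state.State
		sp   spool.Spool
	)
	conf.Init() // flag parsing, work directory and state file detection
	if err := st.Parse(&conf); err != nil {
		fmt.Print(err)
		os.Exit(0)
	}
	_ = sp.Load(&conf) // spool errors are only reported later, see the comment in main.go
	jobs := job.KeepOldestOnly(append(st.Jobs(), sp.Jobs()...))
	sp.SetJobs(job.KeepSuccessOnly(jobs))
	_ = sp.Save() // main.go reports save errors as "AVERAGE: Error saving the spool file"
	// main.go then flags jobs in error and jobs missing for more than 24 hours
}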
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 0000000..262a5d2
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,21 @@
+package config
+
+// Config object
+type Config struct {
+ verbose bool
+ quiet bool
+ stateFile string
+ workDir string
+}
+
+// Verbose gets the verbose field of the configuration
+func (config *Config) Verbose() bool { return config.verbose }
+
+// Quiet gets the quiet field of the configuration
+func (config *Config) Quiet() bool { return config.quiet }
+
+// StateFile gets the stateFile field of the configuration
+func (config *Config) StateFile() string { return config.stateFile }
+
+// WorkDir gets the workDir field of the configuration
+func (config *Config) WorkDir() string { return config.workDir }
diff --git a/config/init.go b/config/init.go
new file mode 100644
index 0000000..f737fb3
--- /dev/null
+++ b/config/init.go
@@ -0,0 +1,16 @@
+package config
+
+import "flag"
+
+// Init initialises a program config from the command line flags
+func (c *Config) Init() {
+ flag.BoolVar(&c.verbose, "v", false, "Activates verbose debugging output, defaults to false.")
+ flag.BoolVar(&c.quiet, "q", false, "Suppress all output, suitable to force a silent update of the spool file.")
+ flag.StringVar(&c.stateFile, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
+ flag.StringVar(&c.workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
+
+ // command line arguments parsing
+ flag.Parse()
+ c.checkWorkDir()
+ c.checkStateFile()
+}
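For reference, the flags registered in Init translate into an invocation like the following (assuming the binary keeps the repository name and the default Bareos paths shown in the flag help):

bareos-zabbix-check -v -w /var/lib/bareos -f bareos-fd.9102.state

The -f value is joined to the work directory by checkStateFile below, so it is given relative to -w.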
diff --git a/config/statefile.go b/config/statefile.go
new file mode 100644
index 0000000..8b9f0a9
--- /dev/null
+++ b/config/statefile.go
@@ -0,0 +1,39 @@
+package config
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path"
+)
+
+const (
+ bareosStateFile = "bareos-fd.9102.state"
+ baculaStateFile = "bacula-fd.9102.state"
+)
+
+func (c *Config) checkStateFile() {
+ // Finds the state file to parse
+ if c.stateFile != "" {
+ c.stateFile = path.Join(c.workDir, c.stateFile)
+ info, err := os.Stat(c.stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ fmt.Printf("INFO The state file %s does not exist.\n", c.stateFile)
+ os.Exit(0)
+ }
+ } else {
+ c.stateFile = path.Join(c.workDir, bareosStateFile)
+ info, err := os.Stat(c.stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ c.stateFile = path.Join(c.workDir, baculaStateFile)
+ info, err = os.Stat(c.stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ fmt.Println("INFO Could not find a suitable state file. Has a job ever run?")
+ os.Exit(0)
+ }
+ }
+ }
+ if c.verbose {
+ log.Println("Using state file ", c.stateFile)
+ }
+}
diff --git a/config/workdir.go b/config/workdir.go
new file mode 100644
index 0000000..283fdc5
--- /dev/null
+++ b/config/workdir.go
@@ -0,0 +1,40 @@
+package config
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path"
+)
+
+const (
+ bareosWorkDir = "/var/lib/bareos"
+ baculaWorkDir = "/var/lib/bacula"
+)
+
+// checkWorkDir checks if a work directory is valid
+func (c *Config) checkWorkDir() {
+ // Determine the work directory to use.
+ if c.workDir != "" {
+ info, err := os.Stat(c.workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ fmt.Printf("INFO Invalid work directory %s : it does not exist or is not a directory.\n", c.workDir)
+ os.Exit(0)
+ }
+ } else {
+ c.workDir = bareosWorkDir
+ info, err := os.Stat(c.workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ c.workDir = baculaWorkDir
+ info, err := os.Stat(c.workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ fmt.Println("INFO Could not find a suitable work directory. Is bareos or bacula installed?")
+ os.Exit(0)
+ }
+ }
+ }
+ c.workDir = path.Clean(c.workDir)
+ if c.verbose {
+ log.Println("Setting work directory to ", c.workDir)
+ }
+}
diff --git a/job/job.go b/job/job.go
new file mode 100644
index 0000000..e15b2eb
--- /dev/null
+++ b/job/job.go
@@ -0,0 +1,14 @@
+package job
+
+import "fmt"
+
+// Job is a bareos job
+type Job struct {
+ Name string
+ Timestamp uint64
+ Success bool
+}
+
+func (job Job) String() string {
+ return fmt.Sprintf("Name: \"%s\", Timestamp: \"%d\", Success: \"%t\"", job.Name, job.Timestamp, job.Success)
+}
diff --git a/job/utils.go b/job/utils.go
new file mode 100644
index 0000000..8bbb6eb
--- /dev/null
+++ b/job/utils.go
@@ -0,0 +1,27 @@
+package job
+
+// KeepOldestOnly filters a job list and keeps only the most recent entry for each job name
+func KeepOldestOnly(jobs []Job) []Job {
+ tmpMap := make(map[string]Job)
+ for _, elt := range jobs {
+ prev, exists := tmpMap[elt.Name]
+ if !exists || prev.Timestamp < elt.Timestamp {
+ tmpMap[elt.Name] = elt
+ }
+ }
+ values := make([]Job, 0, len(tmpMap))
+ for _, value := range tmpMap {
+ values = append(values, value)
+ }
+ return values
+}
+
+// KeepSuccessOnly returns only the successful jobs from a job list (suitable to write a new spool file)
+func KeepSuccessOnly(jobs []Job) (result []Job) {
+ for _, job := range jobs {
+ if job.Success {
+ result = append(result, job)
+ }
+ }
+ return
+}
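The two helpers above implement the merge semantics used by the new main.go: deduplicate by job name keeping the latest timestamp, then keep only successful runs for the next spool file. A hedged usage sketch, with invented job names and timestamps:

package main

import (
	"bareos-zabbix-check/job"
	"fmt"
)

func main() {
	jobs := []job.Job{
		{Name: "backup-www", Timestamp: 1580000000, Success: true},
		{Name: "backup-www", Timestamp: 1580086400, Success: false}, // newer failed run wins the dedup
		{Name: "backup-db", Timestamp: 1580000000, Success: true},
	}
	latest := job.KeepOldestOnly(jobs)       // one entry per name, most recent timestamp kept
	fmt.Println(latest)                      // order is not guaranteed (map iteration)
	fmt.Println(job.KeepSuccessOnly(latest)) // only backup-db remains
}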
diff --git a/main.go b/main.go
index 9d7aa30..3e206a0 100644
--- a/main.go
+++ b/main.go
@@ -1,146 +1,63 @@
package main
import (
- "flag"
+ "bareos-zabbix-check/config"
+ "bareos-zabbix-check/job"
+ "bareos-zabbix-check/spool"
+ "bareos-zabbix-check/state"
"fmt"
- "log"
"os"
- "path"
"time"
)
-const (
- bareosWorkDir = "/var/lib/bareos"
- bareosStateFile = "bareos-fd.9102.state"
- baculaWorkDir = "/var/lib/bacula"
- baculaStateFile = "bacula-fd.9102.state"
- spoolFile = "bareos-zabbix-check.spool"
-)
-
-// We declare globally the variables that will hold the command line arguments
-var (
- verbose bool
- quiet bool
- stateFile string
- workDir string
-)
-
func main() {
var (
- info os.FileInfo
- err error
- successfulJobs jobs
- errorJobs jobs
- spoolJobs jobs
- jobName string
- ts uint64
- now uint64
- errorString string
- missingString string
+ config config.Config
+ spool spool.Spool
+ state state.State
+ errorString string
+ missingString string
)
-
- // command line arguments parsing
- flag.BoolVar(&verbose, "v", false, "Activates verbose debugging output, defaults to false.")
- flag.BoolVar(&quiet, "q", false, "Suppress all output, suitable to force a silent update of the spool file.")
- flag.StringVar(&stateFile, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
- flag.StringVar(&workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
- flag.Parse()
-
- // Determine the work directory to use.
- if workDir != "" {
- info, err = os.Stat(workDir)
- if os.IsNotExist(err) || !info.IsDir() {
- fmt.Printf("INFO Invalid work directory %s : it does not exist or is not a directory.\n", workDir)
- os.Exit(0)
- }
- } else {
- workDir = "/var/lib/bareos"
- info, err = os.Stat(workDir)
- if os.IsNotExist(err) || !info.IsDir() {
- workDir = "/var/lib/bacula"
- info, err := os.Stat(workDir)
- if os.IsNotExist(err) || !info.IsDir() {
- fmt.Println("INFO Could not find a suitable work directory. Is bareos or bacula installed?")
- os.Exit(0)
- }
- }
- }
- workDir = path.Clean(workDir)
- if verbose {
- log.Println("Setting work directory to ", workDir)
- }
-
- // Finds the state file to parse
- if stateFile != "" {
- stateFile = path.Join(workDir, stateFile)
- info, err = os.Stat(stateFile)
- if os.IsNotExist(err) || info.IsDir() {
- fmt.Printf("INFO The state file %s does not exist.\n", stateFile)
- os.Exit(0)
- }
- } else {
- stateFile = path.Join(workDir, bareosStateFile)
- info, err = os.Stat(stateFile)
- if os.IsNotExist(err) || info.IsDir() {
- stateFile = path.Join(workDir, baculaStateFile)
- info, err = os.Stat(stateFile)
- if os.IsNotExist(err) || info.IsDir() {
- fmt.Println("INFO Could not find a suitable state file. Has a job ever run?")
- os.Exit(0)
- }
- }
- }
- if verbose {
- log.Println("Using state file ", stateFile)
- }
-
- successfulJobs, errorJobs, err = parseStateFile()
+ config.Init()
+ err := state.Parse(&config)
if err != nil {
fmt.Print(err)
os.Exit(0)
}
- // We will check for errors in loading the spool file only if necessary. If all jobs ran successfully without errors in the state file and we manage to write
- // a new spool file without errors, then we will ignore any error here to avoid false positives during backup bootstrap
- spoolJobs, err = loadSpool()
+ // We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
+ // in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
+ // avoid false positives during backup bootstrap
+ err = spool.Load(&config)
+
+ jobs := job.KeepOldestOnly(append(state.Jobs(), spool.Jobs()...))
+ spool.SetJobs(job.KeepSuccessOnly(jobs))
- // if we have jobs in the spool we merge this list with successfull jobs from the state file
- if err == nil {
- for jobName, ts = range spoolJobs {
- var (
- current uint64
- ok bool
- )
- current, ok = successfulJobs[jobName]
- if !ok || current < ts {
- successfulJobs[jobName] = ts
- }
- }
- }
// we write this new spool
- if err2 := saveSpool(successfulJobs); err2 != nil {
+ if err2 := spool.Save(); err2 != nil {
fmt.Printf("AVERAGE: Error saving the spool file : %s\n", err2)
os.Exit(0)
}
- // We build the error string listing the jobs in error
- for jobName, ts = range errorJobs {
- if errorString == "" {
- errorString = fmt.Sprintf("errors: %s", jobName)
+ now := uint64(time.Now().Unix())
+ // We build the error strings
+ for _, job := range jobs {
+ if job.Success {
+ if job.Timestamp < now-24*3600 {
+ if missingString == "" {
+ missingString = fmt.Sprintf("missing: %s", job.Name)
+ } else {
+ missingString = fmt.Sprintf("%s, %s", missingString, job.Name)
+ }
+ }
} else {
- errorString = fmt.Sprintf("%s, %s", errorString, jobName)
- }
- }
- now = uint64(time.Now().Unix())
- // Next we check if all jobs ran recently and build the missing string
- for jobName, ts = range successfulJobs {
- if ts < now-24*3600 {
- if missingString == "" {
- missingString = fmt.Sprintf("missing: %s", jobName)
+ if errorString == "" {
+ errorString = fmt.Sprintf("errors: %s", job.Name)
} else {
- missingString = fmt.Sprintf("%s, %s", missingString, jobName)
+ errorString = fmt.Sprintf("%s, %s", errorString, job.Name)
}
}
}
+ // Finally we output
if errorString != "" || missingString != "" {
fmt.Printf("AVERAGE: %s %s", errorString, missingString)
if err != nil {
diff --git a/spool.go b/spool.go
deleted file mode 100644
index 071a469..0000000
--- a/spool.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package main
-
-import (
- "encoding/csv"
- "fmt"
- "log"
- "os"
- "path"
- "strconv"
-)
-
-// jobs is a map that matches a job name string to its last successfull run timestamp
-type jobs map[string]uint64
-
-func loadSpool() (entries jobs, err error) {
- var (
- file *os.File
- lines [][]string
- )
- // We read the spool
- file, err = os.Open(path.Join(workDir, spoolFile))
- if err != nil {
- return nil, fmt.Errorf("Couldn't open spool file, starting from scratch: %s", err)
- }
- defer file.Close()
- lines, err = csv.NewReader(file).ReadAll()
- if err != nil {
- return nil, fmt.Errorf("Corrupted spool file, starting from scratch : %s", err)
- }
- if verbose {
- log.Printf("Spool file content : %v\n", lines)
- }
-
- entries = make(map[string]uint64)
- for _, line := range lines {
- var i int
- i, err = strconv.Atoi(line[1])
- if err != nil {
- return nil, fmt.Errorf("Corrupted spool file : couldn't parse timestamp entry")
- }
- entries[line[0]] = uint64(i)
- }
- return
-}
-
-func saveSpool(entries jobs) (err error) {
- var (
- file *os.File
- lines [][]string
- jobName string
- ts uint64
- i int
- )
- file, err = os.Create(path.Join(workDir, spoolFile))
- if err != nil {
- return
- }
- defer file.Close()
-
- lines = make([][]string, len(entries))
- i = 0
- for jobName, ts = range entries {
- lines[i] = make([]string, 2)
- lines[i][0] = jobName
- lines[i][1] = fmt.Sprintf("%d", ts)
- i++
- }
- err = csv.NewWriter(file).WriteAll(lines)
- return
-}
diff --git a/spool/load.go b/spool/load.go
new file mode 100644
index 0000000..282fdc6
--- /dev/null
+++ b/spool/load.go
@@ -0,0 +1,40 @@
+package spool
+
+import (
+ "bareos-zabbix-check/config"
+ "bareos-zabbix-check/job"
+ "encoding/csv"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "strconv"
+)
+
+// Load reads the spool file from the configured work directory
+func (s *Spool) Load(c *config.Config) (err error) {
+ s.config = c
+ // We read the spool
+ file, err := os.Open(path.Join(c.WorkDir(), spoolFile))
+ if err != nil {
+ return fmt.Errorf("Couldn't open spool file, starting from scratch: %s", err)
+ }
+ defer file.Close()
+ lines, err := csv.NewReader(file).ReadAll()
+ if err != nil {
+ return fmt.Errorf("Corrupted spool file, starting from scratch : %s", err)
+ }
+ if c.Verbose() {
+ log.Printf("Spool file content : %v\n", lines)
+ }
+
+ for _, line := range lines {
+ var i int
+ i, err = strconv.Atoi(line[1])
+ if err != nil {
+ return fmt.Errorf("Corrupted spool file : couldn't parse timestamp entry")
+ }
+ s.jobs = append(s.jobs, job.Job{Name: line[0], Timestamp: uint64(i), Success: true})
+ }
+ return
+}
diff --git a/spool/save.go b/spool/save.go
new file mode 100644
index 0000000..b01dc7b
--- /dev/null
+++ b/spool/save.go
@@ -0,0 +1,28 @@
+package spool
+
+import (
+ "encoding/csv"
+ "fmt"
+ "os"
+ "path"
+)
+
+// Save writes the spool to disk
+func (s *Spool) Save() (err error) {
+ file, err := os.Create(path.Join(s.config.WorkDir(), spoolFile))
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ lines := make([][]string, len(s.jobs))
+ i := 0
+ for _, job := range s.jobs {
+ lines[i] = make([]string, 2)
+ lines[i][0] = job.Name
+ lines[i][1] = fmt.Sprintf("%d", job.Timestamp)
+ i++
+ }
+ err = csv.NewWriter(file).WriteAll(lines)
+ return
+}
diff --git a/spool/spool.go b/spool/spool.go
new file mode 100644
index 0000000..e095979
--- /dev/null
+++ b/spool/spool.go
@@ -0,0 +1,26 @@
+package spool
+
+import (
+ "bareos-zabbix-check/config"
+ "bareos-zabbix-check/job"
+)
+
+const (
+ spoolFile = "bareos-zabbix-check.spool"
+)
+
+// Spool is an object for manipulating a bareos spool file
+type Spool struct {
+ config *config.Config
+ jobs []job.Job
+}
+
+// Jobs returns the job list held by the spool
+func (s *Spool) Jobs() []job.Job {
+ return s.jobs
+}
+
+// SetJobs sets a jobs list
+func (s *Spool) SetJobs(jobs []job.Job) {
+ s.jobs = jobs
+}
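Taken together, load.go and save.go define the spool format: a headerless CSV under the work directory, one line per job with its name and the Unix timestamp of its last successful run. An illustrative /var/lib/bareos/bareos-zabbix-check.spool (job names invented):

backup-www,1580405082
backup-db,1580401482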
diff --git a/state.go b/state.go
deleted file mode 100644
index 9c5ac49..0000000
--- a/state.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package main
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "log"
- "os"
- "regexp"
- "time"
-)
-
-// stateFileHeader : A structure to hold the header of the state file. It is statically aligned for amd64 architecture
-// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
-type stateFileHeader struct {
- ID [14]byte
- _ int16
- Version int32
- _ int32
- LastJobsAddr uint64
- EndOfRecentJobResultsList uint64
- Reserved [19]uint64
-}
-
-func (sfh stateFileHeader) String() string {
- return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.EndOfRecentJobResultsList, sfh.Reserved)
-}
-
-// jobEntry : A structure to hold a job result from the state file
-// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
-type jobEntry struct {
- Pad [16]byte
- Errors int32
- JobType int32
- JobStatus int32
- JobLevel int32
- JobID uint32
- VolSessionID uint32
- VolSessionTime uint32
- JobFiles uint32
- JobBytes uint64
- StartTime uint64
- EndTime uint64
- Job [maxNameLength]byte
-}
-
-func (je jobEntry) String() string {
- var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
- var jobNameLen int
- if len(matches) >= 4 {
- jobNameLen = matches[3]
- }
- return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %s, EndTime: %s, Job: %s",
- je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
-}
-
-const (
- // maxNameLength : the maximum length of a string, hard coded in bareos
- maxNameLength = 128
- // stateFileHeaderLength : the length of the state file header struct
- stateFileHeaderLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8
- // jobResultLength : the length of the job result struct
- jobResultLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
-)
-
-var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
-
-// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
-func readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
- bytes = make([]byte, number)
- n, err = file.Read(bytes)
- if err != nil {
- return 0, nil, fmt.Errorf("file.Read failed in %s : %s", stateFile, err)
- }
-
- return
-}
-
-func parseStateFile() (successfulJobs jobs, errorJobs jobs, err error) {
- var (
- n int
- stateFileHandle *os.File
- data []byte
- buffer *bytes.Buffer
- numberOfJobs uint32
- matches []int
- )
- // Open the state file
- stateFileHandle, err = os.Open(stateFile)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Couldn't open state file : %s", err)
- }
- defer stateFileHandle.Close()
-
- // Parsing the state file header
- var header stateFileHeader
- n, data, err = readNextBytes(stateFileHandle, stateFileHeaderLength)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
- }
- if n != stateFileHeaderLength {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid header length in %s", stateFile)
- }
- buffer = bytes.NewBuffer(data)
- err = binary.Read(buffer, binary.LittleEndian, &header)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", stateFile, err)
- }
- if verbose {
- log.Printf("Parsed header: %+s\n", header)
- }
- if id := string(header.ID[:len(header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", stateFile)
- }
- if header.Version != 4 {
- return nil, nil, fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", header.Version)
- }
- if header.LastJobsAddr == 0 {
- return nil, nil, fmt.Errorf("INFO No jobs exist in the state file")
- }
-
- // We seek to the jobs position in the state file
- stateFileHandle.Seek(int64(header.LastJobsAddr), 0)
-
- // We read how many jobs there are in the state file
- n, data, err = readNextBytes(stateFileHandle, 4)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
- }
- if n != 4 {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", stateFile)
- }
- buffer = bytes.NewBuffer(data)
- err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", stateFile, err)
- }
- if verbose {
- log.Printf("%d jobs found in state file\n", numberOfJobs)
- }
-
- // We parse the job entries
- successfulJobs = make(map[string]uint64)
- errorJobs = make(map[string]uint64)
- for ; numberOfJobs > 0; numberOfJobs-- {
- var (
- jobResult jobEntry
- jobName string
- )
- n, data, err = readNextBytes(stateFileHandle, jobResultLength)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
- }
- if n != jobResultLength {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", stateFile)
- }
- buffer = bytes.NewBuffer(data)
- err = binary.Read(buffer, binary.LittleEndian, &jobResult)
- if err != nil {
- return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", stateFile, err)
- }
- matches = jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
- if len(matches) >= 4 {
- jobName = string(jobResult.Job[:matches[3]])
- } else {
- return nil, nil, fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
- }
- if verbose {
- log.Printf("Parsed job entry: %s\n", jobResult)
- }
- // If the job is of type backup (B == ascii 66)
- if jobResult.JobType == 66 {
- var (
- successExists bool
- errorExists bool
- currentSuccess uint64
- currentError uint64
- )
- currentSuccess, successExists = successfulJobs[jobName]
- currentError, errorExists = errorJobs[jobName]
- // If the job is of status success (T == ascii 84)
- if jobResult.JobStatus == 84 {
- // if there is an older entry in errorJobs we delete it
- if errorExists && jobResult.StartTime > currentError {
- delete(errorJobs, jobName)
- }
- // if there are no entries more recent in successfulJobs we add this one
- if !successExists || successExists && jobResult.StartTime > currentSuccess {
- successfulJobs[jobName] = jobResult.StartTime
- }
- } else {
- if !errorExists || jobResult.StartTime > currentError {
- errorJobs[jobName] = jobResult.StartTime
- }
- }
- }
- }
- return
-}
diff --git a/state/header.go b/state/header.go
new file mode 100644
index 0000000..3e55899
--- /dev/null
+++ b/state/header.go
@@ -0,0 +1,57 @@
+package state
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "os"
+)
+
+// headerLength : the length of the state file header struct
+const headerLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8
+
+// header : A structure to hold the header of the state file. It is statically aligned for amd64 architecture
+// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
+type header struct {
+ ID [14]byte
+ _ int16
+ Version int32
+ _ int32
+ LastJobsAddr uint64
+ EndOfRecentJobResultsList uint64
+ Reserved [19]uint64
+}
+
+func (sfh header) String() string {
+ return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.EndOfRecentJobResultsList, sfh.Reserved)
+}
+
+func (s *State) parseHeader(file *os.File) (err error) {
+ // Parsing the state file header
+ n, data, err := s.readNextBytes(file, headerLength)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : %s", err)
+ }
+ if n != headerLength {
+ return fmt.Errorf("INFO Corrupted state file : invalid header length in %s", s.config.StateFile())
+ }
+ buffer := bytes.NewBuffer(data)
+ err = binary.Read(buffer, binary.LittleEndian, &s.header)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", s.config.StateFile(), err)
+ }
+ if s.config.Verbose() {
+ log.Printf("Parsed header: %+s\n", s.header)
+ }
+ if id := string(s.header.ID[:len(s.header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
+ return fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", s.config.StateFile())
+ }
+ if s.header.Version != 4 {
+ return fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", s.header.Version)
+ }
+ if s.header.LastJobsAddr == 0 {
+ return fmt.Errorf("INFO No jobs exist in the state file")
+ }
+ return
+}
diff --git a/state/job.go b/state/job.go
new file mode 100644
index 0000000..8d5b04f
--- /dev/null
+++ b/state/job.go
@@ -0,0 +1,103 @@
+package state
+
+import (
+ "bareos-zabbix-check/job"
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "os"
+ "regexp"
+ "time"
+)
+
+// jobLength : the length of the job result struct
+const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
+
+var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
+
+// jobEntry : A structure to hold a job result from the state file
+// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
+type jobEntry struct {
+ Pad [16]byte
+ Errors int32
+ JobType int32
+ JobStatus int32
+ JobLevel int32
+ JobID uint32
+ VolSessionID uint32
+ VolSessionTime uint32
+ JobFiles uint32
+ JobBytes uint64
+ StartTime uint64
+ EndTime uint64
+ Job [maxNameLength]byte
+}
+
+func (je jobEntry) String() string {
+ var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
+ var jobNameLen int
+ if len(matches) >= 4 {
+ jobNameLen = matches[3]
+ }
+ return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %s, EndTime: %s, Job: %s",
+ je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
+}
+
+func (s *State) parseJobs(file *os.File) (err error) {
+ // We seek to the jobs position in the state file
+ file.Seek(int64(s.header.LastJobsAddr), 0)
+
+ // We read how many jobs there are in the state file
+ n, data, err := s.readNextBytes(file, 4)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : %s", err)
+ }
+ if n != 4 {
+ return fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", s.config.StateFile())
+ }
+ buffer := bytes.NewBuffer(data)
+ var numberOfJobs uint32
+ err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", s.config.StateFile(), err)
+ }
+ if s.config.Verbose() {
+ log.Printf("%d jobs found in state file\n", numberOfJobs)
+ }
+
+ // We parse the job entries
+ for ; numberOfJobs > 0; numberOfJobs-- {
+ var (
+ jobResult jobEntry
+ jobName string
+ )
+ n, data, err = s.readNextBytes(file, jobLength)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : %s", err)
+ }
+ if n != jobLength {
+ return fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", s.config.StateFile())
+ }
+ buffer = bytes.NewBuffer(data)
+ err = binary.Read(buffer, binary.LittleEndian, &jobResult)
+ if err != nil {
+ return fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", s.config.StateFile(), err)
+ }
+ matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
+ if len(matches) >= 4 {
+ jobName = string(jobResult.Job[:matches[3]])
+ } else {
+ return fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
+ }
+ if s.config.Verbose() {
+ log.Printf("Parsed job entry: %s\n", jobResult)
+ }
+ // If the job is of type backup (B == ascii 66)
+ if jobResult.JobType == 66 {
+ // A job is successful when its JobStatus equals 84 (T == ascii 84)
+ s.jobs = append(s.jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84})
+ }
+ }
+ return
+}
diff --git a/state/parser.go b/state/parser.go
new file mode 100644
index 0000000..60f5394
--- /dev/null
+++ b/state/parser.go
@@ -0,0 +1,40 @@
+package state
+
+import (
+ "bareos-zabbix-check/config"
+ "fmt"
+ "os"
+)
+
+// Parse parses a bareos state file
+func (s *State) Parse(c *config.Config) (err error) {
+ s.config = c
+ // Open the state file
+ file, err := os.Open(c.StateFile())
+ if err != nil {
+ return fmt.Errorf("INFO Couldn't open state file : %s", err)
+ }
+ defer file.Close()
+
+ err = s.parseHeader(file)
+ if err != nil {
+ return err
+ }
+ err = s.parseJobs(file)
+ if err != nil {
+ return err
+ }
+
+ return
+}
+
+// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
+func (s *State) readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
+ bytes = make([]byte, number)
+ n, err = file.Read(bytes)
+ if err != nil {
+ return 0, nil, fmt.Errorf("file.Read failed in %s : %s", s.config.StateFile(), err)
+ }
+
+ return
+}
diff --git a/state/state.go b/state/state.go
new file mode 100644
index 0000000..d28a701
--- /dev/null
+++ b/state/state.go
@@ -0,0 +1,21 @@
+package state
+
+import (
+ "bareos-zabbix-check/config"
+ "bareos-zabbix-check/job"
+)
+
+// maxNameLength : the maximum length of a string, hard coded in bareos
+const maxNameLength = 128
+
+// State is an object for manipulating a bareos state file
+type State struct {
+ config *config.Config
+ header header
+ jobs []job.Job
+}
+
+// Jobs returns the jobs from the state file
+func (s *State) Jobs() []job.Job {
+ return s.jobs
+}