aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJulien Dessaux2020-01-08 10:51:59 +0100
committerJulien Dessaux2020-01-08 10:51:59 +0100
commit18e91937cd94ea199acff95c2e25015482d82005 (patch)
treee09378ea4ee0969d638554bf1f9c58f65b3468ff
downloadbareos-zabbix-check-18e91937cd94ea199acff95c2e25015482d82005.tar.gz
bareos-zabbix-check-18e91937cd94ea199acff95c2e25015482d82005.tar.bz2
bareos-zabbix-check-18e91937cd94ea199acff95c2e25015482d82005.zip
Initial import
Diffstat (limited to '')
-rw-r--r--go.mod3
-rw-r--r--main.go152
-rw-r--r--spool.go72
-rw-r--r--state.go188
4 files changed, 415 insertions, 0 deletions
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..505d991
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module bareos-zabbix-check
+
+go 1.13
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..10a0d63
--- /dev/null
+++ b/main.go
@@ -0,0 +1,152 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "time"
+)
+
// Paths and file names used to locate the bareos or bacula file daemon
// state, and the name of the spool file this check maintains in the work
// directory.
const (
	bareosWorkDir   = "/var/lib/bareos"
	bareosStateFile = "bareos-fd.9102.state"
	baculaWorkDir   = "/var/lib/bacula"
	baculaStateFile = "bacula-fd.9102.state"
	spoolFile       = "bareos-zabbix-check.spool"
)

// We declare globally the variables that will hold the command line arguments
var (
	verbose   bool   // -v : activates verbose debugging output
	quiet     bool   // -q : suppresses all output
	stateFile string // -f : state file to parse, auto-detected when empty
	workDir   string // -w : work directory, auto-detected when empty
)
+
+func main() {
+ var (
+ info os.FileInfo
+ err error
+ successfulJobs jobs
+ errorJobs jobs
+ spoolJobs jobs
+ jobName string
+ ts uint64
+ now uint64
+ errorString string
+ missingString string
+ )
+
+ // command line arguments parsing
+ flag.BoolVar(&verbose, "v", false, "Activates verbose debugging output, defaults to false.")
+ flag.BoolVar(&quiet, "q", false, "Suppress all output, suitable to force a silent update of the spool file.")
+ flag.StringVar(&stateFile, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
+ flag.StringVar(&workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
+ flag.Parse()
+
+ // Determine the work directory to use.
+ if workDir != "" {
+ info, err = os.Stat(workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ fmt.Printf("INFO Invalid work directory %s : it does not exist or is not a directory.\n", workDir)
+ os.Exit(0)
+ }
+ } else {
+ workDir = "/var/lib/bareos"
+ info, err = os.Stat(workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ workDir = "/var/lib/bacula"
+ info, err := os.Stat(workDir)
+ if os.IsNotExist(err) || !info.IsDir() {
+ fmt.Println("INFO Could not find a suitable work directory. Is bareos or bacula installed?")
+ os.Exit(0)
+ }
+ }
+ }
+ workDir = path.Clean(workDir)
+ if verbose {
+ log.Println("Setting work directory to ", workDir)
+ }
+
+ // Finds the state file to parse
+ if stateFile != "" {
+ stateFile = path.Join(workDir, stateFile)
+ info, err = os.Stat(stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ fmt.Printf("INFO The state file %s does not exist.\n", stateFile)
+ os.Exit(0)
+ }
+ } else {
+ stateFile = path.Join(workDir, bareosStateFile)
+ info, err = os.Stat(stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ stateFile = path.Join(workDir, baculaStateFile)
+ info, err = os.Stat(stateFile)
+ if os.IsNotExist(err) || info.IsDir() {
+ fmt.Println("INFO Could not find a suitable state file. Has a job ever run?")
+ os.Exit(0)
+ }
+ }
+ }
+ if verbose {
+ log.Println("Using state file ", stateFile)
+ }
+
+ successfulJobs, errorJobs, err = parseStateFile()
+ if err != nil {
+ fmt.Print(err)
+ os.Exit(0)
+ }
+ // We will check for errors in loading the spool file only if necessary. If all jobs ran successfully without errors in the state file and we manage to write
+ // a new spool file without errors, then we will ignore any error here to avoid false positives during backup bootstrap
+ spoolJobs, err = loadSpool()
+
+ // if we have jobs in the spool we merge this list with successfull jobs from the state file
+ if err == nil {
+ for jobName, ts = range spoolJobs {
+ var (
+ current uint64
+ ok bool
+ )
+ current, ok = successfulJobs[jobName]
+ if !ok || current < ts {
+ successfulJobs[jobName] = ts
+ }
+ }
+ }
+ // we write this new spool
+ if err2 := saveSpool(successfulJobs); err2 != nil {
+ fmt.Printf("AVERAGE: Couldn't save spool : %s\n", err2)
+ os.Exit(0)
+ }
+
+ // We build the error string listing the jobs in error
+ for jobName, ts = range errorJobs {
+ if errorString == "" {
+ errorString = fmt.Sprintf("errors: %s", jobName)
+ } else {
+ errorString = fmt.Sprintf("%s, %s", errorString, jobName)
+ }
+ }
+ now = uint64(time.Now().Unix())
+ // Next we check if all jobs ran recently and build the missing string
+ for jobName, ts = range successfulJobs {
+ if ts < now-24*3600 {
+ if missingString == "" {
+ missingString = fmt.Sprintf("missing: %s", jobName)
+ } else {
+ missingString = fmt.Sprintf("%s, %s", missingString, jobName)
+ }
+ }
+ }
+ if errorString != "" || missingString != "" {
+ fmt.Printf("AVERAGE: %s %s", errorString, missingString)
+ if err != nil {
+ fmt.Printf(" additionnal errors: %s", err)
+ }
+ } else {
+ fmt.Printf("OK")
+ }
+}
diff --git a/spool.go b/spool.go
new file mode 100644
index 0000000..e3d00d9
--- /dev/null
+++ b/spool.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+ "encoding/csv"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "strconv"
+)
+
// jobs is a map that matches a job name string to its last successful run
// timestamp (unix seconds).
type jobs map[string]uint64
+
+func loadSpool() (entries jobs, err error) {
+ var (
+ file *os.File
+ lines [][]string
+ )
+ // We read the spool
+ file, err = os.Open(path.Join(workDir, spoolFile))
+ if err != nil {
+ return nil, fmt.Errorf("INFO Couldn't open spool file: %s", err)
+ }
+ defer file.Close()
+ lines, err = csv.NewReader(file).ReadAll()
+ if err != nil {
+ return nil, fmt.Errorf("INFO Corrupted spool file : %s", err)
+ }
+ if verbose {
+ log.Printf("Spool file content : %v\n", lines)
+ }
+
+ entries = make(map[string]uint64)
+ for _, line := range lines {
+ var (
+ i int
+ )
+ i, err = strconv.Atoi(line[1])
+ if err != nil {
+ return nil, fmt.Errorf("INFO Corrupted spool file : couldn't parse timestamp entry")
+ }
+ entries[line[0]] = uint64(i)
+ }
+ return
+}
+
+func saveSpool(entries jobs) (err error) {
+ var (
+ file *os.File
+ lines [][]string
+ jobName string
+ ts uint64
+ i int
+ )
+ file, err = os.Create(path.Join(workDir, spoolFile))
+ if err != nil {
+ return fmt.Errorf("INFO Couldn't open spool file for writing : %s", err)
+ }
+ defer file.Close()
+
+ lines = make([][]string, len(entries))
+ i = 0
+ for jobName, ts = range entries {
+ lines[i] = make([]string, 2)
+ lines[i][0] = jobName
+ lines[i][1] = fmt.Sprintf("%d", ts)
+ i++
+ }
+ err = csv.NewWriter(file).WriteAll(lines)
+ return
+}
diff --git a/state.go b/state.go
new file mode 100644
index 0000000..2cfb5df
--- /dev/null
+++ b/state.go
@@ -0,0 +1,188 @@
+package main
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "os"
+ "regexp"
+)
+
// stateFileHeader : A structure to hold the header of the state file. It is
// statically aligned for amd64 architecture: the blank fields reproduce the
// padding holes of the C struct it mirrors.
type stateFileHeader struct {
	ID                        [14]byte // "Bareos State\n\0" or "Bacula State\n\0"
	_                         int16
	Version                   int32
	_                         int32
	LastJobsAddr              uint64 // file offset of the recent job results list
	EndOfRecentJobResultsList uint64
	Reserved                  [19]uint64
}

// String implements fmt.Stringer for the state file header, stripping the
// trailing "\n\0" from the ID. The original passed the wrong arguments
// (EndOfRecentJobResultsList for LastJobsAddr, and the whole Reserved array
// for EndOfRecentJobResultsList); the argument list now matches the verbs.
func (sfh stateFileHeader) String() string {
	return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d",
		sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.LastJobsAddr, sfh.EndOfRecentJobResultsList)
}
+
// jobEntry : A structure to hold a job result from the state file. The field
// order and widths must stay exactly as-is: binary.Read in parseStateFile
// maps the little-endian on-disk record onto this layout.
type jobEntry struct {
	Pad            [16]byte
	Errors         int32
	JobType        int32 // ascii job type code; 'B' (66) marks a backup job
	JobStatus      int32 // ascii job status code; 'T' (84) marks success
	JobLevel       int32
	JobID          uint32
	VolSessionID   uint32
	VolSessionTime uint32
	JobFiles       uint32
	JobBytes       uint64
	StartTime      uint64 // unix seconds (compared against time.Now().Unix() in main)
	EndTime        uint64
	Job            [maxNameLength]byte // null-padded "name.YYYY-MM-DD..." job string
}

// String implements fmt.Stringer for a job entry. The printed job name is
// truncated to the first capture of jobNameRegex; when the stored job string
// does not match, an empty name is printed instead of raw padding bytes.
func (je jobEntry) String() string {
	var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
	var jobNameLen int
	if len(matches) >= 4 {
		jobNameLen = matches[3]
	}
	return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %d, EndTime: %d, Job: %s",
		je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, je.StartTime, je.EndTime, je.Job[:jobNameLen])
}
+
const (
	// maxNameLength : the maximum length of a string, hard coded in bareos
	maxNameLength = 128
	// stateFileHeaderLength : the length of the state file header struct
	// (14-byte ID + 2 padding + version + 4 padding + 2 uint64 offsets +
	// 19 reserved uint64), matching stateFileHeader's amd64 layout
	stateFileHeaderLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8
	// jobResultLength : the length of the job result struct, matching
	// jobEntry's field widths in order
	jobResultLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
)

// jobNameRegex extracts the bare job name from the "name.YYYY-MM-DD..."
// string stored in a job entry; capture group 1 is the name.
var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
+
+// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
+func readNextBytes(file *os.File, number int) (int, []byte) {
+ var bytes = make([]byte, number)
+
+ var n, err = file.Read(bytes)
+ if err != nil {
+ fmt.Printf("INFO Corrupted state file : file.Read failed in %s : %s\n", stateFile, err)
+ }
+
+ return n, bytes
+}
+
+func parseStateFile() (successfulJobs jobs, errorJobs jobs, err error) {
+ var (
+ n int
+ stateFileHandle *os.File
+ data []byte
+ buffer *bytes.Buffer
+ numberOfJobs uint32
+ matches []int
+ )
+ // Open the state file
+ stateFileHandle, err = os.Open(stateFile)
+ if err != nil {
+ return nil, nil, fmt.Errorf("INFO Couldn't open state file : %s", err)
+ }
+ defer stateFileHandle.Close()
+
+ // Parsing the state file header
+ var header stateFileHeader
+ n, data = readNextBytes(stateFileHandle, stateFileHeaderLength)
+ if n != stateFileHeaderLength {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid header length in %s", stateFile)
+ }
+ buffer = bytes.NewBuffer(data)
+ err = binary.Read(buffer, binary.LittleEndian, &header)
+ if err != nil {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", stateFile, err)
+ }
+ if verbose {
+ log.Printf("Parsed header: %+s\n", header)
+ }
+ if id := string(header.ID[:len(header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", stateFile)
+ }
+ if header.Version != 4 {
+ return nil, nil, fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", header.Version)
+ }
+ if header.LastJobsAddr == 0 {
+ return nil, nil, fmt.Errorf("INFO No jobs exist in the state file")
+ }
+
+ // We seek to the jobs position in the state file
+ stateFileHandle.Seek(int64(header.LastJobsAddr), 0)
+
+ // We read how many jobs there are in the state file
+ n, data = readNextBytes(stateFileHandle, 4)
+ if n != 4 {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", stateFile)
+ }
+ buffer = bytes.NewBuffer(data)
+ err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
+ if err != nil {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", stateFile, err)
+ }
+ if verbose {
+ log.Printf("%d jobs found in state file\n", numberOfJobs)
+ }
+
+ // We parse the job entries
+ successfulJobs = make(map[string]uint64)
+ errorJobs = make(map[string]uint64)
+ for ; numberOfJobs > 0; numberOfJobs-- {
+ var (
+ jobResult jobEntry
+ jobName string
+ )
+ n, data = readNextBytes(stateFileHandle, jobResultLength)
+ if n != jobResultLength {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", stateFile)
+ }
+ buffer = bytes.NewBuffer(data)
+ err = binary.Read(buffer, binary.LittleEndian, &jobResult)
+ if err != nil {
+ return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", stateFile, err)
+ }
+ matches = jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
+ if len(matches) >= 4 {
+ jobName = string(jobResult.Job[:matches[3]])
+ } else {
+ return nil, nil, fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
+ }
+ if verbose {
+ log.Printf("Parsed job entry: %s\n", jobResult)
+ }
+ // If the job is of type backup (B == ascii 66)
+ if jobResult.JobType == 66 {
+ var (
+ successExists bool
+ errorExists bool
+ currentSuccess uint64
+ currentError uint64
+ )
+ currentSuccess, successExists = successfulJobs[jobName]
+ currentError, errorExists = errorJobs[jobName]
+ // If the job is of status success (T == ascii 84)
+ if jobResult.JobStatus == 84 {
+ // if there is an older entry in errorJobs we delete it
+ if errorExists && jobResult.StartTime > currentError {
+ delete(errorJobs, jobName)
+ }
+ // if there are no entries more recent in successfulJobs we add this one
+ if !successExists || successExists && jobResult.StartTime > currentSuccess {
+ successfulJobs[jobName] = jobResult.StartTime
+ }
+ } else {
+ if !errorExists || jobResult.StartTime > currentError {
+ errorJobs[jobName] = jobResult.StartTime
+ }
+ }
+ }
+ }
+ return
+}