Big refactoring: code split into several modules and some other best practices
This commit is contained in:
parent
e07ce016c4
commit
bea8e5aba8
16 changed files with 506 additions and 386 deletions
21
config/config.go
Normal file
21
config/config.go
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
// Config object
|
||||||
|
type Config struct {
|
||||||
|
verbose bool
|
||||||
|
quiet bool
|
||||||
|
stateFile string
|
||||||
|
workDir string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verbose gets the verbose field of the configuration
|
||||||
|
func (config *Config) Verbose() bool { return config.verbose }
|
||||||
|
|
||||||
|
// Quiet gets the quiet field of the configuration
|
||||||
|
func (config *Config) Quiet() bool { return config.quiet }
|
||||||
|
|
||||||
|
// StateFile gets the stateFile field of the configuration
|
||||||
|
func (config *Config) StateFile() string { return config.stateFile }
|
||||||
|
|
||||||
|
// WorkDir gets the workDir field of the configuration
|
||||||
|
func (config *Config) WorkDir() string { return config.workDir }
|
16
config/init.go
Normal file
16
config/init.go
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
import "flag"
|
||||||
|
|
||||||
|
// Init initialises a program config from the command line flags
|
||||||
|
func (c *Config) Init() {
|
||||||
|
flag.BoolVar(&c.verbose, "v", false, "Activates verbose debugging output, defaults to false.")
|
||||||
|
flag.BoolVar(&c.quiet, "q", false, "Suppress all output, suitable to force a silent update of the spool file.")
|
||||||
|
flag.StringVar(&c.stateFile, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
|
||||||
|
flag.StringVar(&c.workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
|
||||||
|
|
||||||
|
// command line arguments parsing
|
||||||
|
flag.Parse()
|
||||||
|
c.checkWorkDir()
|
||||||
|
c.checkStateFile()
|
||||||
|
}
|
39
config/statefile.go
Normal file
39
config/statefile.go
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
bareosStateFile = "bareos-fd.9102.state"
|
||||||
|
baculaStateFile = "bacula-fd.9102.state"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *Config) checkStateFile() {
|
||||||
|
// Finds the state file to parse
|
||||||
|
if c.stateFile != "" {
|
||||||
|
c.stateFile = path.Join(c.workDir, c.stateFile)
|
||||||
|
info, err := os.Stat(c.stateFile)
|
||||||
|
if os.IsNotExist(err) || info.IsDir() {
|
||||||
|
fmt.Printf("INFO The state file %s does not exist.\n", c.stateFile)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
c.stateFile = path.Join(c.workDir, bareosStateFile)
|
||||||
|
info, err := os.Stat(c.stateFile)
|
||||||
|
if os.IsNotExist(err) || info.IsDir() {
|
||||||
|
c.stateFile = path.Join(c.workDir, baculaStateFile)
|
||||||
|
info, err = os.Stat(c.stateFile)
|
||||||
|
if os.IsNotExist(err) || info.IsDir() {
|
||||||
|
fmt.Println("INFO Could not find a suitable state file. Has a job ever run?")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.verbose {
|
||||||
|
log.Println("Using state file ", c.stateFile)
|
||||||
|
}
|
||||||
|
}
|
40
config/workdir.go
Normal file
40
config/workdir.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
bareosWorkDir = "/var/lib/bareos"
|
||||||
|
baculaWorkDir = "/var/lib/bacula"
|
||||||
|
)
|
||||||
|
|
||||||
|
// checkWorkDir checks if a work directory is valid
|
||||||
|
func (c *Config) checkWorkDir() {
|
||||||
|
// Determine the work directory to use.
|
||||||
|
if c.workDir != "" {
|
||||||
|
info, err := os.Stat(c.workDir)
|
||||||
|
if os.IsNotExist(err) || !info.IsDir() {
|
||||||
|
fmt.Printf("INFO Invalid work directory %s : it does not exist or is not a directory.\n", c.workDir)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
c.workDir = bareosWorkDir
|
||||||
|
info, err := os.Stat(c.workDir)
|
||||||
|
if os.IsNotExist(err) || !info.IsDir() {
|
||||||
|
c.workDir = baculaWorkDir
|
||||||
|
info, err := os.Stat(c.workDir)
|
||||||
|
if os.IsNotExist(err) || !info.IsDir() {
|
||||||
|
fmt.Println("INFO Could not find a suitable work directory. Is bareos or bacula installed?")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.workDir = path.Clean(c.workDir)
|
||||||
|
if c.verbose {
|
||||||
|
log.Println("Setting work directory to ", c.workDir)
|
||||||
|
}
|
||||||
|
}
|
14
job/job.go
Normal file
14
job/job.go
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
package job
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Job describes a single bareos job entry.
type Job struct {
	Name      string // job name
	Timestamp uint64 // unix timestamp of the job run
	Success   bool   // whether the job completed successfully
}

// String implements fmt.Stringer for a Job.
func (j Job) String() string {
	return fmt.Sprintf("Name: \"%s\", Timestamp: \"%d\", Success: \"%t\"", j.Name, j.Timestamp, j.Success)
}
|
27
job/utils.go
Normal file
27
job/utils.go
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
package job
|
||||||
|
|
||||||
|
// KeepOldestOnly filters a job list and keeps only the most recent entry for a job name
|
||||||
|
func KeepOldestOnly(jobs []Job) []Job {
|
||||||
|
tmpMap := make(map[string]Job)
|
||||||
|
for _, elt := range jobs {
|
||||||
|
prev, exists := tmpMap[elt.Name]
|
||||||
|
if !exists || (exists && prev.Timestamp < elt.Timestamp) {
|
||||||
|
tmpMap[elt.Name] = elt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
values := make([]Job, 0, len(tmpMap))
|
||||||
|
for _, value := range tmpMap {
|
||||||
|
values = append(values, value)
|
||||||
|
}
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeepSuccessOnly returns only the successful jobs from a job list (suiatble to write a new spool file)
|
||||||
|
func KeepSuccessOnly(jobs []Job) (result []Job) {
|
||||||
|
for _, job := range jobs {
|
||||||
|
if job.Success {
|
||||||
|
result = append(result, job)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
147
main.go
147
main.go
|
@ -1,146 +1,63 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"bareos-zabbix-check/config"
|
||||||
|
"bareos-zabbix-check/job"
|
||||||
|
"bareos-zabbix-check/spool"
|
||||||
|
"bareos-zabbix-check/state"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
bareosWorkDir = "/var/lib/bareos"
|
|
||||||
bareosStateFile = "bareos-fd.9102.state"
|
|
||||||
baculaWorkDir = "/var/lib/bacula"
|
|
||||||
baculaStateFile = "bacula-fd.9102.state"
|
|
||||||
spoolFile = "bareos-zabbix-check.spool"
|
|
||||||
)
|
|
||||||
|
|
||||||
// We declare globally the variables that will hold the command line arguments
|
|
||||||
var (
|
|
||||||
verbose bool
|
|
||||||
quiet bool
|
|
||||||
stateFile string
|
|
||||||
workDir string
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var (
|
var (
|
||||||
info os.FileInfo
|
config config.Config
|
||||||
err error
|
spool spool.Spool
|
||||||
successfulJobs jobs
|
state state.State
|
||||||
errorJobs jobs
|
|
||||||
spoolJobs jobs
|
|
||||||
jobName string
|
|
||||||
ts uint64
|
|
||||||
now uint64
|
|
||||||
errorString string
|
errorString string
|
||||||
missingString string
|
missingString string
|
||||||
)
|
)
|
||||||
|
config.Init()
|
||||||
// command line arguments parsing
|
err := state.Parse(&config)
|
||||||
flag.BoolVar(&verbose, "v", false, "Activates verbose debugging output, defaults to false.")
|
|
||||||
flag.BoolVar(&quiet, "q", false, "Suppress all output, suitable to force a silent update of the spool file.")
|
|
||||||
flag.StringVar(&stateFile, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
|
|
||||||
flag.StringVar(&workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
// Determine the work directory to use.
|
|
||||||
if workDir != "" {
|
|
||||||
info, err = os.Stat(workDir)
|
|
||||||
if os.IsNotExist(err) || !info.IsDir() {
|
|
||||||
fmt.Printf("INFO Invalid work directory %s : it does not exist or is not a directory.\n", workDir)
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
workDir = "/var/lib/bareos"
|
|
||||||
info, err = os.Stat(workDir)
|
|
||||||
if os.IsNotExist(err) || !info.IsDir() {
|
|
||||||
workDir = "/var/lib/bacula"
|
|
||||||
info, err := os.Stat(workDir)
|
|
||||||
if os.IsNotExist(err) || !info.IsDir() {
|
|
||||||
fmt.Println("INFO Could not find a suitable work directory. Is bareos or bacula installed?")
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
workDir = path.Clean(workDir)
|
|
||||||
if verbose {
|
|
||||||
log.Println("Setting work directory to ", workDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finds the state file to parse
|
|
||||||
if stateFile != "" {
|
|
||||||
stateFile = path.Join(workDir, stateFile)
|
|
||||||
info, err = os.Stat(stateFile)
|
|
||||||
if os.IsNotExist(err) || info.IsDir() {
|
|
||||||
fmt.Printf("INFO The state file %s does not exist.\n", stateFile)
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
stateFile = path.Join(workDir, bareosStateFile)
|
|
||||||
info, err = os.Stat(stateFile)
|
|
||||||
if os.IsNotExist(err) || info.IsDir() {
|
|
||||||
stateFile = path.Join(workDir, baculaStateFile)
|
|
||||||
info, err = os.Stat(stateFile)
|
|
||||||
if os.IsNotExist(err) || info.IsDir() {
|
|
||||||
fmt.Println("INFO Could not find a suitable state file. Has a job ever run?")
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if verbose {
|
|
||||||
log.Println("Using state file ", stateFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
successfulJobs, errorJobs, err = parseStateFile()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Print(err)
|
fmt.Print(err)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
// We will check for errors in loading the spool file only if necessary. If all jobs ran successfully without errors in the state file and we manage to write
|
// We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
|
||||||
// a new spool file without errors, then we will ignore any error here to avoid false positives during backup bootstrap
|
// in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
|
||||||
spoolJobs, err = loadSpool()
|
// avoid false positives during backup bootstrap
|
||||||
|
err = spool.Load(&config)
|
||||||
|
|
||||||
|
jobs := job.KeepOldestOnly(append(state.Jobs(), spool.Jobs()...))
|
||||||
|
spool.SetJobs(job.KeepSuccessOnly(jobs))
|
||||||
|
|
||||||
// if we have jobs in the spool we merge this list with successfull jobs from the state file
|
|
||||||
if err == nil {
|
|
||||||
for jobName, ts = range spoolJobs {
|
|
||||||
var (
|
|
||||||
current uint64
|
|
||||||
ok bool
|
|
||||||
)
|
|
||||||
current, ok = successfulJobs[jobName]
|
|
||||||
if !ok || current < ts {
|
|
||||||
successfulJobs[jobName] = ts
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// we write this new spool
|
// we write this new spool
|
||||||
if err2 := saveSpool(successfulJobs); err2 != nil {
|
if err2 := spool.Save(); err2 != nil {
|
||||||
fmt.Printf("AVERAGE: Error saving the spool file : %s\n", err2)
|
fmt.Printf("AVERAGE: Error saving the spool file : %s\n", err2)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// We build the error string listing the jobs in error
|
now := uint64(time.Now().Unix())
|
||||||
for jobName, ts = range errorJobs {
|
// We build the error strings
|
||||||
if errorString == "" {
|
for _, job := range jobs {
|
||||||
errorString = fmt.Sprintf("errors: %s", jobName)
|
if job.Success {
|
||||||
} else {
|
if job.Timestamp < now-24*3600 {
|
||||||
errorString = fmt.Sprintf("%s, %s", errorString, jobName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
now = uint64(time.Now().Unix())
|
|
||||||
// Next we check if all jobs ran recently and build the missing string
|
|
||||||
for jobName, ts = range successfulJobs {
|
|
||||||
if ts < now-24*3600 {
|
|
||||||
if missingString == "" {
|
if missingString == "" {
|
||||||
missingString = fmt.Sprintf("missing: %s", jobName)
|
missingString = fmt.Sprintf("missing: %s", job.Name)
|
||||||
} else {
|
} else {
|
||||||
missingString = fmt.Sprintf("%s, %s", missingString, jobName)
|
missingString = fmt.Sprintf("%s, %s", missingString, job.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if errorString == "" {
|
||||||
|
errorString = fmt.Sprintf("errors: %s", job.Name)
|
||||||
|
} else {
|
||||||
|
errorString = fmt.Sprintf("%s, %s", errorString, job.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Finally we output
|
||||||
if errorString != "" || missingString != "" {
|
if errorString != "" || missingString != "" {
|
||||||
fmt.Printf("AVERAGE: %s %s", errorString, missingString)
|
fmt.Printf("AVERAGE: %s %s", errorString, missingString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
70
spool.go
70
spool.go
|
@ -1,70 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// jobs is a map that matches a job name string to its last successfull run timestamp
|
|
||||||
type jobs map[string]uint64
|
|
||||||
|
|
||||||
func loadSpool() (entries jobs, err error) {
|
|
||||||
var (
|
|
||||||
file *os.File
|
|
||||||
lines [][]string
|
|
||||||
)
|
|
||||||
// We read the spool
|
|
||||||
file, err = os.Open(path.Join(workDir, spoolFile))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Couldn't open spool file, starting from scratch: %s", err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
lines, err = csv.NewReader(file).ReadAll()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Corrupted spool file, starting from scratch : %s", err)
|
|
||||||
}
|
|
||||||
if verbose {
|
|
||||||
log.Printf("Spool file content : %v\n", lines)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries = make(map[string]uint64)
|
|
||||||
for _, line := range lines {
|
|
||||||
var i int
|
|
||||||
i, err = strconv.Atoi(line[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Corrupted spool file : couldn't parse timestamp entry")
|
|
||||||
}
|
|
||||||
entries[line[0]] = uint64(i)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func saveSpool(entries jobs) (err error) {
|
|
||||||
var (
|
|
||||||
file *os.File
|
|
||||||
lines [][]string
|
|
||||||
jobName string
|
|
||||||
ts uint64
|
|
||||||
i int
|
|
||||||
)
|
|
||||||
file, err = os.Create(path.Join(workDir, spoolFile))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
lines = make([][]string, len(entries))
|
|
||||||
i = 0
|
|
||||||
for jobName, ts = range entries {
|
|
||||||
lines[i] = make([]string, 2)
|
|
||||||
lines[i][0] = jobName
|
|
||||||
lines[i][1] = fmt.Sprintf("%d", ts)
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
err = csv.NewWriter(file).WriteAll(lines)
|
|
||||||
return
|
|
||||||
}
|
|
40
spool/load.go
Normal file
40
spool/load.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package spool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bareos-zabbix-check/config"
|
||||||
|
"bareos-zabbix-check/job"
|
||||||
|
"encoding/csv"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Load loads a spool file in path
|
||||||
|
func (s *Spool) Load(c *config.Config) (err error) {
|
||||||
|
s.config = c
|
||||||
|
// We read the spool
|
||||||
|
file, err := os.Open(path.Join(c.WorkDir(), spoolFile))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Couldn't open spool file, starting from scratch: %s", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
lines, err := csv.NewReader(file).ReadAll()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Corrupted spool file, starting from scratch : %s", err)
|
||||||
|
}
|
||||||
|
if c.Verbose() {
|
||||||
|
log.Printf("Spool file content : %v\n", lines)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
var i int
|
||||||
|
i, err = strconv.Atoi(line[1])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Corrupted spool file : couldn't parse timestamp entry")
|
||||||
|
}
|
||||||
|
s.jobs = append(s.jobs, job.Job{Name: line[0], Timestamp: uint64(i), Success: true})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
28
spool/save.go
Normal file
28
spool/save.go
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
package spool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/csv"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Save writes a spool on the disk
|
||||||
|
func (s *Spool) Save() (err error) {
|
||||||
|
file, err := os.Create(path.Join(s.config.WorkDir(), spoolFile))
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
lines := make([][]string, len(s.jobs))
|
||||||
|
var i int = 0
|
||||||
|
for _, job := range s.jobs {
|
||||||
|
lines[i] = make([]string, 2)
|
||||||
|
lines[i][0] = job.Name
|
||||||
|
lines[i][1] = fmt.Sprintf("%d", job.Timestamp)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
err = csv.NewWriter(file).WriteAll(lines)
|
||||||
|
return
|
||||||
|
}
|
26
spool/spool.go
Normal file
26
spool/spool.go
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
package spool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bareos-zabbix-check/config"
|
||||||
|
"bareos-zabbix-check/job"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
spoolFile = "bareos-zabbix-check.spool"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Spool is an object for manipulating a bareos spool file
|
||||||
|
type Spool struct {
|
||||||
|
config *config.Config
|
||||||
|
jobs []job.Job
|
||||||
|
}
|
||||||
|
|
||||||
|
// Jobs exports a spool to a jobs list
|
||||||
|
func (s *Spool) Jobs() []job.Job {
|
||||||
|
return s.jobs
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetJobs sets a jobs list
|
||||||
|
func (s *Spool) SetJobs(jobs []job.Job) {
|
||||||
|
s.jobs = jobs
|
||||||
|
}
|
199
state.go
199
state.go
|
@ -1,199 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// stateFileHeader : A structure to hold the header of the state file. It is statically aligned for amd64 architecture
|
|
||||||
// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
|
|
||||||
type stateFileHeader struct {
|
|
||||||
ID [14]byte
|
|
||||||
_ int16
|
|
||||||
Version int32
|
|
||||||
_ int32
|
|
||||||
LastJobsAddr uint64
|
|
||||||
EndOfRecentJobResultsList uint64
|
|
||||||
Reserved [19]uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sfh stateFileHeader) String() string {
|
|
||||||
return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.EndOfRecentJobResultsList, sfh.Reserved)
|
|
||||||
}
|
|
||||||
|
|
||||||
// jobEntry : A structure to hold a job result from the state file
|
|
||||||
// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
|
|
||||||
type jobEntry struct {
|
|
||||||
Pad [16]byte
|
|
||||||
Errors int32
|
|
||||||
JobType int32
|
|
||||||
JobStatus int32
|
|
||||||
JobLevel int32
|
|
||||||
JobID uint32
|
|
||||||
VolSessionID uint32
|
|
||||||
VolSessionTime uint32
|
|
||||||
JobFiles uint32
|
|
||||||
JobBytes uint64
|
|
||||||
StartTime uint64
|
|
||||||
EndTime uint64
|
|
||||||
Job [maxNameLength]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (je jobEntry) String() string {
|
|
||||||
var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
|
|
||||||
var jobNameLen int
|
|
||||||
if len(matches) >= 4 {
|
|
||||||
jobNameLen = matches[3]
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %s, EndTime: %s, Job: %s",
|
|
||||||
je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// maxNameLength : the maximum length of a string, hard coded in bareos
|
|
||||||
maxNameLength = 128
|
|
||||||
// stateFileHeaderLength : the length of the state file header struct
|
|
||||||
stateFileHeaderLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8
|
|
||||||
// jobResultLength : the length of the job result struct
|
|
||||||
jobResultLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
|
|
||||||
)
|
|
||||||
|
|
||||||
var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
|
|
||||||
|
|
||||||
// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
|
|
||||||
func readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
|
|
||||||
bytes = make([]byte, number)
|
|
||||||
n, err = file.Read(bytes)
|
|
||||||
if err != nil {
|
|
||||||
return 0, nil, fmt.Errorf("file.Read failed in %s : %s", stateFile, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseStateFile() (successfulJobs jobs, errorJobs jobs, err error) {
|
|
||||||
var (
|
|
||||||
n int
|
|
||||||
stateFileHandle *os.File
|
|
||||||
data []byte
|
|
||||||
buffer *bytes.Buffer
|
|
||||||
numberOfJobs uint32
|
|
||||||
matches []int
|
|
||||||
)
|
|
||||||
// Open the state file
|
|
||||||
stateFileHandle, err = os.Open(stateFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Couldn't open state file : %s", err)
|
|
||||||
}
|
|
||||||
defer stateFileHandle.Close()
|
|
||||||
|
|
||||||
// Parsing the state file header
|
|
||||||
var header stateFileHeader
|
|
||||||
n, data, err = readNextBytes(stateFileHandle, stateFileHeaderLength)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
|
|
||||||
}
|
|
||||||
if n != stateFileHeaderLength {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid header length in %s", stateFile)
|
|
||||||
}
|
|
||||||
buffer = bytes.NewBuffer(data)
|
|
||||||
err = binary.Read(buffer, binary.LittleEndian, &header)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", stateFile, err)
|
|
||||||
}
|
|
||||||
if verbose {
|
|
||||||
log.Printf("Parsed header: %+s\n", header)
|
|
||||||
}
|
|
||||||
if id := string(header.ID[:len(header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", stateFile)
|
|
||||||
}
|
|
||||||
if header.Version != 4 {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", header.Version)
|
|
||||||
}
|
|
||||||
if header.LastJobsAddr == 0 {
|
|
||||||
return nil, nil, fmt.Errorf("INFO No jobs exist in the state file")
|
|
||||||
}
|
|
||||||
|
|
||||||
// We seek to the jobs position in the state file
|
|
||||||
stateFileHandle.Seek(int64(header.LastJobsAddr), 0)
|
|
||||||
|
|
||||||
// We read how many jobs there are in the state file
|
|
||||||
n, data, err = readNextBytes(stateFileHandle, 4)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
|
|
||||||
}
|
|
||||||
if n != 4 {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", stateFile)
|
|
||||||
}
|
|
||||||
buffer = bytes.NewBuffer(data)
|
|
||||||
err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", stateFile, err)
|
|
||||||
}
|
|
||||||
if verbose {
|
|
||||||
log.Printf("%d jobs found in state file\n", numberOfJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We parse the job entries
|
|
||||||
successfulJobs = make(map[string]uint64)
|
|
||||||
errorJobs = make(map[string]uint64)
|
|
||||||
for ; numberOfJobs > 0; numberOfJobs-- {
|
|
||||||
var (
|
|
||||||
jobResult jobEntry
|
|
||||||
jobName string
|
|
||||||
)
|
|
||||||
n, data, err = readNextBytes(stateFileHandle, jobResultLength)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : %s", err)
|
|
||||||
}
|
|
||||||
if n != jobResultLength {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", stateFile)
|
|
||||||
}
|
|
||||||
buffer = bytes.NewBuffer(data)
|
|
||||||
err = binary.Read(buffer, binary.LittleEndian, &jobResult)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", stateFile, err)
|
|
||||||
}
|
|
||||||
matches = jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
|
|
||||||
if len(matches) >= 4 {
|
|
||||||
jobName = string(jobResult.Job[:matches[3]])
|
|
||||||
} else {
|
|
||||||
return nil, nil, fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
|
|
||||||
}
|
|
||||||
if verbose {
|
|
||||||
log.Printf("Parsed job entry: %s\n", jobResult)
|
|
||||||
}
|
|
||||||
// If the job is of type backup (B == ascii 66)
|
|
||||||
if jobResult.JobType == 66 {
|
|
||||||
var (
|
|
||||||
successExists bool
|
|
||||||
errorExists bool
|
|
||||||
currentSuccess uint64
|
|
||||||
currentError uint64
|
|
||||||
)
|
|
||||||
currentSuccess, successExists = successfulJobs[jobName]
|
|
||||||
currentError, errorExists = errorJobs[jobName]
|
|
||||||
// If the job is of status success (T == ascii 84)
|
|
||||||
if jobResult.JobStatus == 84 {
|
|
||||||
// if there is an older entry in errorJobs we delete it
|
|
||||||
if errorExists && jobResult.StartTime > currentError {
|
|
||||||
delete(errorJobs, jobName)
|
|
||||||
}
|
|
||||||
// if there are no entries more recent in successfulJobs we add this one
|
|
||||||
if !successExists || successExists && jobResult.StartTime > currentSuccess {
|
|
||||||
successfulJobs[jobName] = jobResult.StartTime
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if !errorExists || jobResult.StartTime > currentError {
|
|
||||||
errorJobs[jobName] = jobResult.StartTime
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
57
state/header.go
Normal file
57
state/header.go
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
package state
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// headerLength : the length of the state file header struct.
const headerLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8

// header : A structure to hold the header of the state file. It is statically aligned for amd64 architecture
// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
type header struct {
	ID                        [14]byte
	_                         int16
	Version                   int32
	_                         int32
	LastJobsAddr              uint64
	EndOfRecentJobResultsList uint64
	Reserved                  [19]uint64
}

// String renders the header fields for debugging output.
func (sfh header) String() string {
	// BUG FIX: the format string has four verbs (ID, Version,
	// LastJobsAddr, EndOfRecentJobResultsList) but the original passed
	// sfh.EndOfRecentJobResultsList and sfh.Reserved as the last two
	// arguments, so LastJobsAddr was printed under the wrong label and
	// the Reserved array was dumped as EndOfRecentJobResultsList.
	return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.LastJobsAddr, sfh.EndOfRecentJobResultsList)
}
|
||||||
|
|
||||||
|
func (s *State) parseHeader(file *os.File) (err error) {
|
||||||
|
// Parsing the state file header
|
||||||
|
n, data, err := s.readNextBytes(file, headerLength)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : %s", err)
|
||||||
|
}
|
||||||
|
if n != headerLength {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : invalid header length in %s", s.config.StateFile())
|
||||||
|
}
|
||||||
|
buffer := bytes.NewBuffer(data)
|
||||||
|
err = binary.Read(buffer, binary.LittleEndian, &s.header)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", s.config.StateFile(), err)
|
||||||
|
}
|
||||||
|
if s.config.Verbose() {
|
||||||
|
log.Printf("Parsed header: %+s\n", s.header)
|
||||||
|
}
|
||||||
|
if id := string(s.header.ID[:len(s.header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", s.config.StateFile())
|
||||||
|
}
|
||||||
|
if s.header.Version != 4 {
|
||||||
|
return fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", s.header.Version)
|
||||||
|
}
|
||||||
|
if s.header.LastJobsAddr == 0 {
|
||||||
|
return fmt.Errorf("INFO No jobs exist in the state file")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
103
state/job.go
Normal file
103
state/job.go
Normal file
|
@ -0,0 +1,103 @@
|
||||||
|
package state
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bareos-zabbix-check/job"
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// jobLength : the length of the job result struct
|
||||||
|
const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
|
||||||
|
|
||||||
|
var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
|
||||||
|
|
||||||
|
// jobEntry : A structure to hold a job result from the state file
|
||||||
|
// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
|
||||||
|
type jobEntry struct {
|
||||||
|
Pad [16]byte
|
||||||
|
Errors int32
|
||||||
|
JobType int32
|
||||||
|
JobStatus int32
|
||||||
|
JobLevel int32
|
||||||
|
JobID uint32
|
||||||
|
VolSessionID uint32
|
||||||
|
VolSessionTime uint32
|
||||||
|
JobFiles uint32
|
||||||
|
JobBytes uint64
|
||||||
|
StartTime uint64
|
||||||
|
EndTime uint64
|
||||||
|
Job [maxNameLength]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (je jobEntry) String() string {
|
||||||
|
var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
|
||||||
|
var jobNameLen int
|
||||||
|
if len(matches) >= 4 {
|
||||||
|
jobNameLen = matches[3]
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %s, EndTime: %s, Job: %s",
|
||||||
|
je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *State) parseJobs(file *os.File) (err error) {
|
||||||
|
// We seek to the jobs position in the state file
|
||||||
|
file.Seek(int64(s.header.LastJobsAddr), 0)
|
||||||
|
|
||||||
|
// We read how many jobs there are in the state file
|
||||||
|
n, data, err := s.readNextBytes(file, 4)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : %s", err)
|
||||||
|
}
|
||||||
|
if n != 4 {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", s.config.StateFile())
|
||||||
|
}
|
||||||
|
buffer := bytes.NewBuffer(data)
|
||||||
|
var numberOfJobs uint32
|
||||||
|
err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", s.config.StateFile(), err)
|
||||||
|
}
|
||||||
|
if s.config.Verbose() {
|
||||||
|
log.Printf("%d jobs found in state file\n", numberOfJobs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We parse the job entries
|
||||||
|
for ; numberOfJobs > 0; numberOfJobs-- {
|
||||||
|
var (
|
||||||
|
jobResult jobEntry
|
||||||
|
jobName string
|
||||||
|
)
|
||||||
|
n, data, err = s.readNextBytes(file, jobLength)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : %s", err)
|
||||||
|
}
|
||||||
|
if n != jobLength {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", s.config.StateFile())
|
||||||
|
}
|
||||||
|
buffer = bytes.NewBuffer(data)
|
||||||
|
err = binary.Read(buffer, binary.LittleEndian, &jobResult)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", s.config.StateFile(), err)
|
||||||
|
}
|
||||||
|
matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
|
||||||
|
if len(matches) >= 4 {
|
||||||
|
jobName = string(jobResult.Job[:matches[3]])
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
|
||||||
|
}
|
||||||
|
if s.config.Verbose() {
|
||||||
|
log.Printf("Parsed job entry: %s\n", jobResult)
|
||||||
|
}
|
||||||
|
// If the job is of type backup (B == ascii 66)
|
||||||
|
if jobResult.JobType == 66 {
|
||||||
|
// If the job is of status success JobStatus is equals to 84 (T == ascii 84)
|
||||||
|
s.jobs = append(s.jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
40
state/parser.go
Normal file
40
state/parser.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package state
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bareos-zabbix-check/config"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parse parses a bareos state file
|
||||||
|
func (s *State) Parse(c *config.Config) (err error) {
|
||||||
|
s.config = c
|
||||||
|
// Open the state file
|
||||||
|
file, err := os.Open(c.StateFile())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("INFO Couldn't open state file : %s", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
err = s.parseHeader(file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = s.parseJobs(file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
|
||||||
|
func (s *State) readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
|
||||||
|
bytes = make([]byte, number)
|
||||||
|
n, err = file.Read(bytes)
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, fmt.Errorf("file.Read failed in %s : %s", s.config.StateFile(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
21
state/state.go
Normal file
21
state/state.go
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package state
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bareos-zabbix-check/config"
|
||||||
|
"bareos-zabbix-check/job"
|
||||||
|
)
|
||||||
|
|
||||||
|
// maxNameLength : the maximum length of a string, hard coded in bareos
const maxNameLength = 128

// State is an object for manipulating a bareos state file
type State struct {
	config *config.Config // program configuration (state file path, verbosity)
	header header         // parsed state file header
	jobs   []job.Job      // job results collected while parsing the state file
}

// Jobs returns the jobs from the state file
func (s *State) Jobs() []job.Job {
	return s.jobs
}
|
Loading…
Add table
Reference in a new issue