aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJulien Dessaux2020-02-22 11:57:50 +0100
committerJulien Dessaux2020-02-22 11:57:50 +0100
commitbcfaffac240d74cd79bec3c2a9d3c144d215b495 (patch)
treeedd2c5f1e011afee759970323042fcf35bf68962
parentImproved tests for job package (diff)
downloadbareos-zabbix-check-bcfaffac240d74cd79bec3c2a9d3c144d215b495.tar.gz
bareos-zabbix-check-bcfaffac240d74cd79bec3c2a9d3c144d215b495.tar.bz2
bareos-zabbix-check-bcfaffac240d74cd79bec3c2a9d3c144d215b495.zip
Added tests to the state package, and reworked the code around that
-rw-r--r--go.mod2
-rw-r--r--go.sum2
-rw-r--r--main.go39
-rw-r--r--state/header.go50
-rw-r--r--state/header_test.go109
-rw-r--r--state/job.go53
-rw-r--r--state/job_test.go174
-rw-r--r--state/parser.go40
-rw-r--r--state/state.go21
-rw-r--r--utils/clen.go11
-rw-r--r--utils/clen_test.go26
11 files changed, 407 insertions, 120 deletions
diff --git a/go.mod b/go.mod
index 505d991..4ee0177 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,5 @@
module bareos-zabbix-check
go 1.13
+
+require github.com/pkg/errors v0.9.1
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..7c401c3
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,2 @@
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/main.go b/main.go
index 3e206a0..bc5945c 100644
--- a/main.go
+++ b/main.go
@@ -6,6 +6,7 @@ import (
"bareos-zabbix-check/spool"
"bareos-zabbix-check/state"
"fmt"
+ "log"
"os"
"time"
)
@@ -14,22 +15,52 @@ func main() {
var (
config config.Config
spool spool.Spool
- state state.State
errorString string
missingString string
)
config.Init()
- err := state.Parse(&config)
+ // Open the state file
+ stateFile, err := os.Open(config.StateFile())
if err != nil {
- fmt.Print(err)
+ fmt.Printf("INFO Couldn't open state file : %s", err)
os.Exit(0)
}
+ defer stateFile.Close()
+ // parse the state file
+ header, err := state.ParseHeader(stateFile)
+ if err != nil {
+ fmt.Printf("INFO Could not parse state file header : %s", err)
+ os.Exit(0)
+ }
+ if config.Verbose() {
+ log.Printf("Parsed header: %+s\n", header)
+ }
+ // seek to the job entries in the state file
+ offset, err := stateFile.Seek(int64(header.LastJobsAddr), 0)
+ if err != nil {
+ fmt.Printf("INFO Couldn't seek to jobs position in state file : %s", err)
+ }
+ if uint64(offset) != header.LastJobsAddr {
+ fmt.Print("INFO Truncated state file")
+ }
+ // Then parse the jobs in the state file
+ jobs, err := state.ParseJobs(stateFile)
+ if err != nil {
+ fmt.Printf("INFO Could not parse jobs in state file : %s", err)
+ }
+ if config.Verbose() {
+ log.Printf("%d jobs found in state file\n", len(jobs))
+ for i := 0; i < len(jobs); i++ {
+ log.Print(jobs[i])
+ }
+ }
+
// We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
// in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
// avoid false positives during backup bootstrap
err = spool.Load(&config)
- jobs := job.KeepOldestOnly(append(state.Jobs(), spool.Jobs()...))
+ jobs = job.KeepOldestOnly(append(jobs, spool.Jobs()...))
spool.SetJobs(job.KeepSuccessOnly(jobs))
// we write this new spool
diff --git a/state/header.go b/state/header.go
index 3e55899..3becd5d 100644
--- a/state/header.go
+++ b/state/header.go
@@ -1,57 +1,57 @@
package state
import (
+ "bareos-zabbix-check/utils"
"bytes"
"encoding/binary"
"fmt"
- "log"
- "os"
+ "io"
+
+ "github.com/pkg/errors"
)
// c.StateFile()HeaderLength : the length of the state file header struct
-const headerLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8
+const headerLength = 14 + 2 + 4 + 4 + 8 + 8 // + 19*8
-// header : A structure to hold the header of the state file. It is statically aligned for amd64 architecture
+// Header is a structure to hold the header of the state file. It is statically aligned for amd64 architecture
// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
-type header struct {
+type Header struct {
ID [14]byte
_ int16
Version int32
_ int32
LastJobsAddr uint64
EndOfRecentJobResultsList uint64
- Reserved [19]uint64
+ //Reserved [19]uint64
}
-func (sfh header) String() string {
- return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.EndOfRecentJobResultsList, sfh.Reserved)
+func (sfh *Header) String() string {
+ return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d",
+ string(sfh.ID[:utils.Clen(sfh.ID[:])]), sfh.Version, sfh.LastJobsAddr, sfh.EndOfRecentJobResultsList)
}
-func (s *State) parseHeader(file *os.File) (err error) {
+// ParseHeader parses a Header struct
+func ParseHeader(handle io.Reader) (h *Header, err error) {
// Parsing the state file header
- n, data, err := s.readNextBytes(file, headerLength)
+ data := make([]byte, headerLength)
+ n, err := handle.Read(data)
if err != nil {
- return fmt.Errorf("INFO Corrupted state file : %s", err)
+ return nil, errors.Wrap(err, "Corrupted state file")
}
if n != headerLength {
- return fmt.Errorf("INFO Corrupted state file : invalid header length in %s", s.config.StateFile())
+ return nil, fmt.Errorf("Corrupted state file : invalid header length")
}
buffer := bytes.NewBuffer(data)
- err = binary.Read(buffer, binary.LittleEndian, &s.header)
- if err != nil {
- return fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", s.config.StateFile(), err)
- }
- if s.config.Verbose() {
- log.Printf("Parsed header: %+s\n", s.header)
- }
- if id := string(s.header.ID[:len(s.header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" {
- return fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", s.config.StateFile())
+ h = &Header{}
+ _ = binary.Read(buffer, binary.LittleEndian, h) // this call cannot fail since we checked the header length
+ if id := string(h.ID[:utils.Clen(h.ID[:])]); id != "Bareos State\n" && id != "Bacula State\n" {
+ return nil, fmt.Errorf("Corrupted state file : Not a bareos or bacula state file : %s", id)
}
- if s.header.Version != 4 {
- return fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", s.header.Version)
+ if h.Version != 4 {
+ return nil, fmt.Errorf("Invalid state file : This script only supports bareos state file version 4, got %d", h.Version)
}
- if s.header.LastJobsAddr == 0 {
- return fmt.Errorf("INFO No jobs exist in the state file")
+ if h.LastJobsAddr == 0 {
+ return nil, fmt.Errorf("No jobs exist in the state file")
}
return
}
diff --git a/state/header_test.go b/state/header_test.go
new file mode 100644
index 0000000..1ed71eb
--- /dev/null
+++ b/state/header_test.go
@@ -0,0 +1,109 @@
+package state
+
+import (
+ "bytes"
+ "io"
+ "reflect"
+ "testing"
+)
+
+func Test_header_String(t *testing.T) {
+ var id [14]byte
+ copy(id[:], []byte("test"))
+ type fields struct {
+ ID [14]byte
+ Version int32
+ LastJobsAddr uint64
+ EndOfRecentJobResultsList uint64
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want string
+ }{
+ {"default header", fields{ID: id, Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3}, "ID: \"test\", Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ sfh := Header{
+ ID: tt.fields.ID,
+ Version: tt.fields.Version,
+ LastJobsAddr: tt.fields.LastJobsAddr,
+ EndOfRecentJobResultsList: tt.fields.EndOfRecentJobResultsList,
+ }
+ if got := sfh.String(); got != tt.want {
+ t.Errorf("header.String() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_parseHeader(t *testing.T) {
+ readerEmpty := bytes.NewReader([]byte(""))
+ readerTooSmall := bytes.NewReader([]byte("abcd"))
+ readerNotBareosNorBacula := bytes.NewReader([]byte{
+ 't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // ID
+ 0, 0, // padding
+ 4, 0, 0, 0, // version
+ 0, 0, 0, 0, //padding
+ 0, 0, 0, 0, 0, 0, 0, 0, // last job address
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
+ })
+ readerBadVersion := bytes.NewReader([]byte{
+ 'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
+ 0, 0, // padding
+ 3, 0, 0, 0, // version
+ 0, 0, 0, 0, //padding
+ 0, 0, 0, 0, 0, 0, 0, 0, // last job address
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
+ })
+ readerNoJobs := bytes.NewReader([]byte{
+ 'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
+ 0, 0, // padding
+ 4, 0, 0, 0, // version
+ 0, 0, 0, 0, //padding
+ 0, 0, 0, 0, 0, 0, 0, 0, // last job address
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
+ })
+ readerValid := bytes.NewReader([]byte{
+ 'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
+ 0, 0, // padding
+ 4, 0, 0, 0, // version
+ 0, 0, 0, 0, //padding
+ 192, 0, 0, 0, 0, 0, 0, 0, // last job address
+ 254, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
+ })
+ type args struct {
+ handle io.Reader
+ }
+ tests := []struct {
+ name string
+ args args
+ wantH *Header
+ wantErr bool
+ }{
+ {"read error", args{readerEmpty}, nil, true},
+ {"invalid header length", args{readerTooSmall}, nil, true},
+ {"reader not bareos nor bacula", args{readerNotBareosNorBacula}, nil, true},
+ {"reader bad version", args{readerBadVersion}, nil, true},
+ {"reader no jobs", args{readerNoJobs}, nil, true},
+ {"reader valid", args{readerValid}, &Header{
+ ID: [14]byte{'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0},
+ Version: 4,
+ LastJobsAddr: 192,
+ EndOfRecentJobResultsList: 254,
+ }, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotH, err := ParseHeader(tt.args.handle)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("parseHeader() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(gotH, tt.wantH) {
+ t.Errorf("parseHeader() = %v, want %v", gotH, tt.wantH)
+ }
+ })
+ }
+}
diff --git a/state/job.go b/state/job.go
index 8d5b04f..e8333ff 100644
--- a/state/job.go
+++ b/state/job.go
@@ -2,15 +2,20 @@ package state
import (
"bareos-zabbix-check/job"
+ "bareos-zabbix-check/utils"
"bytes"
"encoding/binary"
"fmt"
- "log"
- "os"
+ "io"
"regexp"
"time"
+
+ "github.com/pkg/errors"
)
+// maxNameLength : the maximum length of a job name, hardcoded in bareos
+const maxNameLength = 128
+
// jobLength : the length of the job result struct
const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
@@ -19,7 +24,7 @@ var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}
// jobEntry : A structure to hold a job result from the state file
// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
type jobEntry struct {
- Pad [16]byte
+ _ [16]byte
Errors int32
JobType int32
JobStatus int32
@@ -36,7 +41,7 @@ type jobEntry struct {
func (je jobEntry) String() string {
var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
- var jobNameLen int
+ jobNameLen := utils.Clen(je.Job[:])
if len(matches) >= 4 {
jobNameLen = matches[3]
}
@@ -44,27 +49,20 @@ func (je jobEntry) String() string {
je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
}
-func (s *State) parseJobs(file *os.File) (err error) {
- // We seek to the jobs position in the state file
- file.Seek(int64(s.header.LastJobsAddr), 0)
-
+// ParseJobs parses the jobs in a state file
+func ParseJobs(handle io.Reader) (jobs []job.Job, err error) {
// We read how many jobs there are in the state file
- n, data, err := s.readNextBytes(file, 4)
+ data := make([]byte, 4)
+ n, err := handle.Read(data)
if err != nil {
- return fmt.Errorf("INFO Corrupted state file : %s", err)
+ return nil, errors.Wrap(err, "Corrupted state file")
}
if n != 4 {
- return fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", s.config.StateFile())
+ return nil, fmt.Errorf("Corrupted state file : invalid numberOfJobs read length")
}
buffer := bytes.NewBuffer(data)
var numberOfJobs uint32
- err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs)
- if err != nil {
- return fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", s.config.StateFile(), err)
- }
- if s.config.Verbose() {
- log.Printf("%d jobs found in state file\n", numberOfJobs)
- }
+ _ = binary.Read(buffer, binary.LittleEndian, &numberOfJobs) // this call cannot fail since we checked the header length
// We parse the job entries
for ; numberOfJobs > 0; numberOfJobs-- {
@@ -72,31 +70,26 @@ func (s *State) parseJobs(file *os.File) (err error) {
jobResult jobEntry
jobName string
)
- n, data, err = s.readNextBytes(file, jobLength)
+ data := make([]byte, jobLength)
+ n, err = handle.Read(data)
if err != nil {
- return fmt.Errorf("INFO Corrupted state file : %s", err)
+ return nil, errors.Wrap(err, "Corrupted state file")
}
if n != jobLength {
- return fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", s.config.StateFile())
+ return nil, fmt.Errorf("Corrupted state file : invalid job entry")
}
buffer = bytes.NewBuffer(data)
- err = binary.Read(buffer, binary.LittleEndian, &jobResult)
- if err != nil {
- return fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", s.config.StateFile(), err)
- }
+ _ = binary.Read(buffer, binary.LittleEndian, &jobResult) // this call cannot fail since we checked the header length
matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
if len(matches) >= 4 {
jobName = string(jobResult.Job[:matches[3]])
} else {
- return fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
- }
- if s.config.Verbose() {
- log.Printf("Parsed job entry: %s\n", jobResult)
+ return nil, fmt.Errorf("Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
}
// If the job is of type backup (B == ascii 66)
if jobResult.JobType == 66 {
// If the job is of status success JobStatus is equals to 84 (T == ascii 84)
- s.jobs = append(s.jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84})
+ jobs = append(jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84})
}
}
return
diff --git a/state/job_test.go b/state/job_test.go
new file mode 100644
index 0000000..410401b
--- /dev/null
+++ b/state/job_test.go
@@ -0,0 +1,174 @@
+package state
+
+import (
+ "bareos-zabbix-check/job"
+ "bytes"
+ "io"
+ "reflect"
+ "testing"
+)
+
+func Test_jobEntry_String(t *testing.T) {
+ var badlyNamedJob [128]byte
+ copy(badlyNamedJob[:], []byte("job_name"))
+ var normalJob [128]byte
+ copy(normalJob[:], []byte("normal_name.2012-06-01"))
+ type fields struct {
+ Errors int32
+ JobType int32
+ JobStatus int32
+ JobLevel int32
+ JobID uint32
+ VolSessionID uint32
+ VolSessionTime uint32
+ JobFiles uint32
+ JobBytes uint64
+ StartTime uint64
+ EndTime uint64
+ Job [maxNameLength]byte
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want string
+ }{
+ {
+ "badly named job",
+ fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: badlyNamedJob},
+ "Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: job_name",
+ },
+ {
+ "normal job",
+ fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: normalJob},
+ "Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: normal_name",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ je := jobEntry{
+ Errors: tt.fields.Errors,
+ JobType: tt.fields.JobType,
+ JobStatus: tt.fields.JobStatus,
+ JobLevel: tt.fields.JobLevel,
+ JobID: tt.fields.JobID,
+ VolSessionID: tt.fields.VolSessionID,
+ VolSessionTime: tt.fields.VolSessionTime,
+ JobFiles: tt.fields.JobFiles,
+ JobBytes: tt.fields.JobBytes,
+ StartTime: tt.fields.StartTime,
+ EndTime: tt.fields.EndTime,
+ Job: tt.fields.Job,
+ }
+ if got := je.String(); got != tt.want {
+ t.Errorf("jobEntry.String() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestParseJobs(t *testing.T) {
+ readerEmpty := bytes.NewReader([]byte{})
+ readerTooSmall := bytes.NewReader([]byte{
+ 1, // number of jobs
+ })
+ readerJobError := bytes.NewReader([]byte{
+ 1, 0, 0, 0, // number of jobs
+ })
+ readerJobTooSmall := bytes.NewReader([]byte{
+ 1, 0, 0, 0, // number of jobs
+ 0,
+ })
+ readerInvalidJobName := bytes.NewReader([]byte{
+ 1, 0, 0, 0, // number of jobs
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
+ 0, 0, 0, 0, // Errors
+ 'B', 0, 0, 0, // JobType
+ 'T', 0, 0, 0, // JobStatus
+ 0, 0, 0, 0, // JobLevel
+ 0, 0, 0, 0, // JobID
+ 0, 0, 0, 0, // VolSessionID
+ 0, 0, 0, 0, // VolSessionTime
+ 0, 0, 0, 0, // JobFiles
+ 0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
+ 1, 0, 0, 0, 0, 0, 0, 0, // StartTime
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndTime
+ 't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Job
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ })
+ readerZeroJobs := bytes.NewReader([]byte{
+ 0, 0, 0, 0, // number of jobs
+ })
+ readerOneNonBackupJob := bytes.NewReader([]byte{
+ 1, 0, 0, 0, // number of jobs
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
+ 0, 0, 0, 0, // Errors
+ 'R', 0, 0, 0, // JobType
+ 'T', 0, 0, 0, // JobStatus
+ 0, 0, 0, 0, // JobLevel
+ 0, 0, 0, 0, // JobID
+ 0, 0, 0, 0, // VolSessionID
+ 0, 0, 0, 0, // VolSessionTime
+ 0, 0, 0, 0, // JobFiles
+ 0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
+ 1, 0, 0, 0, 0, 0, 0, 0, // StartTime
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndTime
+ 't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ })
+ readerOneSuccessfulBackupJob := bytes.NewReader([]byte{
+ 1, 0, 0, 0, // number of jobs
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
+ 0, 0, 0, 0, // Errors
+ 'B', 0, 0, 0, // JobType
+ 'T', 0, 0, 0, // JobStatus
+ 0, 0, 0, 0, // JobLevel
+ 0, 0, 0, 0, // JobID
+ 0, 0, 0, 0, // VolSessionID
+ 0, 0, 0, 0, // VolSessionTime
+ 0, 0, 0, 0, // JobFiles
+ 0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
+ 1, 0, 0, 0, 0, 0, 0, 0, // StartTime
+ 0, 0, 0, 0, 0, 0, 0, 0, // EndTime
+ 't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ })
+ type args struct {
+ handle io.Reader
+ }
+ tests := []struct {
+ name string
+ args args
+ wantJobs []job.Job
+ wantErr bool
+ }{
+ {"read empty", args{readerEmpty}, nil, true},
+ {"read too small", args{readerTooSmall}, nil, true},
+ {"read job error", args{readerJobError}, nil, true},
+ {"read job too small", args{readerJobTooSmall}, nil, true},
+ {"read invalid job name", args{readerInvalidJobName}, nil, true},
+ {"read zero jobs", args{readerZeroJobs}, nil, false},
+ {"read one non backup job", args{readerOneNonBackupJob}, nil, false},
+ {"read one successful backup job", args{readerOneSuccessfulBackupJob}, []job.Job{{Name: "test", Timestamp: 1, Success: true}}, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotJobs, err := ParseJobs(tt.args.handle)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParseJobs() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(gotJobs, tt.wantJobs) {
+ t.Errorf("ParseJobs() = %v, want %v", gotJobs, tt.wantJobs)
+ }
+ })
+ }
+}
diff --git a/state/parser.go b/state/parser.go
deleted file mode 100644
index 60f5394..0000000
--- a/state/parser.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package state
-
-import (
- "bareos-zabbix-check/config"
- "fmt"
- "os"
-)
-
-// Parse parses a bareos state file
-func (s *State) Parse(c *config.Config) (err error) {
- s.config = c
- // Open the state file
- file, err := os.Open(c.StateFile())
- if err != nil {
- return fmt.Errorf("INFO Couldn't open state file : %s", err)
- }
- defer file.Close()
-
- err = s.parseHeader(file)
- if err != nil {
- return err
- }
- err = s.parseJobs(file)
- if err != nil {
- return err
- }
-
- return
-}
-
-// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
-func (s *State) readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
- bytes = make([]byte, number)
- n, err = file.Read(bytes)
- if err != nil {
- return 0, nil, fmt.Errorf("file.Read failed in %s : %s", s.config.StateFile(), err)
- }
-
- return
-}
diff --git a/state/state.go b/state/state.go
deleted file mode 100644
index d28a701..0000000
--- a/state/state.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package state
-
-import (
- "bareos-zabbix-check/config"
- "bareos-zabbix-check/job"
-)
-
-// maxNameLength : the maximum length of a string, hard coded in bareos
-const maxNameLength = 128
-
-// State is an object for manipulating a bareos state file
-type State struct {
- config *config.Config
- header header
- jobs []job.Job
-}
-
-// Jobs returns the jobs from the state file
-func (s *State) Jobs() []job.Job {
- return s.jobs
-}
diff --git a/utils/clen.go b/utils/clen.go
new file mode 100644
index 0000000..17d1c4b
--- /dev/null
+++ b/utils/clen.go
@@ -0,0 +1,11 @@
+package utils
+
+// Clen returns the length of a null terminated string like in C
+func Clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
diff --git a/utils/clen_test.go b/utils/clen_test.go
new file mode 100644
index 0000000..19361b0
--- /dev/null
+++ b/utils/clen_test.go
@@ -0,0 +1,26 @@
+package utils
+
+import "testing"
+
+func TestClen(t *testing.T) {
+ normalString := append([]byte("abcd"), 0)
+ type args struct {
+ n []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ want int
+ }{
+ {"empty string", args{}, 0},
+ {"normal string", args{normalString}, 4},
+ {"non null terminated string", args{[]byte("abcd")}, 4},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := Clen(tt.args.n); got != tt.want {
+ t.Errorf("Clen() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}