1
0
Fork 0

Added tests to the state package, and reworked the code around that

This commit is contained in:
Julien Dessaux 2020-02-22 11:57:50 +01:00
parent e7456142a9
commit bcfaffac24
11 changed files with 407 additions and 120 deletions

2
go.mod
View file

@ -1,3 +1,5 @@
module bareos-zabbix-check module bareos-zabbix-check
go 1.13 go 1.13
require github.com/pkg/errors v0.9.1

2
go.sum Normal file
View file

@ -0,0 +1,2 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

39
main.go
View file

@ -6,6 +6,7 @@ import (
"bareos-zabbix-check/spool" "bareos-zabbix-check/spool"
"bareos-zabbix-check/state" "bareos-zabbix-check/state"
"fmt" "fmt"
"log"
"os" "os"
"time" "time"
) )
@ -14,22 +15,52 @@ func main() {
var ( var (
config config.Config config config.Config
spool spool.Spool spool spool.Spool
state state.State
errorString string errorString string
missingString string missingString string
) )
config.Init() config.Init()
err := state.Parse(&config) // Open the state file
stateFile, err := os.Open(config.StateFile())
if err != nil { if err != nil {
fmt.Print(err) fmt.Printf("INFO Couldn't open state file : %s", err)
os.Exit(0) os.Exit(0)
} }
defer stateFile.Close()
// parse the state file
header, err := state.ParseHeader(stateFile)
if err != nil {
fmt.Printf("INFO Could not parse state file header : %s", err)
os.Exit(0)
}
if config.Verbose() {
log.Printf("Parsed header: %+s\n", header)
}
// seek to the job entries in the state file
offset, err := stateFile.Seek(int64(header.LastJobsAddr), 0)
if err != nil {
fmt.Printf("INFO Couldn't seek to jobs position in state file : %s", err)
}
if uint64(offset) != header.LastJobsAddr {
fmt.Print("INFO Truncated state file")
}
// Then parse the jobs in the state file
jobs, err := state.ParseJobs(stateFile)
if err != nil {
fmt.Printf("INFO Could not parse jobs in state file : %s", err)
}
if config.Verbose() {
log.Printf("%d jobs found in state file\n", len(jobs))
for i := 0; i < len(jobs); i++ {
log.Print(jobs[i])
}
}
// We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors // We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
// in the state file and we manage to write a new spool file without errors, then we will ignore any error here to // in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
// avoid false positives during backup bootstrap // avoid false positives during backup bootstrap
err = spool.Load(&config) err = spool.Load(&config)
jobs := job.KeepOldestOnly(append(state.Jobs(), spool.Jobs()...)) jobs = job.KeepOldestOnly(append(jobs, spool.Jobs()...))
spool.SetJobs(job.KeepSuccessOnly(jobs)) spool.SetJobs(job.KeepSuccessOnly(jobs))
// we write this new spool // we write this new spool

View file

@ -1,57 +1,57 @@
package state package state
import ( import (
"bareos-zabbix-check/utils"
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"log" "io"
"os"
"github.com/pkg/errors"
) )
// c.StateFile()HeaderLength : the length of the state file header struct // c.StateFile()HeaderLength : the length of the state file header struct
const headerLength = 14 + 2 + 4 + 4 + 8 + 8 + 19*8 const headerLength = 14 + 2 + 4 + 4 + 8 + 8 // + 19*8
// header : A structure to hold the header of the state file. It is statically aligned for amd64 architecture // Header is a structure to hold the header of the state file. It is statically aligned for amd64 architecture
// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652 // This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
type header struct { type Header struct {
ID [14]byte ID [14]byte
_ int16 _ int16
Version int32 Version int32
_ int32 _ int32
LastJobsAddr uint64 LastJobsAddr uint64
EndOfRecentJobResultsList uint64 EndOfRecentJobResultsList uint64
Reserved [19]uint64 //Reserved [19]uint64
} }
func (sfh header) String() string { func (sfh *Header) String() string {
return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d", sfh.ID[:len(sfh.ID)-2], sfh.Version, sfh.EndOfRecentJobResultsList, sfh.Reserved) return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d",
string(sfh.ID[:utils.Clen(sfh.ID[:])]), sfh.Version, sfh.LastJobsAddr, sfh.EndOfRecentJobResultsList)
} }
func (s *State) parseHeader(file *os.File) (err error) { // ParseHeader parses a Header struct
func ParseHeader(handle io.Reader) (h *Header, err error) {
// Parsing the state file header // Parsing the state file header
n, data, err := s.readNextBytes(file, headerLength) data := make([]byte, headerLength)
n, err := handle.Read(data)
if err != nil { if err != nil {
return fmt.Errorf("INFO Corrupted state file : %s", err) return nil, errors.Wrap(err, "Corrupted state file")
} }
if n != headerLength { if n != headerLength {
return fmt.Errorf("INFO Corrupted state file : invalid header length in %s", s.config.StateFile()) return nil, fmt.Errorf("Corrupted state file : invalid header length")
} }
buffer := bytes.NewBuffer(data) buffer := bytes.NewBuffer(data)
err = binary.Read(buffer, binary.LittleEndian, &s.header) h = &Header{}
if err != nil { _ = binary.Read(buffer, binary.LittleEndian, h) // this call cannot fail since we checked the header length
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on header in %s : %s", s.config.StateFile(), err) if id := string(h.ID[:utils.Clen(h.ID[:])]); id != "Bareos State\n" && id != "Bacula State\n" {
return nil, fmt.Errorf("Corrupted state file : Not a bareos or bacula state file : %s", id)
} }
if s.config.Verbose() { if h.Version != 4 {
log.Printf("Parsed header: %+s\n", s.header) return nil, fmt.Errorf("Invalid state file : This script only supports bareos state file version 4, got %d", h.Version)
} }
if id := string(s.header.ID[:len(s.header.ID)-1]); id != "Bareos State\n" && id != "Bacula State\n" { if h.LastJobsAddr == 0 {
return fmt.Errorf("INFO Corrupted state file : Not a bareos or bacula state file %s", s.config.StateFile()) return nil, fmt.Errorf("No jobs exist in the state file")
}
if s.header.Version != 4 {
return fmt.Errorf("INFO Invalid state file : This script only supports bareos state file version 4, got %d", s.header.Version)
}
if s.header.LastJobsAddr == 0 {
return fmt.Errorf("INFO No jobs exist in the state file")
} }
return return
} }

109
state/header_test.go Normal file
View file

@ -0,0 +1,109 @@
package state
import (
"bytes"
"io"
"reflect"
"testing"
)
// Test_header_String checks the human-readable representation of a Header.
func Test_header_String(t *testing.T) {
	var id [14]byte
	copy(id[:], "test")

	cases := []struct {
		name   string
		header Header
		want   string
	}{
		{
			"default header",
			Header{ID: id, Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3},
			"ID: \"test\", Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3",
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.header.String(); got != tc.want {
				t.Errorf("header.String() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_parseHeader exercises ParseHeader against synthetic state file
// headers: a read error, a truncated header, a non-bareos/bacula magic ID,
// an unsupported version, a header announcing no jobs, and a valid header.
func Test_parseHeader(t *testing.T) {
	// Yields no bytes at all: the header Read fails immediately.
	readerEmpty := bytes.NewReader([]byte(""))
	// Shorter than headerLength: triggers the invalid-header-length check.
	readerTooSmall := bytes.NewReader([]byte("abcd"))
	// Structurally complete header whose ID is neither bareos nor bacula.
	readerNotBareosNorBacula := bytes.NewReader([]byte{
		't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // ID
		0, 0, // padding
		4, 0, 0, 0, // version
		0, 0, 0, 0, // padding
		0, 0, 0, 0, 0, 0, 0, 0, // last job address
		0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
	})
	// Correct magic but version 3: only version 4 is supported.
	readerBadVersion := bytes.NewReader([]byte{
		'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
		0, 0, // padding
		3, 0, 0, 0, // version
		0, 0, 0, 0, // padding
		0, 0, 0, 0, 0, 0, 0, 0, // last job address
		0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
	})
	// Valid magic and version but LastJobsAddr == 0: no jobs recorded.
	readerNoJobs := bytes.NewReader([]byte{
		'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
		0, 0, // padding
		4, 0, 0, 0, // version
		0, 0, 0, 0, // padding
		0, 0, 0, 0, 0, 0, 0, 0, // last job address
		0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
	})
	// Fully valid header; values are little-endian (LastJobsAddr=192,
	// EndOfRecentJobResultsList=254).
	readerValid := bytes.NewReader([]byte{
		'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
		0, 0, // padding
		4, 0, 0, 0, // version
		0, 0, 0, 0, // padding
		192, 0, 0, 0, 0, 0, 0, 0, // last job address
		254, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
	})
	type args struct {
		handle io.Reader
	}
	tests := []struct {
		name    string
		args    args
		wantH   *Header
		wantErr bool
	}{
		{"read error", args{readerEmpty}, nil, true},
		{"invalid header length", args{readerTooSmall}, nil, true},
		{"reader not bareos nor bacula", args{readerNotBareosNorBacula}, nil, true},
		{"reader bad version", args{readerBadVersion}, nil, true},
		{"reader no jobs", args{readerNoJobs}, nil, true},
		{"reader valid", args{readerValid}, &Header{
			ID:                        [14]byte{'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0},
			Version:                   4,
			LastJobsAddr:              192,
			EndOfRecentJobResultsList: 254,
		}, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotH, err := ParseHeader(tt.args.handle)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseHeader() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(gotH, tt.wantH) {
				t.Errorf("parseHeader() = %v, want %v", gotH, tt.wantH)
			}
		})
	}
}

View file

@ -2,15 +2,20 @@ package state
import ( import (
"bareos-zabbix-check/job" "bareos-zabbix-check/job"
"bareos-zabbix-check/utils"
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"log" "io"
"os"
"regexp" "regexp"
"time" "time"
"github.com/pkg/errors"
) )
// maxNameLength : the maximum length of a job name, hardcoded in bareos
const maxNameLength = 128
// jobLength : the length of the job result struct // jobLength : the length of the job result struct
const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
@ -19,7 +24,7 @@ var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}
// jobEntry : A structure to hold a job result from the state file // jobEntry : A structure to hold a job result from the state file
// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44 // This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
type jobEntry struct { type jobEntry struct {
Pad [16]byte _ [16]byte
Errors int32 Errors int32
JobType int32 JobType int32
JobStatus int32 JobStatus int32
@ -36,7 +41,7 @@ type jobEntry struct {
func (je jobEntry) String() string { func (je jobEntry) String() string {
var matches = jobNameRegex.FindSubmatchIndex(je.Job[:]) var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
var jobNameLen int jobNameLen := utils.Clen(je.Job[:])
if len(matches) >= 4 { if len(matches) >= 4 {
jobNameLen = matches[3] jobNameLen = matches[3]
} }
@ -44,27 +49,20 @@ func (je jobEntry) String() string {
je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen]) je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
} }
func (s *State) parseJobs(file *os.File) (err error) { // ParseJobs parses the jobs in a state file
// We seek to the jobs position in the state file func ParseJobs(handle io.Reader) (jobs []job.Job, err error) {
file.Seek(int64(s.header.LastJobsAddr), 0)
// We read how many jobs there are in the state file // We read how many jobs there are in the state file
n, data, err := s.readNextBytes(file, 4) data := make([]byte, 4)
n, err := handle.Read(data)
if err != nil { if err != nil {
return fmt.Errorf("INFO Corrupted state file : %s", err) return nil, errors.Wrap(err, "Corrupted state file")
} }
if n != 4 { if n != 4 {
return fmt.Errorf("INFO Corrupted state file : invalid numberOfJobs read length in %s", s.config.StateFile()) return nil, fmt.Errorf("Corrupted state file : invalid numberOfJobs read length")
} }
buffer := bytes.NewBuffer(data) buffer := bytes.NewBuffer(data)
var numberOfJobs uint32 var numberOfJobs uint32
err = binary.Read(buffer, binary.LittleEndian, &numberOfJobs) _ = binary.Read(buffer, binary.LittleEndian, &numberOfJobs) // this call cannot fail since we checked the header length
if err != nil {
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on numberOfJobs in %s : %s", s.config.StateFile(), err)
}
if s.config.Verbose() {
log.Printf("%d jobs found in state file\n", numberOfJobs)
}
// We parse the job entries // We parse the job entries
for ; numberOfJobs > 0; numberOfJobs-- { for ; numberOfJobs > 0; numberOfJobs-- {
@ -72,31 +70,26 @@ func (s *State) parseJobs(file *os.File) (err error) {
jobResult jobEntry jobResult jobEntry
jobName string jobName string
) )
n, data, err = s.readNextBytes(file, jobLength) data := make([]byte, jobLength)
n, err = handle.Read(data)
if err != nil { if err != nil {
return fmt.Errorf("INFO Corrupted state file : %s", err) return nil, errors.Wrap(err, "Corrupted state file")
} }
if n != jobLength { if n != jobLength {
return fmt.Errorf("INFO Corrupted state file : invalid job entry in %s", s.config.StateFile()) return nil, fmt.Errorf("Corrupted state file : invalid job entry")
} }
buffer = bytes.NewBuffer(data) buffer = bytes.NewBuffer(data)
err = binary.Read(buffer, binary.LittleEndian, &jobResult) _ = binary.Read(buffer, binary.LittleEndian, &jobResult) // this call cannot fail since we checked the header length
if err != nil {
return fmt.Errorf("INFO Corrupted state file : binary.Read failed on job entry in %s : %s", s.config.StateFile(), err)
}
matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:]) matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
if len(matches) >= 4 { if len(matches) >= 4 {
jobName = string(jobResult.Job[:matches[3]]) jobName = string(jobResult.Job[:matches[3]])
} else { } else {
return fmt.Errorf("INFO Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:]) return nil, fmt.Errorf("Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
}
if s.config.Verbose() {
log.Printf("Parsed job entry: %s\n", jobResult)
} }
// If the job is of type backup (B == ascii 66) // If the job is of type backup (B == ascii 66)
if jobResult.JobType == 66 { if jobResult.JobType == 66 {
// If the job is of status success JobStatus is equals to 84 (T == ascii 84) // If the job is of status success JobStatus is equals to 84 (T == ascii 84)
s.jobs = append(s.jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84}) jobs = append(jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 84})
} }
} }
return return

174
state/job_test.go Normal file
View file

@ -0,0 +1,174 @@
package state
import (
"bareos-zabbix-check/job"
"bytes"
"io"
"reflect"
"testing"
)
// Test_jobEntry_String checks the human-readable representation of a
// jobEntry. The "badly named job" case uses a name without the expected
// ".YYYY-MM-DD" suffix (the full name is printed as-is); the "normal job"
// case uses a conventionally named job (only the base name before the date
// suffix is printed). The original test had these two case names swapped.
//
// NOTE(review): the expected StartTime/EndTime strings assume the test runs
// in the CET (+0100) timezone, since jobEntry.String formats times in local
// time — confirm, or compute the expectations with time.Unix to make the
// test timezone-independent.
func Test_jobEntry_String(t *testing.T) {
	var badlyNamedJob [128]byte
	copy(badlyNamedJob[:], []byte("job_name"))
	var normalJob [128]byte
	copy(normalJob[:], []byte("normal_name.2012-06-01"))
	type fields struct {
		Errors         int32
		JobType        int32
		JobStatus      int32
		JobLevel       int32
		JobID          uint32
		VolSessionID   uint32
		VolSessionTime uint32
		JobFiles       uint32
		JobBytes       uint64
		StartTime      uint64
		EndTime        uint64
		Job            [maxNameLength]byte
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		{
			// Name lacks the date suffix, so it is printed in full.
			"badly named job",
			fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: badlyNamedJob},
			"Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: job_name",
		},
		{
			// Conventional name: only the part before ".2012-06-01" is printed.
			"normal job",
			fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: normalJob},
			"Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: normal_name",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			je := jobEntry{
				Errors:         tt.fields.Errors,
				JobType:        tt.fields.JobType,
				JobStatus:      tt.fields.JobStatus,
				JobLevel:       tt.fields.JobLevel,
				JobID:          tt.fields.JobID,
				VolSessionID:   tt.fields.VolSessionID,
				VolSessionTime: tt.fields.VolSessionTime,
				JobFiles:       tt.fields.JobFiles,
				JobBytes:       tt.fields.JobBytes,
				StartTime:      tt.fields.StartTime,
				EndTime:        tt.fields.EndTime,
				Job:            tt.fields.Job,
			}
			if got := je.String(); got != tt.want {
				t.Errorf("jobEntry.String() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestParseJobs exercises ParseJobs against synthetic job lists: read
// errors on the job count, a truncated job entry, an unparseable job name,
// an empty list, a non-backup job (which is skipped) and one successful
// backup job.
func TestParseJobs(t *testing.T) {
	// No bytes at all: reading the 4-byte job count fails.
	readerEmpty := bytes.NewReader([]byte{})
	// Fewer than 4 bytes: the job count read is short.
	readerTooSmall := bytes.NewReader([]byte{
		1, // number of jobs
	})
	// Valid count of 1, but no job entry follows: the job read fails.
	readerJobError := bytes.NewReader([]byte{
		1, 0, 0, 0, // number of jobs
	})
	// Valid count of 1, but the job entry itself is truncated.
	readerJobTooSmall := bytes.NewReader([]byte{
		1, 0, 0, 0, // number of jobs
		0,
	})
	// Complete job entry whose name lacks the ".YYYY-MM-DD" suffix,
	// so the job-name regex fails to match.
	readerInvalidJobName := bytes.NewReader([]byte{
		1, 0, 0, 0, // number of jobs
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
		0, 0, 0, 0, // Errors
		'B', 0, 0, 0, // JobType
		'T', 0, 0, 0, // JobStatus
		0, 0, 0, 0, // JobLevel
		0, 0, 0, 0, // JobID
		0, 0, 0, 0, // VolSessionID
		0, 0, 0, 0, // VolSessionTime
		0, 0, 0, 0, // JobFiles
		0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
		1, 0, 0, 0, 0, 0, 0, 0, // StartTime
		0, 0, 0, 0, 0, 0, 0, 0, // EndTime
		't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Job
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	})
	// Zero jobs: succeeds and returns no jobs.
	readerZeroJobs := bytes.NewReader([]byte{
		0, 0, 0, 0, // number of jobs
	})
	// One restore job ('R'): not a backup, so it is filtered out.
	readerOneNonBackupJob := bytes.NewReader([]byte{
		1, 0, 0, 0, // number of jobs
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
		0, 0, 0, 0, // Errors
		'R', 0, 0, 0, // JobType
		'T', 0, 0, 0, // JobStatus
		0, 0, 0, 0, // JobLevel
		0, 0, 0, 0, // JobID
		0, 0, 0, 0, // VolSessionID
		0, 0, 0, 0, // VolSessionTime
		0, 0, 0, 0, // JobFiles
		0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
		1, 0, 0, 0, 0, 0, 0, 0, // StartTime
		0, 0, 0, 0, 0, 0, 0, 0, // EndTime
		't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	})
	// One backup job ('B') with status 'T' (success): kept in the result.
	readerOneSuccessfulBackupJob := bytes.NewReader([]byte{
		1, 0, 0, 0, // number of jobs
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
		0, 0, 0, 0, // Errors
		'B', 0, 0, 0, // JobType
		'T', 0, 0, 0, // JobStatus
		0, 0, 0, 0, // JobLevel
		0, 0, 0, 0, // JobID
		0, 0, 0, 0, // VolSessionID
		0, 0, 0, 0, // VolSessionTime
		0, 0, 0, 0, // JobFiles
		0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
		1, 0, 0, 0, 0, 0, 0, 0, // StartTime
		0, 0, 0, 0, 0, 0, 0, 0, // EndTime
		't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	})
	type args struct {
		handle io.Reader
	}
	tests := []struct {
		name     string
		args     args
		wantJobs []job.Job
		wantErr  bool
	}{
		{"read empty", args{readerEmpty}, nil, true},
		{"read too small", args{readerTooSmall}, nil, true},
		{"read job error", args{readerJobError}, nil, true},
		{"read job too small", args{readerJobTooSmall}, nil, true},
		{"read invalid job name", args{readerInvalidJobName}, nil, true},
		{"read zero jobs", args{readerZeroJobs}, nil, false},
		{"read one non backup job", args{readerOneNonBackupJob}, nil, false},
		{"read one successful backup job", args{readerOneSuccessfulBackupJob}, []job.Job{{Name: "test", Timestamp: 1, Success: true}}, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotJobs, err := ParseJobs(tt.args.handle)
			if (err != nil) != tt.wantErr {
				t.Errorf("ParseJobs() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(gotJobs, tt.wantJobs) {
				t.Errorf("ParseJobs() = %v, want %v", gotJobs, tt.wantJobs)
			}
		})
	}
}

View file

@ -1,40 +0,0 @@
package state
import (
"bareos-zabbix-check/config"
"fmt"
"os"
)
// Parse parses a bareos state file
func (s *State) Parse(c *config.Config) (err error) {
s.config = c
// Open the state file
file, err := os.Open(c.StateFile())
if err != nil {
return fmt.Errorf("INFO Couldn't open state file : %s", err)
}
defer file.Close()
err = s.parseHeader(file)
if err != nil {
return err
}
err = s.parseJobs(file)
if err != nil {
return err
}
return
}
// readNextBytes : Reads the next "number" bytes from a "file", returns the number of bytes actually read as well as the bytes read
func (s *State) readNextBytes(file *os.File, number int) (n int, bytes []byte, err error) {
bytes = make([]byte, number)
n, err = file.Read(bytes)
if err != nil {
return 0, nil, fmt.Errorf("file.Read failed in %s : %s", s.config.StateFile(), err)
}
return
}

View file

@ -1,21 +0,0 @@
package state
import (
"bareos-zabbix-check/config"
"bareos-zabbix-check/job"
)
// maxNameLength : the maximum length of a string, hard coded in bareos
const maxNameLength = 128
// State is an object for manipulating a bareos state file
type State struct {
config *config.Config
header header
jobs []job.Job
}
// Jobs returns the jobs from the state file
func (s *State) Jobs() []job.Job {
return s.jobs
}

11
utils/clen.go Normal file
View file

@ -0,0 +1,11 @@
package utils
// Clen returns the length of a null terminated string like in C: the index
// of the first NUL byte in n, or len(n) when no NUL byte is present.
func Clen(n []byte) int {
	for i, b := range n {
		if b == 0 {
			return i
		}
	}
	return len(n)
}

26
utils/clen_test.go Normal file
View file

@ -0,0 +1,26 @@
package utils
import "testing"
// TestClen verifies Clen on empty, NUL-terminated and unterminated inputs.
func TestClen(t *testing.T) {
	terminated := append([]byte("abcd"), 0)
	type args struct {
		n []byte
	}
	cases := []struct {
		name string
		args args
		want int
	}{
		{"empty string", args{}, 0},
		{"normal string", args{terminated}, 4},
		{"non null terminated string", args{[]byte("abcd")}, 4},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := Clen(tc.args.n); got != tc.want {
				t.Errorf("Clen() = %v, want %v", got, tc.want)
			}
		})
	}
}