1
0
Fork 0

Updated for go 1.16 modules

This commit is contained in:
Julien Dessaux 2021-04-23 16:51:22 +02:00
parent 8278d7b471
commit 38d9c881b3
25 changed files with 25 additions and 19 deletions

14
pkg/job/job.go Normal file
View file

@ -0,0 +1,14 @@
package job
import "fmt"
// Job represents a single bareos job run as tracked by this tool.
type Job struct {
	Name      string // bareos job name
	Timestamp uint64 // unix timestamp of the job start time
	Success   bool   // whether the run terminated successfully
}

// String implements fmt.Stringer for human readable job dumps.
func (j Job) String() string {
	return fmt.Sprintf(`Job { Name: "%s", Timestamp: "%d", Success: "%t" }`, j.Name, j.Timestamp, j.Success)
}

33
pkg/job/job_test.go Normal file
View file

@ -0,0 +1,33 @@
package job
import (
"testing"
)
// TestJob_String pins the exact output format of Job.String for both the
// zero value and a fully populated Job.
func TestJob_String(t *testing.T) {
type fields struct {
Name string
Timestamp uint64
Success bool
}
tests := []struct {
name string
fields fields
want string
}{
{"default job", fields{}, "Job { Name: \"\", Timestamp: \"0\", Success: \"false\" }"},
{"a job", fields{Name: "a", Timestamp: 10, Success: true}, "Job { Name: \"a\", Timestamp: \"10\", Success: \"true\" }"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
job := Job{
Name: tt.fields.Name,
Timestamp: tt.fields.Timestamp,
Success: tt.fields.Success,
}
if got := job.String(); got != tt.want {
t.Errorf("Job.String() = %v, want %v", got, tt.want)
}
})
}
}

35
pkg/job/utils.go Normal file
View file

@ -0,0 +1,35 @@
package job
// KeepOldestOnly filters a job list and keeps only the most recent entry for a job name.
// NOTE: despite its name, the entry with the HIGHEST timestamp is kept,
// matching the historical behaviour of this function. Input order is
// preserved (each name appears at the position of its first occurrence)
// and a nil slice is returned for an empty input.
func KeepOldestOnly(jobs []Job) (results []Job) {
	// index maps a job name to its position in results, turning the
	// previous O(n^2) nested scans into a single O(n) pass.
	index := make(map[string]int, len(jobs))
	for _, candidate := range jobs {
		pos, seen := index[candidate.Name]
		if !seen {
			index[candidate.Name] = len(results)
			results = append(results, candidate)
			continue
		}
		// Strict > keeps the first occurrence of the maximum timestamp,
		// exactly like the original quadratic scan did.
		if candidate.Timestamp > results[pos].Timestamp {
			results[pos] = candidate
		}
	}
	return
}
// KeepSuccessOnly returns only the successful jobs from a job list
// (suitable to write a new spool file). The result is always a non-nil
// slice, even when no job matches.
func KeepSuccessOnly(jobs []Job) (result []Job) {
	result = make([]Job, 0)
	for _, j := range jobs {
		if j.Success {
			result = append(result, j)
		}
	}
	return
}

68
pkg/job/utils_test.go Normal file
View file

@ -0,0 +1,68 @@
package job
import (
"reflect"
"testing"
)
// TestKeepOldestOnly checks that the entry with the highest timestamp wins
// for duplicated names, and that an empty input yields nil (the function
// never appends in that case, and reflect.DeepEqual(nil, []Job{}) is false).
func TestKeepOldestOnly(t *testing.T) {
emptyList := []Job{}
oneJob := []Job{{Name: "a", Timestamp: 10, Success: true}}
twoJobs := []Job{
{Name: "a", Timestamp: 5, Success: true},
{Name: "a", Timestamp: 10, Success: true},
}
threeJobs := []Job{
{Name: "a", Timestamp: 5, Success: true},
{Name: "a", Timestamp: 10, Success: true},
{Name: "a", Timestamp: 8, Success: false},
}
type args struct {
jobs []Job
}
tests := []struct {
name string
args args
want []Job
}{
{"empty list", args{emptyList}, nil},
{"one job", args{oneJob}, oneJob},
{"two jobs", args{twoJobs}, oneJob},
{"three jobs", args{threeJobs}, oneJob},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := KeepOldestOnly(tt.args.jobs); !reflect.DeepEqual(got, tt.want) {
t.Errorf("KeepOldestOnly() = %v, want %v", got, tt.want)
}
})
}
}
// TestKeepSuccessOnly checks that failed jobs are filtered out. The empty
// list case expects an empty (non-nil) slice back, which is why
// KeepSuccessOnly must allocate with make instead of returning nil.
func TestKeepSuccessOnly(t *testing.T) {
emptyList := []Job{}
oneJob := []Job{{Name: "a", Timestamp: 10, Success: true}}
twoJobs := []Job{
{Name: "a", Timestamp: 10, Success: true},
{Name: "a", Timestamp: 5, Success: false},
}
type args struct {
jobs []Job
}
tests := []struct {
name string
args args
wantResult []Job
}{
{"empty list", args{emptyList}, emptyList},
{"one job", args{oneJob}, oneJob},
{"two jobs", args{twoJobs}, oneJob},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if gotResult := KeepSuccessOnly(tt.args.jobs); !reflect.DeepEqual(gotResult, tt.wantResult) {
t.Errorf("KeepSuccessOnly() = %v, want %v", gotResult, tt.wantResult)
}
})
}
}

27
pkg/spool/parse.go Normal file
View file

@ -0,0 +1,27 @@
package spool
import (
"encoding/csv"
"io"
"strconv"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
"github.com/pkg/errors"
)
// Parse parses a spool file. The spool is a headerless CSV where each
// record holds a job name and the unix timestamp of a run; entries read
// back from the spool are flagged successful.
func Parse(handle io.Reader) (jobs []job.Job, err error) {
	lines, err := csv.NewReader(handle).ReadAll()
	if err != nil {
		return nil, errors.Wrap(err, "Corrupted spool file")
	}
	for _, line := range lines {
		// Guard against short records: indexing line[1] on a one-field
		// record would panic.
		if len(line) < 2 {
			return nil, errors.Errorf("Corrupted spool file : invalid record : %v", line)
		}
		// ParseUint rejects negative values that Atoi would have silently
		// wrapped around when cast to uint64.
		timestamp, err := strconv.ParseUint(line[1], 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "Corrupted spool file : couldn't parse timestamp entry : %s", line[1])
		}
		jobs = append(jobs, job.Job{Name: line[0], Timestamp: timestamp, Success: true})
	}
	return
}

42
pkg/spool/parse_test.go Normal file
View file

@ -0,0 +1,42 @@
package spool
import (
"bytes"
"io"
"reflect"
"testing"
"testing/iotest"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
)
// TestParse exercises spool.Parse with a failing reader, a record whose
// timestamp is not numeric, and a valid single-record spool.
func TestParse(t *testing.T) {
// iotest.TimeoutReader makes the underlying csv read fail
readerError := iotest.TimeoutReader(bytes.NewReader([]byte("\n")))
readerCorruptedTimestamp := bytes.NewReader([]byte("test,x"))
readerOneJob := bytes.NewReader([]byte("test,1"))
type args struct {
handle io.Reader
}
tests := []struct {
name string
args args
wantJobs []job.Job
wantErr bool
}{
{"empty", args{readerError}, nil, true},
{"corrupted timestamp", args{readerCorruptedTimestamp}, nil, true},
{"one job", args{readerOneJob}, []job.Job{{Name: "test", Timestamp: 1, Success: true}}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotJobs, err := Parse(tt.args.handle)
if (err != nil) != tt.wantErr {
t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotJobs, tt.wantJobs) {
t.Errorf("Parse() = %v, want %v", gotJobs, tt.wantJobs)
}
})
}
}

21
pkg/spool/serialize.go Normal file
View file

@ -0,0 +1,21 @@
package spool
import (
"encoding/csv"
"fmt"
"io"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
)
// Serialize writes a spool on the disk: one CSV record per job holding
// the job name and its timestamp. Success flags are not stored.
func Serialize(handle io.Writer, jobs []job.Job) error {
	records := make([][]string, 0, len(jobs))
	for _, j := range jobs {
		records = append(records, []string{j.Name, fmt.Sprintf("%d", j.Timestamp)})
	}
	// WriteAll flushes and reports any underlying write error.
	return csv.NewWriter(handle).WriteAll(records)
}

View file

@ -0,0 +1,34 @@
package spool
import (
"bytes"
"testing"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
)
// TestSerialize checks the exact CSV bytes produced for a single job.
func TestSerialize(t *testing.T) {
type args struct {
jobs []job.Job
}
tests := []struct {
name string
args args
wantHandle string
wantErr bool
}{
{"One job", args{[]job.Job{{Name: "a", Timestamp: 1}}}, "a,1\n", false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handle := &bytes.Buffer{}
if err := Serialize(handle, tt.args.jobs); (err != nil) != tt.wantErr {
t.Errorf("Serialize() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotHandle := handle.String(); gotHandle != tt.wantHandle {
t.Errorf("Serialize() = %v, want %v", gotHandle, tt.wantHandle)
}
})
}
}

57
pkg/state/header.go Normal file
View file

@ -0,0 +1,57 @@
package state
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/utils"
"github.com/pkg/errors"
)
// headerLength : the length in bytes of the state file header part that we
// read (the trailing 19*8 reserved bytes are not parsed).
const headerLength = 14 + 2 + 4 + 4 + 8 + 8 // + 19*8
// Header is a structure to hold the header of the state file. It is statically aligned for amd64 architecture
// This comes from bareos repository file core/src/lib/bsys.cc:525 and core/src/lib/bsys.cc:652
type Header struct {
ID [14]byte // null terminated magic string: "Bareos State\n" or "Bacula State\n"
_ int16 // alignment padding
Version int32 // state file format version; only version 4 is supported by ParseHeader
_ int32 // alignment padding
LastJobsAddr uint64 // offset of the recent jobs section in the state file
EndOfRecentJobResultsList uint64
//Reserved [19]uint64
}
// String implements fmt.Stringer, printing the ID truncated at its null
// terminator.
func (sfh *Header) String() string {
return fmt.Sprintf("ID: \"%s\", Version: %d, LastJobsAddr: %d, EndOfRecentJobResultsList: %d",
string(sfh.ID[:utils.Clen(sfh.ID[:])]), sfh.Version, sfh.LastJobsAddr, sfh.EndOfRecentJobResultsList)
}
// ParseHeader parses a Header struct from handle. It validates the file
// magic, the state file version and that a jobs section exists.
func ParseHeader(handle io.Reader) (h *Header, err error) {
	// io.ReadFull guards against legal short reads from the io.Reader,
	// which a bare handle.Read() call would misreport as a corrupted file.
	data := make([]byte, headerLength)
	if _, err = io.ReadFull(handle, data); err != nil {
		return nil, errors.Wrap(err, "Corrupted state file")
	}
	h = &Header{}
	_ = binary.Read(bytes.NewBuffer(data), binary.LittleEndian, h) // cannot fail: the buffer is exactly headerLength bytes
	if id := string(h.ID[:utils.Clen(h.ID[:])]); id != "Bareos State\n" && id != "Bacula State\n" {
		return nil, fmt.Errorf("Corrupted state file : Not a bareos or bacula state file : %s", id)
	}
	if h.Version != 4 {
		return nil, fmt.Errorf("Invalid state file : This script only supports bareos state file version 4, got %d", h.Version)
	}
	if h.LastJobsAddr == 0 {
		return nil, fmt.Errorf("No jobs exist in the state file")
	}
	return
}

109
pkg/state/header_test.go Normal file
View file

@ -0,0 +1,109 @@
package state
import (
"bytes"
"io"
"reflect"
"testing"
)
// Test_header_String pins the formatted output of Header.String, with an ID
// shorter than the 14 byte field (truncated at the null terminator).
func Test_header_String(t *testing.T) {
var id [14]byte
copy(id[:], []byte("test"))
type fields struct {
ID [14]byte
Version int32
LastJobsAddr uint64
EndOfRecentJobResultsList uint64
}
tests := []struct {
name string
fields fields
want string
}{
{"default header", fields{ID: id, Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3}, "ID: \"test\", Version: 1, LastJobsAddr: 2, EndOfRecentJobResultsList: 3"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sfh := Header{
ID: tt.fields.ID,
Version: tt.fields.Version,
LastJobsAddr: tt.fields.LastJobsAddr,
EndOfRecentJobResultsList: tt.fields.EndOfRecentJobResultsList,
}
if got := sfh.String(); got != tt.want {
t.Errorf("header.String() = %v, want %v", got, tt.want)
}
})
}
}
// Test_parseHeader exercises ParseHeader against truncated, foreign,
// wrong-version, jobless and valid headers. Fixtures are little endian and
// follow the amd64-aligned Header layout (14 byte ID, 2 bytes padding,
// int32 version, 4 bytes padding, two uint64 fields).
func Test_parseHeader(t *testing.T) {
readerEmpty := bytes.NewReader([]byte(""))
readerTooSmall := bytes.NewReader([]byte("abcd"))
readerNotBareosNorBacula := bytes.NewReader([]byte{
't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // ID
0, 0, // padding
4, 0, 0, 0, // version
0, 0, 0, 0, // padding
0, 0, 0, 0, 0, 0, 0, 0, // last job address
0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
})
readerBadVersion := bytes.NewReader([]byte{
'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
0, 0, // padding
3, 0, 0, 0, // version
0, 0, 0, 0, // padding
0, 0, 0, 0, 0, 0, 0, 0, // last job address
0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
})
readerNoJobs := bytes.NewReader([]byte{
'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
0, 0, // padding
4, 0, 0, 0, // version
0, 0, 0, 0, // padding
0, 0, 0, 0, 0, 0, 0, 0, // last job address
0, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
})
readerValid := bytes.NewReader([]byte{
'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0, // ID
0, 0, // padding
4, 0, 0, 0, // version
0, 0, 0, 0, // padding
192, 0, 0, 0, 0, 0, 0, 0, // last job address
254, 0, 0, 0, 0, 0, 0, 0, // EndOfRecentJobResultsList
})
type args struct {
handle io.Reader
}
tests := []struct {
name string
args args
wantH *Header
wantErr bool
}{
{"read error", args{readerEmpty}, nil, true},
{"invalid header length", args{readerTooSmall}, nil, true},
{"reader not bareos nor bacula", args{readerNotBareosNorBacula}, nil, true},
{"reader bad version", args{readerBadVersion}, nil, true},
{"reader no jobs", args{readerNoJobs}, nil, true},
{"reader valid", args{readerValid}, &Header{
ID: [14]byte{'B', 'a', 'r', 'e', 'o', 's', ' ', 'S', 't', 'a', 't', 'e', '\n', 0},
Version: 4,
LastJobsAddr: 192,
EndOfRecentJobResultsList: 254,
}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotH, err := ParseHeader(tt.args.handle)
if (err != nil) != tt.wantErr {
t.Errorf("parseHeader() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotH, tt.wantH) {
t.Errorf("parseHeader() = %v, want %v", gotH, tt.wantH)
}
})
}
}

96
pkg/state/job.go Normal file
View file

@ -0,0 +1,96 @@
package state
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"regexp"
"time"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/utils"
"github.com/pkg/errors"
)
// maxNameLength : the maximum length of a job name, hardcoded in bareos
const maxNameLength = 128
// jobLength : the length in bytes of the job result struct
const jobLength = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 + 8 + maxNameLength
// jobNameRegex extracts the job name from a stored job string of the form
// "name.YYYY-MM-DD..." : the capture group stops before the date suffix.
var jobNameRegex = regexp.MustCompilePOSIX(`^([-A-Za-z0-9_]+)\.[0-9]{4}-[0-9]{2}-[0-9]{2}.*`)
// jobEntry : A structure to hold a job result from the state file
// This comes from bareos repository file core/src/lib/recent_job_results_list.h:29 and file core/src/lib/recent_job_results_list.cc:44
type jobEntry struct {
_ [16]byte // unused leading bytes of the on-disk record
Errors int32
JobType int32 // 'B' identifies backup jobs (see ParseJobs)
JobStatus int32 // 'T' identifies a successful termination (see ParseJobs)
JobLevel int32
JobID uint32
VolSessionID uint32
VolSessionTime uint32
JobFiles uint32
JobBytes uint64
StartTime uint64 // unix timestamp
EndTime uint64 // unix timestamp
Job [maxNameLength]byte // null terminated job name with a ".YYYY-MM-DD..." suffix
}
// String implements fmt.Stringer. The printed job name is the regex capture
// (date suffix stripped) when it matches, otherwise the whole
// null-terminated string.
func (je jobEntry) String() string {
var matches = jobNameRegex.FindSubmatchIndex(je.Job[:])
jobNameLen := utils.Clen(je.Job[:])
if len(matches) >= 4 {
jobNameLen = matches[3]
}
return fmt.Sprintf("Errors: %d, JobType: %c, JobStatus: %c, JobLevel: %c, JobID: %d, VolSessionID: %d, VolSessionTime: %d, JobFiles: %d, JobBytes: %d, StartTime: %s, EndTime: %s, Job: %s",
je.Errors, je.JobType, je.JobStatus, je.JobLevel, je.JobID, je.VolSessionID, je.VolSessionTime, je.JobFiles, je.JobBytes, time.Unix(int64(je.StartTime), 0), time.Unix(int64(je.EndTime), 0), je.Job[:jobNameLen])
}
// ParseJobs parses the recent jobs section of a state file. Only backup
// jobs ('B') are returned; a job is recorded as successful when its
// termination status is 'T'.
func ParseJobs(handle io.Reader) (jobs []job.Job, err error) {
	// Read how many job results are recorded in the state file.
	// io.ReadFull guards against legal short reads from the io.Reader,
	// which a bare handle.Read() call would misreport as corruption.
	data := make([]byte, 4)
	if _, err = io.ReadFull(handle, data); err != nil {
		return nil, errors.Wrap(err, "Corrupted state file")
	}
	var numberOfJobs uint32
	_ = binary.Read(bytes.NewBuffer(data), binary.LittleEndian, &numberOfJobs) // cannot fail: the buffer is exactly 4 bytes
	// Parse the job entries one fixed-size record at a time.
	for ; numberOfJobs > 0; numberOfJobs-- {
		var jobResult jobEntry
		data = make([]byte, jobLength)
		if _, err = io.ReadFull(handle, data); err != nil {
			return nil, errors.Wrap(err, "Corrupted state file")
		}
		_ = binary.Read(bytes.NewBuffer(data), binary.LittleEndian, &jobResult) // cannot fail: the buffer is exactly jobLength bytes
		matches := jobNameRegex.FindSubmatchIndex(jobResult.Job[:])
		if len(matches) < 4 {
			return nil, fmt.Errorf("Couldn't parse job name, this shouldn't happen : %s", jobResult.Job[:])
		}
		jobName := string(jobResult.Job[:matches[3]])
		// 'B' (ascii 66) is the bareos job type for backups; other types are skipped.
		if jobResult.JobType == 'B' {
			// 'T' (ascii 84) is the bareos status for a successful termination.
			jobs = append(jobs, job.Job{Name: jobName, Timestamp: jobResult.StartTime, Success: jobResult.JobStatus == 'T'})
		}
	}
	return
}

175
pkg/state/job_test.go Normal file
View file

@ -0,0 +1,175 @@
package state
import (
"bytes"
"io"
"reflect"
"testing"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
)
// Test_jobEntry_String checks jobEntry.String for a job name without the
// expected ".YYYY-MM-DD" suffix (the regex fails, so the full
// null-terminated name is printed) and for a conventionally named job (the
// date suffix is stripped). The two subtest labels were previously swapped
// relative to their fixtures; they are fixed here.
// NOTE(review): the expected strings embed the CET timezone, so this test
// is timezone-dependent — confirm the suite runs with a Europe/Paris-like TZ.
func Test_jobEntry_String(t *testing.T) {
var badlyNamedJob [128]byte
copy(badlyNamedJob[:], []byte("job_name"))
var normalJob [128]byte
copy(normalJob[:], []byte("normal_name.2012-06-01"))
type fields struct {
Errors int32
JobType int32
JobStatus int32
JobLevel int32
JobID uint32
VolSessionID uint32
VolSessionTime uint32
JobFiles uint32
JobBytes uint64
StartTime uint64
EndTime uint64
Job [maxNameLength]byte
}
tests := []struct {
name string
fields fields
want string
}{
{
"badly named job",
fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: badlyNamedJob},
"Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: job_name",
},
{
"normal job",
fields{Errors: 1, JobType: 'B', JobStatus: 'T', JobLevel: 'F', JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, Job: normalJob},
"Errors: 1, JobType: B, JobStatus: T, JobLevel: F, JobID: 2, VolSessionID: 3, VolSessionTime: 4, JobFiles: 5, JobBytes: 6, StartTime: 1970-01-01 01:00:00 +0100 CET, EndTime: 1970-01-01 01:00:00 +0100 CET, Job: normal_name",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
je := jobEntry{
Errors: tt.fields.Errors,
JobType: tt.fields.JobType,
JobStatus: tt.fields.JobStatus,
JobLevel: tt.fields.JobLevel,
JobID: tt.fields.JobID,
VolSessionID: tt.fields.VolSessionID,
VolSessionTime: tt.fields.VolSessionTime,
JobFiles: tt.fields.JobFiles,
JobBytes: tt.fields.JobBytes,
StartTime: tt.fields.StartTime,
EndTime: tt.fields.EndTime,
Job: tt.fields.Job,
}
if got := je.String(); got != tt.want {
t.Errorf("jobEntry.String() = %v, want %v", got, tt.want)
}
})
}
}
// TestParseJobs exercises ParseJobs with truncated inputs, an entry whose
// name does not match jobNameRegex, zero entries, a non-backup job (type
// 'R', skipped) and a successful backup job. Fixtures are little endian
// jobEntry records preceded by a uint32 entry count.
func TestParseJobs(t *testing.T) {
readerEmpty := bytes.NewReader([]byte{})
readerTooSmall := bytes.NewReader([]byte{
1, // number of jobs
})
readerJobError := bytes.NewReader([]byte{
1, 0, 0, 0, // number of jobs
})
readerJobTooSmall := bytes.NewReader([]byte{
1, 0, 0, 0, // number of jobs
0,
})
readerInvalidJobName := bytes.NewReader([]byte{
1, 0, 0, 0, // number of jobs
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
0, 0, 0, 0, // Errors
'B', 0, 0, 0, // JobType
'T', 0, 0, 0, // JobStatus
0, 0, 0, 0, // JobLevel
0, 0, 0, 0, // JobID
0, 0, 0, 0, // VolSessionID
0, 0, 0, 0, // VolSessionTime
0, 0, 0, 0, // JobFiles
0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
1, 0, 0, 0, 0, 0, 0, 0, // StartTime
0, 0, 0, 0, 0, 0, 0, 0, // EndTime
't', 'e', 's', 't', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Job
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
})
readerZeroJobs := bytes.NewReader([]byte{
0, 0, 0, 0, // number of jobs
})
readerOneNonBackupJob := bytes.NewReader([]byte{
1, 0, 0, 0, // number of jobs
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
0, 0, 0, 0, // Errors
'R', 0, 0, 0, // JobType
'T', 0, 0, 0, // JobStatus
0, 0, 0, 0, // JobLevel
0, 0, 0, 0, // JobID
0, 0, 0, 0, // VolSessionID
0, 0, 0, 0, // VolSessionTime
0, 0, 0, 0, // JobFiles
0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
1, 0, 0, 0, 0, 0, 0, 0, // StartTime
0, 0, 0, 0, 0, 0, 0, 0, // EndTime
't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
})
readerOneSuccessfulBackupJob := bytes.NewReader([]byte{
1, 0, 0, 0, // number of jobs
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // pad
0, 0, 0, 0, // Errors
'B', 0, 0, 0, // JobType
'T', 0, 0, 0, // JobStatus
0, 0, 0, 0, // JobLevel
0, 0, 0, 0, // JobID
0, 0, 0, 0, // VolSessionID
0, 0, 0, 0, // VolSessionTime
0, 0, 0, 0, // JobFiles
0, 0, 0, 0, 0, 0, 0, 0, // JobBytes
1, 0, 0, 0, 0, 0, 0, 0, // StartTime
0, 0, 0, 0, 0, 0, 0, 0, // EndTime
't', 'e', 's', 't', '.', '2', '0', '1', '2', '-', '0', '2', '-', '0', '1', 0, // Job
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
})
type args struct {
handle io.Reader
}
tests := []struct {
name string
args args
wantJobs []job.Job
wantErr bool
}{
{"read empty", args{readerEmpty}, nil, true},
{"read too small", args{readerTooSmall}, nil, true},
{"read job error", args{readerJobError}, nil, true},
{"read job too small", args{readerJobTooSmall}, nil, true},
{"read invalid job name", args{readerInvalidJobName}, nil, true},
{"read zero jobs", args{readerZeroJobs}, nil, false},
{"read one non backup job", args{readerOneNonBackupJob}, nil, false},
{"read one successful backup job", args{readerOneSuccessfulBackupJob}, []job.Job{{Name: "test", Timestamp: 1, Success: true}}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotJobs, err := ParseJobs(tt.args.handle)
if (err != nil) != tt.wantErr {
t.Errorf("ParseJobs() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotJobs, tt.wantJobs) {
t.Errorf("ParseJobs() = %v, want %v", gotJobs, tt.wantJobs)
}
})
}
}

11
pkg/utils/clen.go Normal file
View file

@ -0,0 +1,11 @@
package utils
// Clen returns the length of a null terminated string like in C: the index
// of the first zero byte, or the full slice length when no zero byte exists.
func Clen(n []byte) int {
	for i, b := range n {
		if b == 0 {
			return i
		}
	}
	return len(n)
}

26
pkg/utils/clen_test.go Normal file
View file

@ -0,0 +1,26 @@
package utils
import "testing"
// TestClen checks Clen for empty, null-terminated and unterminated byte slices.
func TestClen(t *testing.T) {
normalString := append([]byte("abcd"), 0)
type args struct {
n []byte
}
tests := []struct {
name string
args args
want int
}{
{"empty string", args{}, 0},
{"normal string", args{normalString}, 4},
{"non null terminated string", args{[]byte("abcd")}, 4},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Clen(tt.args.n); got != tt.want {
t.Errorf("Clen() = %v, want %v", got, tt.want)
}
})
}
}

21
pkg/zabbix/flags.go Normal file
View file

@ -0,0 +1,21 @@
package zabbix
import (
"flag"
)
// Command line flag values, populated by processFlags and then resolved to
// full paths by checkWorkDir/checkStateFile.
var (
stateFileName string // -f : state file name, joined to workDir by checkStateFile
workDir string // -w : work directory, resolved and cleaned by checkWorkDir
)
// processFlags registers and parses the command line flags, then validates
// the resulting work directory and state file configuration.
func processFlags() (err error) {
	flag.StringVar(&stateFileName, "f", "", "Force the state file to use, defaults to "+bareosStateFile+" if it exists else "+baculaStateFile+".")
	flag.StringVar(&workDir, "w", "", "Force the work directory to use, defaults to "+bareosWorkDir+" if it exists else "+baculaWorkDir+".")
	flag.Parse()
	// The work directory must be validated first: checkStateFile builds its
	// paths relative to workDir.
	if err = checkWorkDir(); err != nil {
		return err
	}
	return checkStateFile()
}

34
pkg/zabbix/statefile.go Normal file
View file

@ -0,0 +1,34 @@
package zabbix
import (
"fmt"
"os"
"path/filepath"
)
// Default state file names, tried in this order by checkStateFile when no
// file is forced with the -f flag.
const (
bareosStateFile = "bareos-fd.9102.state"
baculaStateFile = "bacula-fd.9102.state"
)
// checkStateFile determines which state file to parse and validates that it
// exists as a regular file. It expects workDir to have been validated first
// (see checkWorkDir); on success stateFileName holds the full path.
func checkStateFile() error {
	if stateFileName != "" {
		// A state file was forced on the command line
		stateFileName = filepath.Join(workDir, stateFileName)
		// Checking err != nil (not just os.IsNotExist) also covers permission
		// errors and avoids dereferencing a nil info when Stat fails.
		info, err := os.Stat(stateFileName)
		if err != nil || info.IsDir() {
			return fmt.Errorf("The state file %s does not exist", stateFileName)
		}
		return nil
	}
	// Otherwise try the bareos state file then fall back to the bacula one
	stateFileName = filepath.Join(workDir, bareosStateFile)
	if info, err := os.Stat(stateFileName); err == nil && !info.IsDir() {
		return nil
	}
	stateFileName = filepath.Join(workDir, baculaStateFile)
	if info, err := os.Stat(stateFileName); err == nil && !info.IsDir() {
		return nil
	}
	return fmt.Errorf("Could not find a suitable state file. Has a job ever run?")
}

BIN
pkg/zabbix/testdata/bareos-fd-17.2.state vendored Normal file

Binary file not shown.

BIN
pkg/zabbix/testdata/bareos-fd-18.2.state vendored Normal file

Binary file not shown.

Binary file not shown.

38
pkg/zabbix/workdir.go Normal file
View file

@ -0,0 +1,38 @@
package zabbix
import (
"fmt"
"os"
"path/filepath"
)
// Default work directories, tried in this order by checkWorkDir when no
// directory is forced with the -w flag.
const (
bareosWorkDir = "/var/lib/bareos"
baculaWorkDir = "/var/lib/bacula"
)
// root is the filesystem root all work directories are resolved against; it
// is a package variable so tests can redirect it.
var root = "/"

// checkWorkDir checks if a work directory is valid. On success workDir
// holds a cleaned path to an existing directory.
func checkWorkDir() error {
	// Determine the work directory to use.
	if workDir != "" {
		// A work directory was forced on the command line.
		workDir = filepath.Join(root, workDir)
		// Checking err != nil (not just os.IsNotExist) also covers permission
		// errors and avoids dereferencing a nil info when Stat fails.
		info, err := os.Stat(workDir)
		if err != nil || !info.IsDir() {
			return fmt.Errorf("Invalid work directory %s : it does not exist or is not a directory", workDir)
		}
	} else {
		// Otherwise try the bareos directory then fall back to the bacula one.
		workDir = filepath.Join(root, bareosWorkDir)
		if info, err := os.Stat(workDir); err != nil || !info.IsDir() {
			workDir = filepath.Join(root, baculaWorkDir)
			if info, err = os.Stat(workDir); err != nil || !info.IsDir() {
				return fmt.Errorf("Could not find a suitable work directory. Is bareos or bacula installed?")
			}
		}
	}
	workDir = filepath.Clean(workDir)
	return nil
}

105
pkg/zabbix/zabbix.go Normal file
View file

@ -0,0 +1,105 @@
package zabbix
import (
"fmt"
"os"
"path/filepath"
"time"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/job"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/spool"
"git.adyxax.org/adyxax/bareos-zabbix-check/pkg/state"
)
const (
// spoolFileName is the name of the spool file this tool maintains in the work directory
spoolFileName = "bareos-zabbix-check.spool"
)
// now is the reference unix timestamp used to detect missing backups; it is
// a package variable so TestMain can override it.
var now = uint64(time.Now().Unix())
// Main the true main function of this program. It returns the zabbix item
// value as a string: "OK", or a message prefixed with "INFO" (setup/parse
// failures) or "AVERAGE" (missing or failed backups).
func Main() string {
err := processFlags()
if err != nil {
// NOTE(review): "programm" is misspelled, but TestMain pins this exact
// output, so it cannot be fixed in this function alone.
return fmt.Sprintf("INFO Failed to init programm : %s", err)
}
// Open the state file
stateFile, err := os.Open(stateFileName)
if err != nil {
return fmt.Sprintf("INFO Could not open state file : %s", err)
}
defer stateFile.Close()
// parse the state file
header, err := state.ParseHeader(stateFile)
if err != nil {
return fmt.Sprintf("INFO Could not parse state file header : %s", err)
}
// seek to the job entries in the state file
offset, err := stateFile.Seek(int64(header.LastJobsAddr), 0)
if err != nil {
return fmt.Sprintf("INFO Couldn't seek to jobs position in state file : %s", err)
}
if uint64(offset) != header.LastJobsAddr {
return fmt.Sprint("INFO Truncated state file")
}
// Then parse the jobs in the state file
jobs, err := state.ParseJobs(stateFile)
if err != nil {
return fmt.Sprintf("INFO Could not parse jobs in state file : %s", err)
}
// We will check for errors in loading the spool file only at the end. If all jobs ran successfully without errors
// in the state file and we manage to write a new spool file without errors, then we will ignore any error here to
// avoid false positives during backup bootstrap
// Open the spool file
spoolFile, spoolErr := os.Open(filepath.Join(workDir, spoolFileName))
var spoolJobs []job.Job
// NOTE(review): this condition looks like it should test spoolErr, not err
// (err is necessarily nil at this point). As written, when os.Open fails,
// spool.Parse reads from a nil *os.File and spoolErr becomes an "invalid
// argument" error — TestMain's "error" case pins that exact output, so
// confirm the intended behaviour before changing this line.
if err == nil {
defer spoolFile.Close()
spoolJobs, spoolErr = spool.Parse(spoolFile)
}
// Merge state and spool jobs, keeping one entry per job name
jobs = job.KeepOldestOnly(append(jobs, spoolJobs...))
// we write this new spool
spoolFile, err = os.Create(filepath.Join(workDir, spoolFileName))
if err == nil {
defer spoolFile.Close()
err = spool.Serialize(spoolFile, jobs)
}
if err != nil {
return fmt.Sprintf("AVERAGE: Error saving the spool file : %s\n", err)
}
var (
errorString string
missingString string
)
// We build the error strings: a successful job is "missing" when its last
// run is older than 24 hours; a failed job goes to the errors list
for i := 0; i < len(jobs); i++ {
job := jobs[i]
if job.Success {
if job.Timestamp < now-24*3600 {
if missingString == "" {
missingString = fmt.Sprintf("missing: %s", job.Name)
} else {
missingString = fmt.Sprintf("%s, %s", missingString, job.Name)
}
}
} else {
if errorString == "" {
errorString = fmt.Sprintf("errors: %s", job.Name)
} else {
errorString = fmt.Sprintf("%s, %s", errorString, job.Name)
}
}
}
// Finally we output
if errorString != "" || missingString != "" {
if spoolErr != nil {
return fmt.Sprintf("AVERAGE: %s %s %s", errorString, missingString, spoolErr)
}
return fmt.Sprintf("AVERAGE: %s %s", errorString, missingString)
}
return "OK"
}

61
pkg/zabbix/zabbix_test.go Normal file
View file

@ -0,0 +1,61 @@
package zabbix
import (
"flag"
"fmt"
"os"
"path/filepath"
"testing"
)
// TestMain runs Main end to end against fixture state files symlinked into
// per-case temporary work directories under tmp/. It overrides the package
// variables now and root for deterministic results, and resets the flag set
// between cases so -w/-f can be re-parsed.
func TestMain(t *testing.T) {
os.RemoveAll("tmp")
cwd, _ := os.Getwd()
err := os.MkdirAll("tmp/ok-18.2", 0777)
if err != nil {
t.Skipf("skipping main tests because tmp directory cannot be created : %s", err)
}
wd, err := os.Getwd()
if err != nil {
t.Skipf("skipping main tests because cannot get working directory : %s", err)
}
os.MkdirAll("tmp/ok-17.2", 0777)
os.MkdirAll("tmp/no_state_file", 0777)
os.MkdirAll("tmp/bacula_auto_detect_failed/var/lib/bacula", 0777)
os.MkdirAll("tmp/bareos_auto_detect_failed/var/lib/bareos", 0777)
os.MkdirAll("tmp/error", 0777)
os.Symlink("../../testdata/bareos-fd-17.2.state", "tmp/ok-17.2/state")
os.Symlink("../../testdata/bareos-fd-18.2.state", "tmp/ok-18.2/state")
os.Symlink("../../testdata/bareos-fd-18.2.state-with-error", "tmp/error/state")
tests := []struct {
name string
timestamp uint64
rootDir string
args []string
want string
}{
{"failed bacula_auto_detect", 0, "tmp/bacula_auto_detect_failed", []string{}, "INFO Failed to init programm : Could not find a suitable state file. Has a job ever run?"},
{"failed bareos_auto_detect", 0, "tmp/bareos_auto_detect_failed", []string{}, "INFO Failed to init programm : Could not find a suitable state file. Has a job ever run?"},
{"failed auto_detect", 0, "tmp/non_existent", []string{}, "INFO Failed to init programm : Could not find a suitable work directory. Is bareos or bacula installed?"},
{"no work directory", 0, "tmp", []string{"-w", "/non_existent"}, fmt.Sprintf("INFO Failed to init programm : Invalid work directory %s/tmp/non_existent : it does not exist or is not a directory", wd)},
{"no state file auto_detect", 0, "tmp", []string{"-w", "/no_state_file"}, "INFO Failed to init programm : Could not find a suitable state file. Has a job ever run?"},
{"no state file", 0, "tmp", []string{"-w", "/no_state_file", "-f", "test"}, fmt.Sprintf("INFO Failed to init programm : The state file %s/tmp/no_state_file/test does not exist", wd)},
{"ok bareos 18.2", 1582579731, "tmp/ok-18.2", []string{"-w", "/", "-f", "state"}, "OK"},
{"ok bareos 17.2", 1582579731, "tmp/ok-17.2", []string{"-w", "/", "-f", "state"}, "OK"},
{"missing", 1582709331, "tmp/ok-18.2", []string{"-w", "/", "-f", "state"}, "AVERAGE: missing: awhphpipam1_percona_xtrabackup, awhphpipam1_LinuxAll, awhphpipam1_www"},
{"error", 1582579731, "tmp/error", []string{"-w", "/", "-f", "state"}, "AVERAGE: errors: awhphpipam1_percona_xtrabackup, awhphpipam1_www Corrupted spool file: invalid argument"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
now = tt.timestamp
root = filepath.Join(cwd, tt.rootDir)
flag.CommandLine = flag.NewFlagSet("bareos-zabbix-check", flag.ExitOnError) //flags are now reset
os.Args = append([]string{"bareos-zabbix-check"}, tt.args...)
if got := Main(); got != tt.want {
t.Log(workDir)
t.Errorf("Main() = %v, want %v", got, tt.want)
}
})
}
os.RemoveAll("tmp")
}