Merge branch 'v1.0.0-rc2-rewrite'

This commit is contained in:
Eric Lippmann 2021-05-25 23:31:13 +02:00
commit a5e5adba54
79 changed files with 7545 additions and 7 deletions

65
.github/workflows/go.yml vendored Normal file
View file

@@ -0,0 +1,65 @@
# CI for the Go module: build & race-test on two OSes, plus
# staticcheck, go vet and gofmt verification as separate jobs.
name: Go

# Run on every push to master and on every pull request.
on:
  push:
    branches:
      - master
  pull_request: {}

jobs:
  # Compile (printing escape-analysis diagnostics) and run the tests
  # with the race detector enabled, on macOS and Ubuntu.
  build-test:
    strategy:
      matrix:
        os: [ macos-latest, ubuntu-latest ]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - run: go build -gcflags="-m" ./...
      - run: go test -v -race ./...

  # Static analysis via staticcheck, pinned to an exact commit for
  # reproducible results.
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Run staticcheck ./...
        run: |
          go install honnef.co/go/tools/cmd/staticcheck@4dc1992c9bb4310ba1e98b30c8d7d46444891d3b
          staticcheck ./...

  # go vet: report suspicious constructs.
  vet:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - run: go vet ./...

  # Fail the build if any file is not gofmt-clean; the diff is echoed
  # first so the offending hunks show up in the job log.
  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Run gofmt -d .
        run: |
          fmtvar="$(gofmt -d .)"
          echo "$fmtvar"
          test -z "$fmtvar"

7
.gitignore vendored
View file

@@ -1,10 +1,3 @@
/vendor
# Exclude all hidden files
.*
# Except those related to git(lab) and .mailmap
!.git*
!.gitlab-ci.yml
!.mailmap

247
cmd/icingadb/main.go Normal file
View file

@@ -0,0 +1,247 @@
package main
import (
"context"
"fmt"
"github.com/icinga/icingadb/internal/command"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/icingadb/history"
"github.com/icinga/icingadb/pkg/icingadb/overdue"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"os"
"os/signal"
"sync"
"syscall"
)
// Process exit codes returned by run and passed to os.Exit.
const (
	ExitSuccess = 0
	ExitFailure = 1
)
// main delegates all work to run so that run's deferred cleanup executes
// before the process exits with run's status code (os.Exit skips defers).
func main() {
	code := run()
	os.Exit(code)
}
// run wires up and supervises all Icinga DB components: database and Redis
// connections, HA heartbeat, config/history/overdue/runtime synchronization.
// It blocks until a termination signal arrives or a non-recoverable error
// occurs and returns the process exit code.
func run() int {
	cmd := command.New()
	logger := cmd.Logger
	defer logger.Sync()

	// Print the stack trace of panics whose value carries one
	// (github.com/pkg/errors wraps do).
	// NOTE(review): recover only works on the panicking goroutine, so panics
	// raised in the worker goroutines below still crash without this output.
	defer func() {
		if err := recover(); err != nil {
			type stackTracer interface {
				StackTrace() errors.StackTrace
			}

			if err, ok := err.(stackTracer); ok {
				for _, f := range err.StackTrace() {
					fmt.Printf("%+s:%d\n", f, f)
				}
			}
		}
	}()

	logger.Info("Starting Icinga DB")

	db := cmd.Database()
	defer db.Close()
	{
		logger.Info("Connecting to database")
		err := db.Ping()
		if err != nil {
			panic(errors.Wrap(err, "can't connect to database"))
		}
	}

	rc := cmd.Redis()
	{
		logger.Info("Connecting to Redis")
		_, err := rc.Ping(context.Background()).Result()
		if err != nil {
			panic(errors.Wrap(err, "can't connect to Redis"))
		}
	}

	ctx, cancelCtx := context.WithCancel(context.Background())
	heartbeat := icingaredis.NewHeartbeat(ctx, rc, logger)
	ha := icingadb.NewHA(ctx, db, heartbeat, logger)
	// Closing ha on exit ensures that this instance retracts its heartbeat
	// from the database so that another instance can take over immediately.
	defer ha.Close()
	s := icingadb.NewSync(db, rc, logger)
	hs := history.NewSync(db, rc, logger)
	rt := icingadb.NewRuntimeUpdates(db, rc, logger)
	ods := overdue.NewSync(db, rc, logger)

	// signal.Notify requires a buffered channel: with an unbuffered one, a
	// signal delivered while this goroutine is busy elsewhere is dropped.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// Main loop
	for {
		// Per-HA-cycle context: cancelled on handover so all sync work stops.
		hactx, cancelHactx := context.WithCancel(ctx)
		for hactx.Err() == nil {
			select {
			case <-ha.Takeover():
				logger.Info("Taking over")

				go func() {
					// Re-run the full sync pipeline each time a new config
					// dump interrupts the previous one, until handover.
					for hactx.Err() == nil {
						synctx, cancelSynctx := context.WithCancel(hactx)
						g, synctx := errgroup.WithContext(synctx)
						// WaitGroup for configuration synchronization.
						// Runtime updates must wait for configuration synchronization to complete.
						wg := sync.WaitGroup{}

						dump := icingadb.NewDumpSignals(rc, logger)
						g.Go(func() error {
							logger.Info("Starting config dump signal handling")
							return dump.Listen(synctx)
						})

						// Remember the runtime stream position before the
						// config sync so no runtime update is missed.
						lastRuntimeStreamID, err := rc.StreamLastId(ctx, "icinga:runtime")
						if err != nil {
							panic(err)
						}

						g.Go(func() error {
							select {
							case <-dump.InProgress():
								logger.Info("Icinga 2 started a new config dump, waiting for it to complete")
								cancelSynctx()

								return nil
							case <-synctx.Done():
								return synctx.Err()
							}
						})

						g.Go(func() error {
							logger.Info("Starting history sync")

							return hs.Sync(synctx)
						})

						g.Go(func() error {
							logger.Info("Starting overdue sync")

							return ods.Sync(synctx)
						})

						logger.Info("Starting config sync")
						for _, factory := range v1.Factories {
							factory := factory // capture per iteration (pre-Go 1.22 semantics)

							wg.Add(1)
							g.Go(func() error {
								defer wg.Done()

								return s.SyncAfterDump(synctx, common.NewSyncSubject(factory.WithInit), dump)
							})
						}

						wg.Add(1)
						g.Go(func() error {
							defer wg.Done()

							<-dump.Done("icinga:customvar")
							logger.Infof("Syncing customvar")

							cv := common.NewSyncSubject(v1.NewCustomvar)
							cvs, redisErrs := rc.YieldAll(synctx, cv)
							// Let errors from Redis cancel our group.
							com.ErrgroupReceive(g, redisErrs)

							// Multiplex cvs to use them both for customvar and customvar_flat.
							cvs1, cvs2 := make(chan contracts.Entity), make(chan contracts.Entity)
							g.Go(func() error {
								defer close(cvs1)
								defer close(cvs2)

								for {
									select {
									case cv, ok := <-cvs:
										if !ok {
											return nil
										}
										cvs1 <- cv
										cvs2 <- cv
									case <-synctx.Done():
										return synctx.Err()
									}
								}
							})

							actualCvs, dbErrs := db.YieldAll(
								ctx, cv.Factory(), db.BuildSelectStmt(cv.Entity(), cv.Entity().Fingerprint()))
							// Let errors from DB cancel our group.
							com.ErrgroupReceive(g, dbErrs)

							g.Go(func() error {
								return s.ApplyDelta(ctx, icingadb.NewDelta(ctx, actualCvs, cvs1, cv, logger))
							})

							cvFlat := common.NewSyncSubject(v1.NewCustomvarFlat)
							cvFlats, flattenErrs := v1.FlattenCustomvars(ctx, cvs2)
							// Let errors from Flatten cancel our group.
							com.ErrgroupReceive(g, flattenErrs)

							actualCvFlats, dbErrs := db.YieldAll(
								ctx, cvFlat.Factory(), db.BuildSelectStmt(cvFlat.Entity(), cvFlat.Entity().Fingerprint()))
							// Let errors from DB cancel our group.
							com.ErrgroupReceive(g, dbErrs)

							g.Go(func() error {
								return s.ApplyDelta(ctx, icingadb.NewDelta(ctx, actualCvFlats, cvFlats, cvFlat, logger))
							})

							return nil
						})

						g.Go(func() error {
							// Runtime updates start once the config sync is done.
							wg.Wait()
							logger.Infof("Starting runtime updates sync")

							return rt.Sync(synctx, v1.Factories, lastRuntimeStreamID)
						})

						if err := g.Wait(); err != nil && !utils.IsContextCanceled(err) {
							panic(err)
						}
					}
				}()
			case <-ha.Handover():
				logger.Warn("Handing over")

				cancelHactx()
			case <-hactx.Done():
				// Nothing to do here, surrounding loop will terminate now.
			case <-ha.Done():
				if err := ha.Err(); err != nil {
					panic(errors.Wrap(err, "HA exited with an error"))
				} else if ctx.Err() == nil {
					// ha is created as a single instance once. It should only exit if the main context is cancelled,
					// otherwise there is no way to get Icinga DB back into a working state.
					panic(errors.New("HA exited without an error but main context isn't cancelled"))
				}

				return ExitFailure
			case <-ctx.Done():
				panic(errors.New("main context closed unexpectedly"))
			case s := <-sig:
				logger.Infow("Exiting due to signal", zap.String("signal", s.String()))
				cancelCtx()

				return ExitSuccess
			}
		}

		// Release the cycle context before starting the next HA cycle
		// (idempotent if already cancelled; satisfies vet's lostcancel).
		cancelHactx()
	}
}

8
config.yml.example Normal file
View file

@@ -0,0 +1,8 @@
database:
host: icingadb
port: 3306
database: icingadb
user: icingadb
password: icingadb
redis:
address: redis:6380

17
go.mod Normal file
View file

@@ -0,0 +1,17 @@
module github.com/icinga/icingadb
go 1.16
require (
github.com/creasty/defaults v1.5.1
github.com/go-redis/redis/v8 v8.7.1
github.com/go-sql-driver/mysql v1.5.0
github.com/google/uuid v1.2.0
github.com/jessevdk/go-flags v1.4.0
github.com/jmoiron/sqlx v1.3.1
github.com/pkg/errors v0.9.1
go.uber.org/zap v1.16.0
golang.org/x/exp v0.0.0-20210514180818-737f94c0881e
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
)

186
go.sum Normal file
View file

@@ -0,0 +1,186 @@
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creasty/defaults v1.5.1 h1:j8WexcS3d/t4ZmllX4GEkl4wIB/trOr035ajcLHCISM=
github.com/creasty/defaults v1.5.1/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-redis/redis/v8 v8.7.1 h1:8IYi6RO83fNcG5amcUUYTN/qH2h4OjZHlim3KWGFSsA=
github.com/go-redis/redis/v8 v8.7.1/go.mod h1:BRxHBWn3pO3CfjyX6vAoyeRmCquvxr6QG+2onGV2gYs=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE=
github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/otel v0.18.0 h1:d5Of7+Zw4ANFOJB+TIn2K3QWsgS2Ht7OU9DqZHI6qu8=
go.opentelemetry.io/otel v0.18.0/go.mod h1:PT5zQj4lTsR1YeARt8YNKcFb88/c2IKoSABK9mX0r78=
go.opentelemetry.io/otel/metric v0.18.0 h1:yuZCmY9e1ZTaMlZXLrrbAPmYW6tW1A5ozOZeOYGaTaY=
go.opentelemetry.io/otel/metric v0.18.0/go.mod h1:kEH2QtzAyBy3xDVQfGZKIcok4ZZFvd5xyKPfPcuK6pE=
go.opentelemetry.io/otel/oteltest v0.18.0 h1:FbKDFm/LnQDOHuGjED+fy3s5YMVg0z019GJ9Er66hYo=
go.opentelemetry.io/otel/oteltest v0.18.0/go.mod h1:NyierCU3/G8DLTva7KRzGii2fdxdR89zXKH1bNWY7Bo=
go.opentelemetry.io/otel/trace v0.18.0 h1:ilCfc/fptVKaDMK1vWk0elxpolurJbEgey9J6g6s+wk=
go.opentelemetry.io/otel/trace v0.18.0/go.mod h1:FzdUu3BPwZSZebfQ1vl5/tAa8LyMLXSJN57AXIt/iDk=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20210514180818-737f94c0881e h1:VqVU3dsTLGDa5pW74b+xG1lvKltt4EZIUrFPeKOqV2s=
golang.org/x/exp v0.0.0-20210514180818-737f94c0881e/go.mod h1:MSdmUWF4ZWBPSUbgUX/gaau5kvnbkSs9pgtY6B9JXDE=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=

View file (NOTE: the filename header for this hunk is missing from the extraction; judging by the `package command` clause and the `github.com/icinga/icingadb/internal/command` import in cmd/icingadb/main.go, this is presumably internal/command/command.go — verify against the original commit)

@@ -0,0 +1,57 @@
package command
import (
"github.com/icinga/icingadb/pkg/config"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
)
// Command bundles the common dependencies shared by Icinga DB's entry
// points: the parsed command-line flags, the configuration loaded from
// the YAML file those flags point at, and a ready-to-use logger.
type Command struct {
	Flags  *config.Flags
	Config *config.Config
	Logger *zap.SugaredLogger
}
// New parses the command-line flags, loads the YAML configuration file they
// reference and creates a development logger. Any failure during this setup
// terminates the process via utils.Fatal.
func New() *Command {
	// Small guard that aborts the process on any setup error.
	must := func(err error) {
		if err != nil {
			utils.Fatal(err)
		}
	}

	flags, err := config.ParseFlags()
	must(err)

	cfg, err := config.FromYAMLFile(flags.Config)
	must(err)

	logger, err := zap.NewDevelopment()
	must(err)

	return &Command{
		Flags:  flags,
		Config: cfg,
		Logger: logger.Sugar(),
	}
}
// Database opens the database connection pool described by the command's
// configuration. It terminates the process if the pool can't be created.
func (c Command) Database() *icingadb.DB {
	pool, err := c.Config.Database.Open(c.Logger)
	if err != nil {
		c.Logger.Fatalw("can't create database connection pool from config", zap.Error(err))
	}

	return pool
}
// Redis creates the Redis client described by the command's configuration.
// It terminates the process if the client can't be created.
func (c Command) Redis() *icingaredis.Client {
	client, err := c.Config.Redis.NewClient(c.Logger)
	if err != nil {
		c.Logger.Fatalw("can't create Redis client from config", zap.Error(err))
	}

	return client
}

43
pkg/backoff/backoff.go Normal file
View file

@@ -0,0 +1,43 @@
package backoff
import (
"math/rand"
"time"
)
// Backoff returns the backoff duration for a specific retry attempt,
// where attempt counts from zero for the first try.
type Backoff func(uint64) time.Duration
// NewExponentialWithJitter returns a backoff implementation that
// exponentially increases the backoff duration for each retry from min,
// never exceeding max. Some randomization is added to the backoff duration.
// It panics if min >= max.
// NewExponentialWithJitter returns a backoff implementation that
// exponentially increases the backoff duration for each retry from min,
// never exceeding max. Some randomization is added to the backoff duration.
// Non-positive min/max fall back to 100ms and 10s respectively.
// It panics if min >= max.
func NewExponentialWithJitter(min, max time.Duration) Backoff {
	if min <= 0 {
		min = 100 * time.Millisecond
	}
	if max <= 0 {
		max = 10 * time.Second
	}
	if min >= max {
		panic("max must be larger than min")
	}

	return func(attempt uint64) time.Duration {
		e := max
		// min<<attempt stays representable and <= max exactly when
		// min <= max>>attempt; the naive "shift, then range-check" can wrap
		// around to a small positive value for mid-range attempts, silently
		// shrinking the backoff. (For attempt >= 64 the right shift yields 0,
		// so the cap at max applies.)
		if min <= max>>attempt {
			e = min << attempt
		}

		return time.Duration(jitter(int64(e)))
	}
}

// jitter returns a pseudo-random integer in the range [n/2, n) for n > 1
// and 0 otherwise. (rand.Int63n panics for a non-positive argument, so the
// n/2 == 0 cases must be short-circuited.)
func jitter(n int64) int64 {
	if half := n / 2; half > 0 {
		return half + rand.Int63n(half)
	}

	return 0
}

95
pkg/com/bulker.go Normal file
View file

@@ -0,0 +1,95 @@
package com
import (
"context"
"golang.org/x/sync/errgroup"
"sync"
"time"
)
// Bulker consumes a channel of values and re-emits them grouped into
// slices ("bulks") of at most a configured size.
type Bulker struct {
	ch  chan []interface{}
	ctx context.Context
	// NOTE(review): mu and err are never referenced in this file —
	// candidates for removal once confirmed unused package-wide.
	mu  sync.Mutex
	err error
}

// NewBulker returns a new Bulker and starts streaming.
func NewBulker(ctx context.Context, ch <-chan interface{}, count int) *Bulker {
	bulker := &Bulker{
		ch:  make(chan []interface{}),
		ctx: ctx,
	}

	go bulker.run(ch, count)

	return bulker
}

// Bulk returns the channel on which the bulks are delivered.
func (b *Bulker) Bulk() <-chan []interface{} {
	return b.ch
}

// run moves values from ch into chunks of up to count elements, flushing a
// partial chunk after 256ms, and delivers them on b.ch until ch is closed
// or the context is cancelled. b.ch is closed on return.
func (b *Bulker) run(ch <-chan interface{}, count int) {
	defer close(b.ch)

	bufCh := make(chan interface{}, count)
	g, ctx := errgroup.WithContext(b.ctx)

	// Stage 1: forward the input into the buffered staging channel.
	g.Go(func() error {
		defer close(bufCh)

		for {
			select {
			case value, ok := <-ch:
				if !ok {
					return nil
				}
				bufCh <- value
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	// Stage 2: collect staged values into chunks and emit them.
	g.Go(func() error {
		for {
			chunk := make([]interface{}, 0, count)
			deadline := time.After(256 * time.Millisecond)
			closed := false

		fill:
			for len(chunk) < count {
				select {
				case value, ok := <-bufCh:
					if !ok {
						closed = true
						break fill
					}
					chunk = append(chunk, value)
				case <-deadline:
					break fill
				case <-ctx.Done():
					return ctx.Err()
				}
			}

			if len(chunk) > 0 {
				b.ch <- chunk
			}
			if closed {
				return nil
			}
		}
	})

	// We don't expect an error here.
	// We only use errgroup for the encapsulated use of sync.WaitGroup.
	_ = g.Wait()
}

// Bulk reads all values from a channel and streams them in chunks into a returned channel.
func Bulk(ctx context.Context, ch <-chan interface{}, count int) <-chan []interface{} {
	return NewBulker(ctx, ch, count).Bulk()
}

92
pkg/com/com.go Normal file
View file

@@ -0,0 +1,92 @@
package com
import (
"context"
"github.com/icinga/icingadb/pkg/contracts"
"golang.org/x/sync/errgroup"
)
// WaitAsync calls Wait() on the passed Waiter in a new goroutine and
// sends the first non-nil error (if any) to the returned channel.
// The returned channel is always closed when the Waiter is done.
func WaitAsync(w contracts.Waiter) <-chan error {
	// Buffer of one so the goroutine never blocks even if nobody receives.
	result := make(chan error, 1)

	go func() {
		defer close(result)

		if err := w.Wait(); err != nil {
			result <- err
		}
	}()

	return result
}
// ErrgroupReceive adds a goroutine to the specified group that
// returns the first non-nil error (if any) from the specified channel.
// If the channel is closed, it will return nil.
func ErrgroupReceive(g *errgroup.Group, err <-chan error) {
	g.Go(func() error {
		// Receiving from a closed channel yields nil, which is exactly
		// the "no error" result the group expects — no branching needed.
		return <-err
	})
}
// PipeError forwards the first non-nil error from in to out
// using a separate goroutine.
func PipeError(in <-chan error, out chan<- error) {
	go func() {
		err, ok := <-in
		if ok && err != nil {
			out <- err
		}
	}()
}
// CopyFirst asynchronously forwards all items from input to forward and synchronously returns the first item.
// If input is closed before delivering anything, all results are zero; if ctx
// ends first, err is the context's error.
func CopyFirst(
	ctx context.Context, input <-chan contracts.Entity,
) (first contracts.Entity, forward <-chan contracts.Entity, err error) {
	var ok bool
	select {
	case first, ok = <-input:
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}

	if !ok {
		return
	}

	// Buffer of one because we receive an entity and send it back immediately.
	out := make(chan contracts.Entity, 1)
	out <- first
	forward = out

	go func() {
		defer close(out)

		for {
			var entity contracts.Entity
			var more bool

			select {
			case entity, more = <-input:
			case <-ctx.Done():
				return
			}
			if !more {
				return
			}

			select {
			case out <- entity:
			case <-ctx.Done():
				return
			}
		}
	}()

	return
}

82
pkg/com/cond.go Normal file
View file

@@ -0,0 +1,82 @@
package com
import (
"context"
"errors"
)
// CondClosed is the panic value delivered by Wait and Broadcast
// once the Cond has been closed.
// NOTE(review): by Go convention this would be named ErrCondClosed,
// but renaming an exported identifier would break callers.
var CondClosed = errors.New("condition closed")

// Cond implements a condition variable, a rendezvous point
// for goroutines waiting for or announcing the occurrence
// of an event.
type Cond struct {
	ctx       context.Context
	cancel    context.CancelFunc
	broadcast chan struct{}
	listeners chan chan struct{}
}

// NewCond returns a ready-to-use Cond whose lifetime is bound to ctx.
func NewCond(ctx context.Context) *Cond {
	childCtx, cancel := context.WithCancel(ctx)
	cond := &Cond{
		ctx:       childCtx,
		cancel:    cancel,
		broadcast: make(chan struct{}),
		listeners: make(chan chan struct{}),
	}

	go cond.controller()

	return cond
}

// controller is the Cond's event loop: it hands the current notification
// channel to waiters and, on broadcast, closes that channel (waking every
// holder) and starts a fresh one for the next round. It runs until the
// Cond's context ends.
func (c *Cond) controller() {
	notify := make(chan struct{})

	for {
		select {
		case c.listeners <- notify:
			// A waiter picked up the current notification channel.
		case <-c.broadcast:
			// Wake everyone holding the current channel ...
			close(notify)
			// ... and start a new round for future waiters.
			notify = make(chan struct{})
		case <-c.ctx.Done():
			return
		}
	}
}

// Close implements the io.Closer interface.
func (c *Cond) Close() error {
	c.cancel()

	return nil
}

// Wait returns a channel on which the next (close) signal will be sent.
// It panics with CondClosed if the Cond has been closed.
func (c *Cond) Wait() <-chan struct{} {
	select {
	case notify := <-c.listeners:
		return notify
	case <-c.ctx.Done():
		panic(CondClosed)
	}
}

// Broadcast wakes all current listeners.
// It panics with CondClosed if the Cond has been closed.
func (c *Cond) Broadcast() {
	select {
	case c.broadcast <- struct{}{}:
	case <-c.ctx.Done():
		panic(CondClosed)
	}
}

// Done returns a channel that is closed once the Cond has been closed.
func (c *Cond) Done() <-chan struct{} {
	return c.ctx.Done()
}

25
pkg/com/counter.go Normal file
View file

@ -0,0 +1,25 @@
package com
import "sync/atomic"
// Counter is an atomic uint64 counter. Its zero value is ready to use.
// All methods use pointer receivers so that the atomic operations act on
// the shared counter and never on a copy.
type Counter uint64

// Add adds the given delta to the counter.
func (c *Counter) Add(delta uint64) {
	atomic.AddUint64(c.ptr(), delta)
}

// Inc increments the counter by one.
func (c *Counter) Inc() {
	c.Add(1)
}

// Val returns the counter value.
// The pointer receiver is deliberate: a value receiver would copy the
// counter non-atomically before the atomic load, defeating atomicity.
func (c *Counter) Val() uint64 {
	return atomic.LoadUint64(c.ptr())
}

// ptr returns the counter as *uint64 for use with sync/atomic.
func (c *Counter) ptr() *uint64 {
	return (*uint64)(c)
}

96
pkg/com/entity_bulker.go Normal file
View file

@ -0,0 +1,96 @@
package com
import (
"context"
"github.com/icinga/icingadb/pkg/contracts"
"golang.org/x/sync/errgroup"
"sync"
"time"
)
// EntityBulker reads contracts.Entity values from a channel and
// streams them in chunks into a Bulk channel.
type EntityBulker struct {
	ch  chan []contracts.Entity
	ctx context.Context
	mu  sync.Mutex
	err error
}

// NewEntityBulker returns a new EntityBulker and starts the bulking goroutine.
func NewEntityBulker(ctx context.Context, ch <-chan contracts.Entity, count int) *EntityBulker {
	b := &EntityBulker{
		ch:  make(chan []contracts.Entity),
		ctx: ctx,
		mu:  sync.Mutex{},
	}

	go b.run(ch, count)

	return b
}

// Bulk returns the channel on which the bulks are delivered.
func (b *EntityBulker) Bulk() <-chan []contracts.Entity {
	return b.ch
}

// run copies entities from ch into an intermediate buffer channel and flushes
// chunks of up to count entities — or whatever arrived within 256ms — to b.ch,
// until ch is closed or the context is canceled.
func (b *EntityBulker) run(ch <-chan contracts.Entity, count int) {
	defer close(b.ch)

	bufCh := make(chan contracts.Entity, count)
	g, ctx := errgroup.WithContext(b.ctx)

	g.Go(func() error {
		defer close(bufCh)

		for {
			select {
			case v, ok := <-ch:
				if !ok {
					return nil
				}

				// Guard the send too, so this goroutine can't block forever
				// once the consumer stopped due to context cancellation.
				select {
				case bufCh <- v:
				case <-ctx.Done():
					return ctx.Err()
				}
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	g.Go(func() error {
		for done := false; !done; {
			buf := make([]contracts.Entity, 0, count)
			timeout := time.After(256 * time.Millisecond)

			for drain := true; drain && len(buf) < count; {
				select {
				case v, ok := <-bufCh:
					if !ok {
						drain = false
						done = true

						break
					}

					buf = append(buf, v)
				case <-timeout:
					drain = false
				case <-ctx.Done():
					return ctx.Err()
				}
			}

			if len(buf) > 0 {
				// Guard the send so a canceled context can't leave this
				// goroutine blocked forever on an abandoned reader.
				select {
				case b.ch <- buf:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		}

		return nil
	})

	// We don't expect an error here.
	// We only use errgroup for the encapsulated use of sync.WaitGroup.
	_ = g.Wait()
}

// BulkEntities reads all values from the given channel and streams them in chunks into a returned channel.
func BulkEntities(ctx context.Context, ch <-chan contracts.Entity, count int) <-chan []contracts.Entity {
	return NewEntityBulker(ctx, ch, count).Bulk()
}

View file

@ -0,0 +1,39 @@
package common
import (
"github.com/icinga/icingadb/pkg/contracts"
)
// SyncSubject defines information about entities to be synchronized.
type SyncSubject struct {
	entity       contracts.Entity
	factory      contracts.EntityFactoryFunc
	withChecksum bool
}

// NewSyncSubject returns a new SyncSubject.
func NewSyncSubject(factoryFunc contracts.EntityFactoryFunc) *SyncSubject {
	// Create one probe entity up front so Entity and WithChecksum
	// never have to invoke the factory again.
	probe := factoryFunc()
	_, hasChecksum := probe.(contracts.Checksumer)

	return &SyncSubject{
		entity:       probe,
		factory:      factoryFunc,
		withChecksum: hasChecksum,
	}
}

// Entity returns one value from the factory. Always returns the same entity.
func (s SyncSubject) Entity() contracts.Entity {
	return s.entity
}

// Factory returns the entity factory function.
func (s SyncSubject) Factory() contracts.EntityFactoryFunc {
	return s.factory
}

// WithChecksum returns whether entities from the factory implement contracts.Checksumer.
func (s SyncSubject) WithChecksum() bool {
	return s.withChecksum
}

71
pkg/config/config.go Normal file
View file

@ -0,0 +1,71 @@
package config
import (
"fmt"
"github.com/jessevdk/go-flags"
"gopkg.in/yaml.v3"
"os"
)
// Config defines Icinga DB config.
type Config struct {
	Database *Database `yaml:"database"` // relational database connection settings
	Redis    *Redis    `yaml:"redis"`    // Redis connection settings
}

// Flags defines CLI flags.
type Flags struct {
	// Config is the path to the config file
	Config string `short:"c" long:"config" description:"path to config file" required:"true" default:"./config.yml"`
	// Datadir is the location of the data directory
	Datadir string `long:"datadir" description:"path to the data directory" required:"true" default:"./"`
}
// FromYAMLFile returns a new Config value created from the given YAML config file.
func FromYAMLFile(name string) (*Config, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	c := &Config{}
	if err := yaml.NewDecoder(f).Decode(&c); err != nil {
		return nil, err
	}

	return c, nil
}
// ValidateFile checks whether the given file name is a readable file.
func ValidateFile(name string) error {
	info, err := os.Stat(name)
	if err != nil {
		return err
	}

	if info.IsDir() {
		return fmt.Errorf("'%s' is a directory", name)
	}

	return nil
}
// ParseFlags parses CLI flags and
// returns a Flags value created from them.
// It also fails fast if the configured config file is not a readable file.
func ParseFlags() (*Flags, error) {
	f := &Flags{}

	parser := flags.NewParser(f, flags.Default)

	if _, err := parser.Parse(); err != nil {
		return nil, err
	}

	// Reject a config path that doesn't point at a regular file before startup continues.
	if err := ValidateFile(f.Config); err != nil {
		return nil, err
	}

	return f, nil
}

70
pkg/config/database.go Normal file
View file

@ -0,0 +1,70 @@
package config
import (
"errors"
"fmt"
"github.com/creasty/defaults"
"github.com/icinga/icingadb/pkg/driver"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/utils"
"github.com/jmoiron/sqlx"
"github.com/jmoiron/sqlx/reflectx"
"go.uber.org/zap"
"sync"
)
var registerDriverOnce sync.Once
// Database defines database client configuration.
type Database struct {
	Host     string `yaml:"host"`
	Port     int    `yaml:"port"`
	Database string `yaml:"database"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`

	icingadb.Options `yaml:",inline"`
}

// Open prepares the DSN string and driver configuration,
// calls sqlx.Open, but returns *icingadb.DB.
func (d *Database) Open(logger *zap.SugaredLogger) (*icingadb.DB, error) {
	// The retrying "icingadb-mysql" driver may only be registered once per process.
	registerDriverOnce.Do(func() {
		driver.Register(logger)
	})

	dsn := fmt.Sprintf(
		"%s:%s@tcp(%s:%d)/%s?timeout=60s",
		d.User, d.Password, d.Host, d.Port, d.Database)

	db, err := sqlx.Open("icingadb-mysql", dsn)
	if err != nil {
		return nil, err
	}

	// Keep roughly a third of the connections idle; cap the total at MaxConnections.
	db.SetMaxIdleConns(d.MaxConnections / 3)
	db.SetMaxOpenConns(d.MaxConnections)

	// Derive column names from struct field names via utils.Key
	// (presumably snake_case with '_' as separator — see utils.Key).
	db.Mapper = reflectx.NewMapperFunc("db", func(s string) string {
		return utils.Key(s, '_')
	})

	return icingadb.NewDb(db, logger, &d.Options), nil
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It applies struct tag defaults first, then validates the decoded values.
func (d *Database) UnmarshalYAML(unmarshal func(interface{}) error) error {
	if err := defaults.Set(d); err != nil {
		return err
	}

	// Prevent recursion.
	type self Database
	if err := unmarshal((*self)(d)); err != nil {
		return err
	}

	if d.MaxConnectionsPerTable < 1 {
		return errors.New("max_connections_per_table must be at least 1")
	}

	return nil
}

98
pkg/config/redis.go Normal file
View file

@ -0,0 +1,98 @@
package config
import (
"context"
"errors"
"github.com/creasty/defaults"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/retry"
"go.uber.org/zap"
"net"
"os"
"sync"
"syscall"
"time"
)
// Redis defines Redis client configuration.
type Redis struct {
	Address  string `yaml:"address"`
	Password string `yaml:"password"`

	icingaredis.Options `yaml:",inline"`
}

// NewClient prepares Redis client configuration,
// calls redis.NewClient, but returns *icingaredis.Client.
func (r *Redis) NewClient(logger *zap.SugaredLogger) (*icingaredis.Client, error) {
	c := redis.NewClient(&redis.Options{
		Addr:        r.Address,
		Dialer:      dialWithLogging(logger),
		Password:    r.Password,
		DB:          0, // Use default DB,
		ReadTimeout: r.Timeout,
	})

	// Re-create the client with MaxRetries > PoolSize so a command isn't
	// given up on before every pooled connection was tried once.
	opts := c.Options()
	opts.MaxRetries = opts.PoolSize + 1 // https://github.com/go-redis/redis/issues/1737
	c = redis.NewClient(opts)

	return icingaredis.NewClient(c, logger, &r.Options), nil
}

// dialWithLogging returns a Redis Dialer with logging capabilities.
func dialWithLogging(logger *zap.SugaredLogger) func(context.Context, string, string) (net.Conn, error) {
	// dial behaves like net.Dialer#DialContext, but re-tries on syscall.ECONNREFUSED.
	return func(ctx context.Context, network, addr string) (conn net.Conn, err error) {
		var dl net.Dialer
		var logFirstError sync.Once

		err = retry.WithBackoff(
			ctx,
			func(ctx context.Context) (err error) {
				conn, err = dl.DialContext(ctx, network, addr)

				// Log only the first failure to avoid flooding the log while retrying.
				logFirstError.Do(func() {
					if err != nil {
						logger.Warnw("Can't connect to Redis. Retrying", zap.Error(err))
					}
				})

				return
			},
			func(err error) bool {
				// Retry only refused connections, e.g. while Redis is still starting up.
				if op, ok := err.(*net.OpError); ok {
					sys, ok := op.Err.(*os.SyscallError)

					return ok && sys.Err == syscall.ECONNREFUSED
				}

				return false
			},
			backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
			5*time.Minute, // total retry budget before giving up
		)

		return
	}
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (r *Redis) UnmarshalYAML(unmarshal func(interface{}) error) error {
	if err := defaults.Set(r); err != nil {
		return err
	}

	// Decode via an alias type so this method isn't invoked recursively.
	type self Redis
	if err := unmarshal((*self)(r)); err != nil {
		return err
	}

	// Validate bounds after defaults and user-supplied values were applied.
	switch {
	case r.MaxHMGetConnections < 1:
		return errors.New("max_hmget_connections must be at least 1")
	case r.HMGetCount < 1:
		return errors.New("hmget_count must be at least 1")
	case r.HScanCount < 1:
		return errors.New("hscan_count must be at least 1")
	}

	return nil
}

View file

@ -0,0 +1,85 @@
package contracts
// Entity is implemented by every type Icinga DB should synchronize.
// It combines unique fingerprinting with an ID.
type Entity interface {
	Fingerprinter
	IDer
}

// Fingerprinter is implemented by every entity that uniquely identifies itself.
type Fingerprinter interface {
	// Fingerprint returns the value that uniquely identifies the entity.
	Fingerprint() Fingerprinter
}

// ID is a unique identifier of an entity.
type ID interface {
	// String returns the string representation form of the ID.
	// The String method is used to use the ID in functions
	// where it needs to be compared or hashed.
	String() string
}

// IDer is implemented by every entity that uniquely identifies itself.
type IDer interface {
	ID() ID   // ID returns the ID.
	SetID(ID) // SetID sets the ID.
}

// Equaler is implemented by every type that is comparable.
type Equaler interface {
	Equal(Equaler) bool // Equal checks for equality.
}

// Checksum is a unique identifier of an entity.
type Checksum interface {
	Equaler
	// String returns the string representation form of the Checksum.
	// The String method is used to use the Checksum in functions
	// where it needs to be compared or hashed.
	String() string
}

// Checksumer is implemented by every entity with a checksum.
type Checksumer interface {
	Checksum() Checksum    // Checksum returns the Checksum.
	SetChecksum(Checksum)  // SetChecksum sets the Checksum.
}

// EntityFactoryFunc knows how to create an Entity.
type EntityFactoryFunc func() Entity

// WithInit calls Init() on the created Entity if applicable.
// Entities that don't implement Initer are returned as created.
func (f EntityFactoryFunc) WithInit() Entity {
	e := f()

	if initer, ok := e.(Initer); ok {
		initer.Init()
	}

	return e
}

// Waiter implements the Wait method,
// which blocks until execution is complete.
type Waiter interface {
	Wait() error // Wait waits for execution to complete.
}

// Initer implements the Init method,
// which initializes the object in addition to zeroing.
type Initer interface {
	Init() // Init initializes the object.
}

// Upserter implements the Upsert method,
// which returns a part of the object for ON DUPLICATE KEY UPDATE.
type Upserter interface {
	Upsert() interface{} // Upsert partitions the object.
}

// TableNamer implements the TableName method,
// which returns the table of the object.
type TableNamer interface {
	TableName() string // TableName tells the table.
}

83
pkg/driver/driver.go Normal file
View file

@ -0,0 +1,83 @@
package driver
import (
"context"
"database/sql"
"database/sql/driver"
"github.com/go-sql-driver/mysql"
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/retry"
"go.uber.org/zap"
"io/ioutil"
"log"
"net"
"os"
"sync"
"syscall"
"time"
)
// timeout is the maximum total time Driver.Open keeps retrying to connect.
var timeout = time.Minute * 5

// Driver wraps a database/sql/driver.Driver with retry and logging behavior.
// TODO(el): Support DriverContext.
type Driver struct {
	Driver driver.Driver
	Logger *zap.SugaredLogger
}
// Open returns a new connection to the database, retrying transient errors
// (see shouldRetry) with exponential backoff for up to `timeout` in total.
// Only the first failure is logged to avoid flooding the log while retrying.
// TODO(el): Test DNS.
func (d Driver) Open(dsn string) (c driver.Conn, err error) {
	var logFirstError sync.Once

	err = retry.WithBackoff(
		context.Background(),
		func(context.Context) (err error) {
			c, err = d.Driver.Open(dsn)

			logFirstError.Do(func() {
				if err != nil {
					d.Logger.Warnw("Can't connect to database. Retrying", zap.Error(err))
				}
			})

			return
		},
		shouldRetry,
		backoff.NewExponentialWithJitter(time.Millisecond*128, time.Minute*1),
		timeout,
	)

	return
}
func shouldRetry(err error) bool {
underlying := err
if op, ok := err.(*net.OpError); ok {
underlying = op.Err
}
if sys, ok := underlying.(*os.SyscallError); ok {
underlying = sys.Err
}
switch underlying {
case driver.ErrBadConn, syscall.ECONNREFUSED:
return true
}
type temporary interface {
Temporary() bool
}
if t, ok := err.(temporary); ok {
return t.Temporary()
}
type timeout interface {
Timeout() bool
}
if t, ok := err.(timeout); ok {
return t.Timeout()
}
return false
}
// Register registers the "icingadb-mysql" database/sql driver: the stock
// MySQL driver wrapped with retry logic and the given logger.
func Register(logger *zap.SugaredLogger) {
	sql.Register("icingadb-mysql", &Driver{Driver: &mysql.MySQLDriver{}, Logger: logger})

	// Silence the MySQL driver's own logger.
	// TODO(el): Don't discard but hide?
	_ = mysql.SetLogger(log.New(ioutil.Discard, "", 0))
}

30
pkg/flatten/flatten.go Normal file
View file

@ -0,0 +1,30 @@
package flatten
import (
"strconv"
)
// Flatten creates flat, one-dimensional maps from arbitrarily nested values, e.g. JSON.
// Nested map keys are joined with '.', array elements get an "[i]" suffix,
// and every other value is stored under its accumulated key as-is.
func Flatten(value interface{}, prefix string) map[string]interface{} {
	out := make(map[string]interface{})

	var walk func(string, interface{})
	walk = func(key string, v interface{}) {
		switch typed := v.(type) {
		case map[string]interface{}:
			for name, child := range typed {
				walk(key+"."+name, child)
			}
		case []interface{}:
			for idx, child := range typed {
				walk(key+"["+strconv.Itoa(idx)+"]", child)
			}
		default:
			out[key] = v
		}
	}

	walk(prefix, value)

	return out
}

448
pkg/icingadb/db.go Normal file
View file

@ -0,0 +1,448 @@
package icingadb
import (
"context"
"fmt"
"github.com/go-sql-driver/mysql"
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/retry"
"github.com/icinga/icingadb/pkg/utils"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"reflect"
"strings"
"sync"
"time"
)
// DB is a wrapper around sqlx.DB with bulk execution,
// statement building, streaming and logging capabilities.
type DB struct {
	*sqlx.DB

	logger            *zap.SugaredLogger
	options           *Options
	tableSemaphores   map[string]*semaphore.Weighted // per-table concurrency limits, created lazily
	tableSemaphoresMu sync.Mutex                     // guards tableSemaphores
}

// Options define the connection limits of a DB.
type Options struct {
	MaxConnections         int `yaml:"max_connections" default:"16"`
	MaxConnectionsPerTable int `yaml:"max_connections_per_table" default:"8"`
}

// NewDb returns a new icingadb.DB wrapper for a pre-existing *sqlx.DB.
func NewDb(db *sqlx.DB, logger *zap.SugaredLogger, options *Options) *DB {
	return &DB{
		DB:              db,
		logger:          logger,
		options:         options,
		tableSemaphores: make(map[string]*semaphore.Weighted),
	}
}
// BuildColumns returns the column names of the given struct,
// i.e. the names of all mapped fields that carry a non-empty struct tag.
func (db *DB) BuildColumns(subject interface{}) []string {
	fields := db.Mapper.TypeMap(reflect.TypeOf(subject)).Names

	columns := make([]string, 0, len(fields))
	for _, f := range fields {
		// Skip fields without a struct tag.
		if f.Field.Tag == "" {
			continue
		}
		columns = append(columns, f.Name)
	}

	return columns
}
// BuildDeleteStmt returns a DELETE statement for the given struct's table
// deleting all rows whose id is contained in the bound IN set.
func (db *DB) BuildDeleteStmt(from interface{}) string {
	return fmt.Sprintf(`DELETE FROM %s WHERE id IN (?)`, utils.TableName(from))
}

// BuildInsertStmt returns a named INSERT statement for the given struct.
func (db *DB) BuildInsertStmt(into interface{}) string {
	cols := db.BuildColumns(into)
	placeholders := ":" + strings.Join(cols, ", :")

	return fmt.Sprintf(
		`INSERT INTO %s (%s) VALUES (%s)`,
		utils.TableName(into), strings.Join(cols, ", "), placeholders,
	)
}

// BuildSelectStmt returns a SELECT statement fetching into's columns from from's table.
func (db *DB) BuildSelectStmt(from interface{}, into interface{}) string {
	return fmt.Sprintf(
		`SELECT %s FROM %s`,
		strings.Join(db.BuildColumns(into), ", "), utils.TableName(from),
	)
}

// BuildUpdateStmt returns a named UPDATE statement for the given struct, keyed by id.
func (db *DB) BuildUpdateStmt(update interface{}) string {
	cols := db.BuildColumns(update)

	assignments := make([]string, 0, len(cols))
	for _, col := range cols {
		assignments = append(assignments, col+" = :"+col)
	}

	return fmt.Sprintf(
		`UPDATE %s SET %s WHERE id = :id`,
		utils.TableName(update), strings.Join(assignments, ", "),
	)
}
// BuildUpsertStmt returns a named INSERT ... ON DUPLICATE KEY UPDATE statement for
// the given struct plus the number of named placeholders it contains. If subject
// implements contracts.Upserter, only the columns of its Upsert() part are updated
// on conflict; otherwise all insert columns are.
func (db *DB) BuildUpsertStmt(subject interface{}) (stmt string, placeholders int) {
	insertColumns := db.BuildColumns(subject)

	var updateColumns []string
	if upserter, ok := subject.(contracts.Upserter); ok {
		updateColumns = db.BuildColumns(upserter.Upsert())
	} else {
		updateColumns = insertColumns
	}

	set := make([]string, 0, len(updateColumns))
	for _, col := range updateColumns {
		set = append(set, fmt.Sprintf("%s = :%s", col, col))
	}

	// Both the VALUES list and the UPDATE assignments use one named parameter per column.
	return fmt.Sprintf(
		`INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s`,
		utils.TableName(subject),
		strings.Join(insertColumns, ","),
		fmt.Sprintf(":%s", strings.Join(insertColumns, ",:")),
		strings.Join(set, ","),
	), len(insertColumns) + len(updateColumns)
}
// BulkExec executes the given IN-style query repeatedly with chunks of up to count
// arguments read from arg. Per-chunk concurrency is bounded by sem (one permit each).
// It blocks until arg is closed and all chunks were executed, or an error occurred.
func (db *DB) BulkExec(ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan interface{}) error {
	var cnt com.Counter
	g, ctx := errgroup.WithContext(ctx)
	// Use context from group.
	bulk := com.Bulk(ctx, arg, count)

	db.logger.Debugf("Executing %s", query)
	defer utils.Timed(time.Now(), func(elapsed time.Duration) {
		db.logger.Debugf("Executed %s with %d rows in %s", query, cnt.Val(), elapsed)
	})

	g.Go(func() error {
		g, ctx := errgroup.WithContext(ctx)

		for b := range bulk {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}

			// Pass b as a parameter so each goroutine works on its own chunk.
			g.Go(func(b []interface{}) func() error {
				return func() error {
					defer sem.Release(1)

					return retry.WithBackoff(
						ctx,
						func(context.Context) error {
							// Expand the IN (?) placeholder to the chunk's length.
							query, args, err := sqlx.In(query, b)
							if err != nil {
								return err
							}

							query = db.Rebind(query)
							_, err = db.ExecContext(ctx, query, args...)
							if err != nil {
								return err
							}

							cnt.Add(uint64(len(b)))

							return nil
						},
						IsRetryable,
						backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
						0,
					)
				}
			}(b))
		}

		return g.Wait()
	})

	return g.Wait()
}
// NamedBulkExec executes the given named query repeatedly with chunks of up to
// count entities read from arg, bounded by sem (one permit per chunk). Entities
// written successfully are forwarded to succeeded if it is non-nil. It blocks
// until arg is closed and all chunks are done, or until the first error.
func (db *DB) NamedBulkExec(
	ctx context.Context, query string, count int, sem *semaphore.Weighted,
	arg <-chan contracts.Entity, succeeded chan<- contracts.Entity,
) error {
	var cnt com.Counter
	g, ctx := errgroup.WithContext(ctx)
	bulk := com.BulkEntities(ctx, arg, count)

	db.logger.Debugf("Executing %s", query)
	defer utils.Timed(time.Now(), func(elapsed time.Duration) {
		db.logger.Debugf("Executed %s with %d rows in %s", query, cnt.Val(), elapsed)
	})

	g.Go(func() error {
		// stmt, err := db.PrepareNamedContext(ctx, query)
		// if err != nil {
		// 	return err
		// }
		for {
			select {
			case b, ok := <-bulk:
				if !ok {
					return nil
				}

				if err := sem.Acquire(ctx, 1); err != nil {
					return err
				}

				// Pass b as a parameter so each goroutine works on its own chunk.
				g.Go(func(b []contracts.Entity) func() error {
					return func() error {
						defer sem.Release(1)

						return retry.WithBackoff(
							ctx,
							func(ctx context.Context) error {
								db.logger.Debugf("Executing %s with %d rows..", query, len(b))
								// Propagate errors to the retry logic; previously a stray
								// debug fmt.Println(err) also dumped them to stdout.
								_, err := db.NamedExecContext(ctx, query, b)
								if err != nil {
									return err
								}

								cnt.Add(uint64(len(b)))

								if succeeded != nil {
									for _, row := range b {
										select {
										case <-ctx.Done():
											return ctx.Err()
										case succeeded <- row:
										}
									}
								}

								return nil
							},
							IsRetryable,
							backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
							0,
						)
					}
				}(b))
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	return g.Wait()
}
// NamedBulkExecTx executes the given named query in transactions of up to count
// entities read from arg, bounded by sem (one permit per chunk). Each chunk is
// executed row by row via a prepared statement inside one transaction. It blocks
// until arg is closed and all chunks are committed, or until the first error.
func (db *DB) NamedBulkExecTx(
	ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan contracts.Entity,
) error {
	var cnt com.Counter
	g, ctx := errgroup.WithContext(ctx)
	bulk := com.BulkEntities(ctx, arg, count)

	db.logger.Debugf("Executing %s", query)
	defer utils.Timed(time.Now(), func(elapsed time.Duration) {
		db.logger.Debugf("Executed %s with %d rows in %s", query, cnt.Val(), elapsed)
	})

	g.Go(func() error {
		for {
			select {
			case b, ok := <-bulk:
				if !ok {
					return nil
				}

				if err := sem.Acquire(ctx, 1); err != nil {
					return err
				}

				// Pass b as a parameter so each goroutine works on its own chunk.
				g.Go(func(b []contracts.Entity) func() error {
					return func() error {
						defer sem.Release(1)

						return retry.WithBackoff(
							ctx,
							func(ctx context.Context) error {
								tx, err := db.BeginTxx(ctx, nil)
								if err != nil {
									return errors.Wrap(err, "can't start transaction")
								}
								// Release the connection if anything below fails.
								// Rollback after a successful Commit is a no-op error we ignore.
								defer func() { _ = tx.Rollback() }()

								stmt, err := tx.PrepareNamedContext(ctx, query)
								if err != nil {
									return errors.Wrap(err, "can't prepare named statement with context in transaction")
								}
								defer stmt.Close()

								for _, arg := range b {
									if _, err := stmt.ExecContext(ctx, arg); err != nil {
										return errors.Wrap(err, "can't execute statement in transaction")
									}
								}

								if err := tx.Commit(); err != nil {
									return errors.Wrap(err, "can't commit transaction")
								}

								cnt.Add(uint64(len(b)))

								return nil
							},
							IsRetryable,
							backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
							0,
						)
					}
				}(b))
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	return g.Wait()
}
// YieldAll runs the given query and streams the resulting rows as entities created
// by factoryFunc into the returned channel. The returned error channel yields the
// final error (if any) once the query goroutine finishes.
func (db *DB) YieldAll(ctx context.Context, factoryFunc contracts.EntityFactoryFunc, query string, args ...interface{}) (<-chan contracts.Entity, <-chan error) {
	var cnt com.Counter
	entities := make(chan contracts.Entity, 1)
	g, ctx := errgroup.WithContext(ctx)

	db.logger.Infof("Syncing %s", query)

	g.Go(func() error {
		defer close(entities)
		defer utils.Timed(time.Now(), func(elapsed time.Duration) {
			v := factoryFunc()
			db.logger.Infof("Fetched %d elements of %s in %s", cnt.Val(), utils.Name(v), elapsed)
		})

		// Honor ctx so a canceled sync aborts the query instead of running to completion.
		rows, err := db.QueryxContext(ctx, query, args...)
		if err != nil {
			return err
		}
		defer rows.Close()

		for rows.Next() {
			e := factoryFunc()

			if err := rows.StructScan(e); err != nil {
				return err
			}

			select {
			case entities <- e:
				cnt.Inc()
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		// Surface errors that terminated the iteration early.
		return rows.Err()
	})

	return entities, com.WaitAsync(g)
}
// CreateStreamed inserts all entities from the given channel into their table in bulk.
func (db *DB) CreateStreamed(ctx context.Context, entities <-chan contracts.Entity) error {
	first, forward, err := com.CopyFirst(ctx, entities)
	if first == nil {
		// Channel closed (err == nil) or context canceled (err != nil) before any entity.
		return err
	}

	sem := db.getSemaphoreForTable(utils.TableName(first))

	// 1<<15/columns bounds the number of bound parameters per statement.
	return db.NamedBulkExec(ctx, db.BuildInsertStmt(first), 1<<15/len(db.BuildColumns(first)), sem, forward, nil)
}

// UpsertStreamed upserts all entities from the given channel into their table in
// bulk, forwarding successfully written entities to succeeded if it is non-nil.
func (db *DB) UpsertStreamed(ctx context.Context, entities <-chan contracts.Entity, succeeded chan<- contracts.Entity) error {
	first, forward, err := com.CopyFirst(ctx, entities)
	if first == nil {
		return err
	}

	// TODO(ak): wait for https://github.com/jmoiron/sqlx/issues/694
	//stmt, placeholders := db.BuildUpsertStmt(first)
	//return db.NamedBulkExec(ctx, stmt, 1<<15/placeholders, 1<<3, forward, succeeded)

	stmt, _ := db.BuildUpsertStmt(first)
	sem := db.getSemaphoreForTable(utils.TableName(first))

	// Chunk size 1 until the sqlx issue above is solved.
	return db.NamedBulkExec(ctx, stmt, 1, sem, forward, succeeded)
}

// UpdateStreamed updates all entities from the given channel in their table,
// one transaction per chunk.
func (db *DB) UpdateStreamed(ctx context.Context, entities <-chan contracts.Entity) error {
	first, forward, err := com.CopyFirst(ctx, entities)
	if first == nil {
		return err
	}

	sem := db.getSemaphoreForTable(utils.TableName(first))

	return db.NamedBulkExecTx(ctx, db.BuildUpdateStmt(first), 1<<15, sem, forward)
}

// DeleteStreamed deletes all rows whose ids are read from the given channel
// from entityType's table in bulk.
func (db *DB) DeleteStreamed(ctx context.Context, entityType contracts.Entity, ids <-chan interface{}) error {
	sem := db.getSemaphoreForTable(utils.TableName(entityType))

	return db.BulkExec(ctx, db.BuildDeleteStmt(entityType), 1<<15, sem, ids)
}

// Delete deletes all rows with the given ids from entityType's table.
func (db *DB) Delete(ctx context.Context, entityType contracts.Entity, ids []interface{}) error {
	idsCh := make(chan interface{}, len(ids))
	for _, id := range ids {
		idsCh <- id
	}
	close(idsCh)

	return db.DeleteStreamed(ctx, entityType, idsCh)
}
// IsRetryable checks whether the given (possibly wrapped) error signals a
// temporary database problem so that the caller may retry the operation.
func IsRetryable(err error) bool {
	err = errors.Cause(err)
	if err == mysql.ErrInvalidConn {
		return true
	}

	switch e := err.(type) {
	case *mysql.MySQLError:
		switch e.Number {
		case 1053, 1205, 1213, 2006:
			// 1053: Server shutdown in progress
			// 1205: Lock wait timeout exceeded
			// 1213: Deadlock found when trying to get lock
			// 2006: MySQL server has gone away
			return true
		}
	}

	return false
}
// getSemaphoreForTable returns the semaphore bounding concurrent statements
// on the given table, lazily creating it with MaxConnectionsPerTable permits.
func (db *DB) getSemaphoreForTable(table string) *semaphore.Weighted {
	db.tableSemaphoresMu.Lock()
	defer db.tableSemaphoresMu.Unlock()

	sem, ok := db.tableSemaphores[table]
	if !ok {
		sem = semaphore.NewWeighted(int64(db.options.MaxConnectionsPerTable))
		db.tableSemaphores[table] = sem
	}

	return sem
}

138
pkg/icingadb/delta.go Normal file
View file

@ -0,0 +1,138 @@
package icingadb
import (
"context"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"sync"
"time"
)
// Delta is the result of comparing an actual and a desired entity stream:
// the entities to Create, Update and Delete to reach the desired state.
type Delta struct {
	Create  EntitiesById // entities only present in desired
	Update  EntitiesById // entities present on both sides with unequal checksums (checksum subjects only)
	Delete  EntitiesById // entities only present in actual
	Subject *common.SyncSubject

	done   chan error // closed (after an optional error send) when the computation ends
	err    error      // NOTE(review): never assigned within this file — confirm intended use
	logger *zap.SugaredLogger
}

// NewDelta starts comparing the actual and desired streams in the background.
// Use Wait to block until the Create/Update/Delete sets are populated.
func NewDelta(ctx context.Context, actual, desired <-chan contracts.Entity, subject *common.SyncSubject, logger *zap.SugaredLogger) *Delta {
	delta := &Delta{
		Subject: subject,
		done:    make(chan error, 1),
		logger:  logger,
	}

	go delta.start(ctx, actual, desired)

	return delta
}

// Wait blocks until the delta computation finished and returns its error, if any.
// The result sets must only be accessed after Wait returned.
func (delta *Delta) Wait() error {
	return <-delta.done
}
// start drains both input streams concurrently and partitions the entities:
// whatever remains only in desired becomes Create, only in actual becomes Delete,
// and entries seen on both sides with unequal checksums become Update.
func (delta *Delta) start(ctx context.Context, actualCh, desiredCh <-chan contracts.Entity) {
	defer close(delta.done)

	// Only needed if the sync subject implements contracts.Checksumer.
	var update EntitiesById
	if delta.Subject.WithChecksum() {
		update = EntitiesById{}
	}

	actual := EntitiesById{}
	desired := EntitiesById{}

	// mtx guards actual and desired; updateMtx guards update.
	var mtx, updateMtx sync.Mutex
	g, ctx := errgroup.WithContext(ctx)

	g.Go(func() error {
		var cnt com.Counter
		defer utils.Timed(time.Now(), func(elapsed time.Duration) {
			delta.logger.Debugf(
				"Synced %d actual elements of type %s in %s", cnt.Val(), utils.Name(delta.Subject.Entity()), elapsed)
		})

		for {
			select {
			case a, ok := <-actualCh:
				if !ok {
					return nil
				}

				id := a.ID().String()

				mtx.Lock()
				if d, ok := desired[id]; ok {
					// Seen on both sides: no longer a create/delete candidate.
					delete(desired, id)
					mtx.Unlock()

					if delta.Subject.WithChecksum() && !a.(contracts.Checksumer).Checksum().Equal(d.(contracts.Checksumer).Checksum()) {
						updateMtx.Lock()
						update[id] = d
						updateMtx.Unlock()
					}
				} else {
					actual[id] = a
					mtx.Unlock()
				}

				cnt.Inc()
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	g.Go(func() error {
		var cnt com.Counter
		defer utils.Timed(time.Now(), func(elapsed time.Duration) {
			delta.logger.Debugf(
				"Synced %d desired elements of type %s in %s", cnt.Val(), utils.Name(delta.Subject.Entity()), elapsed)
		})

		for {
			select {
			case d, ok := <-desiredCh:
				if !ok {
					return nil
				}

				id := d.ID().String()

				mtx.Lock()
				if a, ok := actual[id]; ok {
					// Seen on both sides: no longer a create/delete candidate.
					delete(actual, id)
					mtx.Unlock()

					if delta.Subject.WithChecksum() && !a.(contracts.Checksumer).Checksum().Equal(d.(contracts.Checksumer).Checksum()) {
						updateMtx.Lock()
						update[id] = d
						updateMtx.Unlock()
					}
				} else {
					desired[id] = d
					mtx.Unlock()
				}

				cnt.Inc()
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	if err := g.Wait(); err != nil {
		delta.done <- err

		return
	}

	// Whatever survived in desired was never seen in actual and vice versa.
	delta.Create = desired
	delta.Delete = actual
	if delta.Subject.WithChecksum() {
		delta.Update = update
	}
}

View file

@ -0,0 +1,134 @@
package icingadb
import (
"context"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/icingaredis"
"go.uber.org/zap"
"sync"
)
// DumpSignals translates signals from the icinga:dump Redis stream into
// per-key done channels and a global in-progress channel.
type DumpSignals struct {
	redis        *icingaredis.Client
	logger       *zap.SugaredLogger
	mutex        sync.Mutex               // guards doneCh and allDoneCh
	doneCh       map[string]chan struct{} // per-key channels closed on that key's done signal
	allDoneCh    chan struct{}            // non-nil (and closed) once a global "*" done signal arrived
	inProgressCh chan struct{}            // closed when a new dump starts after done signals were handed out
}

// NewDumpSignals returns a new DumpSignals. Call Listen to start processing.
func NewDumpSignals(redis *icingaredis.Client, logger *zap.SugaredLogger) *DumpSignals {
	return &DumpSignals{
		redis:        redis,
		logger:       logger,
		doneCh:       make(map[string]chan struct{}),
		inProgressCh: make(chan struct{}),
	}
}
// Listen starts listening for dump signals in the icinga:dump Redis stream. When a done signal is received, this is
// signaled via the channels returned from the Done function.
//
// If a wip signal is received after a done signal was passed on via the Done function, this is signaled via the
// InProgress function and this function returns with err == nil. In this case, all other signals are invalidated.
// It is up to the caller to pass on this information, for example by cancelling derived contexts.
//
// This function may only be called once for each DumpSignals object. To listen for a new iteration of dump signals, a new
// DumpSignals instance must be created.
func (s *DumpSignals) Listen(ctx context.Context) error {
	// Closing a channel twice results in a panic. This function takes a chan struct{} and closes it unless it is
	// already closed. In this case it just does nothing. This function assumes that the channel is never written to
	// and that there are no concurrent attempts to close the channel.
	safeClose := func(ch chan struct{}) {
		select {
		case <-ch:
			// Reading from a closed channel returns immediately, therefore don't close it again.
		default:
			close(ch)
		}
	}

	lastStreamId := "0-0"
	anyDoneSent := false

	for {
		if err := ctx.Err(); err != nil {
			return err
		}

		cmd := s.redis.XRead(ctx, &redis.XReadArgs{
			Streams: []string{"icinga:dump", lastStreamId},
			Block:   0, // block indefinitely
		})

		result, err := cmd.Result()
		if err != nil {
			return icingaredis.WrapCmdErr(cmd)
		}

		for _, entry := range result[0].Messages {
			lastStreamId = entry.ID
			key := entry.Values["key"].(string)
			done := entry.Values["state"].(string) == "done"

			s.logger.Debugw("Received dump signal from Redis", zap.String("key", key), zap.Bool("done", done))

			if done {
				if key == "*" {
					// s.allDoneCh is only ever written by this goroutine, so the unlocked read is safe.
					if s.allDoneCh == nil {
						s.mutex.Lock()

						// Set s.allDoneCh to signal for all future listeners that we've received an all-done signal.
						s.allDoneCh = make(chan struct{})
						close(s.allDoneCh)

						// Notify all existing listeners.
						for _, ch := range s.doneCh {
							safeClose(ch)
						}

						s.mutex.Unlock()
					}
				} else {
					s.mutex.Lock()
					if ch, ok := s.doneCh[key]; ok {
						safeClose(ch)
					}
					s.mutex.Unlock()
				}

				anyDoneSent = true
			} else if anyDoneSent {
				// Received a wip signal after handing out any done signal via one of the channels returned by Done,
				// signal that a new dump is in progress. This treats every state=wip as if it has key=*, which is the
				// only key for which state=wip is currently sent by Icinga 2.
				close(s.inProgressCh)

				return nil
			}
		}
	}
}
// Done returns a channel that is closed when the given key receives a done dump signal.
func (s *DumpSignals) Done(key string) <-chan struct{} {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	// After a global all-done signal, individual keys no longer matter.
	if s.allDoneCh != nil {
		return s.allDoneCh
	}

	ch, ok := s.doneCh[key]
	if !ok {
		// First request for this key, create new wait channel.
		ch = make(chan struct{})
		s.doneCh[key] = ch
	}

	return ch
}

// InProgress returns a channel that is closed when a new dump is in progress after done signals were sent to channels
// returned by Done.
func (s *DumpSignals) InProgress() <-chan struct{} {
	return s.inProgressCh
}

View file

@ -0,0 +1,44 @@
package icingadb
import (
"context"
"github.com/icinga/icingadb/pkg/contracts"
)
// EntitiesById is a map of entities keyed by the string form of their ID.
type EntitiesById map[string]contracts.Entity

// Keys returns all map keys.
func (ebi EntitiesById) Keys() []string {
	keys := make([]string, 0, len(ebi))
	for key := range ebi {
		keys = append(keys, key)
	}

	return keys
}

// IDs returns the IDs of all entities in the map.
func (ebi EntitiesById) IDs() []interface{} {
	ids := make([]interface{}, 0, len(ebi))
	for _, entity := range ebi {
		ids = append(ids, entity.(contracts.IDer).ID())
	}

	return ids
}

// Entities streams all entities of the map into the returned channel
// until the map is exhausted or ctx is canceled.
func (ebi EntitiesById) Entities(ctx context.Context) <-chan contracts.Entity {
	entities := make(chan contracts.Entity)

	go func() {
		defer close(entities)

		for _, entity := range ebi {
			select {
			case <-ctx.Done():
				return
			case entities <- entity:
			}
		}
	}()

	return entities
}

289
pkg/icingadb/ha.go Normal file
View file

@ -0,0 +1,289 @@
package icingadb
import (
"context"
"database/sql"
"encoding/hex"
"github.com/google/uuid"
"github.com/icinga/icingadb/pkg/backoff"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/icingaredis"
icingaredisv1 "github.com/icinga/icingadb/pkg/icingaredis/v1"
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"sync"
"time"
)
// timeout is the duration after which a heartbeat is considered stale.
var timeout = 60 * time.Second

// HA tracks which Icinga DB instance of an environment is responsible,
// based on Redis heartbeats and rows in the icingadb_instance table.
type HA struct {
	ctx         context.Context
	cancel      context.CancelFunc
	instanceId  types.Binary // random UUID identifying this process
	db          *DB
	heartbeat   *icingaredis.Heartbeat
	logger      *zap.SugaredLogger
	responsible bool
	handover    chan struct{} // signals loss of responsibility (see Handover)
	takeover    chan struct{} // signals gain of responsibility (see Takeover)
	done        chan struct{} // closed when the controller loop ended
	mu          *sync.Mutex   // guards err
	err         error
	errOnce     sync.Once // ensures only the first error is kept
}
func NewHA(ctx context.Context, db *DB, heartbeat *icingaredis.Heartbeat, logger *zap.SugaredLogger) *HA {
ctx, cancel := context.WithCancel(ctx)
instanceId := uuid.New()
ha := &HA{
ctx: ctx,
cancel: cancel,
instanceId: instanceId[:],
db: db,
heartbeat: heartbeat,
logger: logger,
handover: make(chan struct{}),
takeover: make(chan struct{}),
done: make(chan struct{}),
mu: &sync.Mutex{},
}
go ha.controller()
return ha
}
// Close implements the io.Closer interface.
func (h *HA) Close() error {
// Cancel ctx.
h.cancel()
// Wait until the controller loop ended.
<-h.Done()
// Remove our instance from the database.
h.removeInstance()
// And return an error, if any.
return h.Err()
}
// Done returns a channel that is closed once the controller loop has ended.
func (h *HA) Done() <-chan struct{} {
	return h.done
}
// Err returns the error that aborted the controller, or nil.
func (h *HA) Err() error {
	h.mu.Lock()
	defer h.mu.Unlock()

	return h.err
}
// Handover returns the channel that is signaled when responsibility is lost.
func (h *HA) Handover() chan struct{} {
	return h.handover
}
// Takeover returns the channel that is signaled when responsibility is gained.
func (h *HA) Takeover() chan struct{} {
	return h.takeover
}
// abort stores the first error passed to it and shuts the controller down.
// Subsequent calls are no-ops.
func (h *HA) abort(err error) {
	h.errOnce.Do(func() {
		h.mu.Lock()
		h.err = err
		h.mu.Unlock()

		h.cancel()
	})
}
// controller loop: consumes heartbeats, realizes our state in the database,
// and signals handovers on lost or outdated heartbeats, until h.ctx is
// canceled or the beat channel is closed.
func (h *HA) controller() {
	defer close(h.done)

	h.logger.Debugw("Starting HA", zap.String("instance_id", hex.EncodeToString(h.instanceId)))

	oldInstancesRemoved := false

	// Rate-limit the "another instance is active" log message to once a minute.
	logTicker := time.NewTicker(time.Second * 60)
	defer logTicker.Stop()
	shouldLog := true

	for {
		select {
		case m, ok := <-h.heartbeat.Beat():
			if !ok {
				// Beat channel closed.
				return
			}

			now := time.Now()

			t, err := m.Time()
			if err != nil {
				h.abort(err)
				// Bug fix: t is unusable after an error; don't dereference it.
				// abort canceled h.ctx, so the loop exits on the next iteration.
				continue
			}

			tt := t.Time()
			if tt.After(now.Add(1 * time.Second)) {
				h.logger.Debugw("Received heartbeat from future", "time", t)
			}

			if tt.Before(now.Add(-1 * timeout)) {
				h.logger.Errorw("Received heartbeat from the past", "time", t)

				h.signalHandover()

				continue
			}

			s, err := m.IcingaStatus()
			if err != nil {
				h.abort(err)
				// Bug fix: s is unusable after an error (see above).
				continue
			}

			// Drain the ticker non-blockingly; a tick re-enables logging below.
			select {
			case <-logTicker.C:
				shouldLog = true
			default:
			}

			if err = h.realize(s, t, shouldLog); err != nil {
				h.abort(err)
			}

			// Clean up leftover rows of dead instances exactly once per run.
			if !oldInstancesRemoved {
				go h.removeOldInstances(s)

				oldInstancesRemoved = true
			}

			shouldLog = false
		case <-h.heartbeat.Lost():
			h.logger.Error("Lost heartbeat")
			h.signalHandover()
		case <-h.ctx.Done():
			return
		}
	}
}
// realize upserts our row in icingadb_instance inside a serializable
// transaction and decides whether to take over responsibility: we take over
// unless another instance's row is responsible and has a recent heartbeat.
// Deadlocked transactions are retried with exponential backoff plus jitter.
//
// NOTE(review): the per-attempt cancel from context.WithCancel is only called
// on the NamedExecContext error path; on the early-return and success paths
// the derived context is never canceled until h.ctx ends — consider ensuring
// cancel runs on every path. Likewise the early returns after BeginTxx leave
// the transaction neither committed nor rolled back. TODO confirm upstream.
func (h *HA) realize(s *icingaredisv1.IcingaStatus, t *types.UnixMilli, shouldLog bool) error {
	boff := backoff.NewExponentialWithJitter(time.Millisecond*256, time.Second*3)
	for attempt := 0; true; attempt++ {
		// Back off before each attempt; the delay grows with the attempt number.
		sleep := boff(uint64(attempt))
		time.Sleep(sleep)
		ctx, cancel := context.WithCancel(h.ctx)
		// Serializable isolation so two instances can't both decide to take over.
		tx, err := h.db.BeginTxx(ctx, &sql.TxOptions{
			Isolation: sql.LevelSerializable,
		})
		if err != nil {
			return err
		}
		// Look for another responsible instance with a heartbeat newer than `timeout` ago.
		rows, err := tx.QueryxContext(ctx, `SELECT id, heartbeat FROM icingadb_instance WHERE environment_id = ? AND responsible = ? AND id != ? AND heartbeat > ?`, s.EnvironmentID(), "y", h.instanceId, utils.UnixMilli(time.Now().Add(-1*timeout)))
		if err != nil {
			return err
		}
		takeover := true
		for rows.Next() {
			instance := &v1.IcingadbInstance{}
			err := rows.StructScan(instance)
			if err != nil {
				h.logger.Errorw("Can't scan currently active instance", zap.Error(err))
				break
			}
			if shouldLog {
				h.logger.Infow("Another instance is active", "instance_id", instance.Id, zap.String("environment", s.Environment), "heartbeat", instance.Heartbeat, zap.Duration("heartbeat_age", time.Now().Sub(instance.Heartbeat.Time())))
			}
			// One active other instance is enough to deny takeover.
			takeover = false
			break
		}
		_ = rows.Close()
		// Upsert our own row with the latest heartbeat and Icinga 2 status flags.
		i := v1.IcingadbInstance{
			EntityWithoutChecksum: v1.EntityWithoutChecksum{
				IdMeta: v1.IdMeta{
					Id: h.instanceId,
				},
			},
			EnvironmentMeta: v1.EnvironmentMeta{
				EnvironmentId: s.EnvironmentID(),
			},
			Heartbeat: *t,
			Responsible: types.Bool{Bool: takeover || h.responsible, Valid: true},
			EndpointId: s.EndpointId,
			Icinga2Version: s.Version,
			Icinga2StartTime: s.ProgramStart,
			Icinga2NotificationsEnabled: s.NotificationsEnabled,
			Icinga2ActiveServiceChecksEnabled: s.ActiveServiceChecksEnabled,
			Icinga2ActiveHostChecksEnabled: s.ActiveHostChecksEnabled,
			Icinga2EventHandlersEnabled: s.EventHandlersEnabled,
			Icinga2FlapDetectionEnabled: s.FlapDetectionEnabled,
			Icinga2PerformanceDataEnabled: s.PerformanceDataEnabled,
		}
		stmt, _ := h.db.BuildUpsertStmt(i)
		_, err = tx.NamedExecContext(ctx, stmt, i)
		if err != nil {
			cancel()
			if !utils.IsDeadlock(err) {
				// Non-deadlock errors are not retried.
				h.logger.Errorw("Can't update or insert instance", zap.Error(err))
				break
			} else {
				if attempt > 2 {
					// Log with info level after third attempt
					h.logger.Infow("Can't update or insert instance. Retrying", zap.Error(err), zap.Int("retry count", attempt))
				} else {
					h.logger.Debugw("Can't update or insert instance. Retrying", zap.Error(err), zap.Int("retry count", attempt))
				}
				continue
			}
		}
		if err := tx.Commit(); err != nil {
			return err
		}
		if takeover {
			h.signalTakeover()
		}
		break
	}
	return nil
}
// removeInstance deletes our own row from icingadb_instance. Failures are
// only logged, as this is best-effort cleanup during shutdown.
func (h *HA) removeInstance() {
	h.logger.Debugw("Removing our row from icingadb_instance", zap.String("instance_id", hex.EncodeToString(h.instanceId)))

	// Intentionally not using a context here as this is a cleanup task and h.ctx is already cancelled.
	if _, err := h.db.Exec("DELETE FROM icingadb_instance WHERE id = ?", h.instanceId); err != nil {
		h.logger.Warnw("Could not remove instance from database", zap.Error(err))
	}
}
// removeOldInstances waits one timeout period, then deletes rows of other
// instances of the same environment/endpoint whose heartbeat is stale.
func (h *HA) removeOldInstances(s *icingaredisv1.IcingaStatus) {
	select {
	case <-h.ctx.Done():
		return
	case <-time.After(timeout):
		heartbeatCutoff := types.UnixMilli(time.Now().Add(-timeout))
		result, err := h.db.ExecContext(h.ctx, "DELETE FROM icingadb_instance "+
			"WHERE id != ? AND environment_id = ? AND endpoint_id = ? AND heartbeat < ?",
			h.instanceId, s.EnvironmentID(), s.EndpointId, heartbeatCutoff)
		if err != nil {
			h.logger.Errorw("Can't remove rows of old instances", zap.Error(err))
			return
		}

		affected, err := result.RowsAffected()
		if err != nil {
			h.logger.Errorw("Can't get number of removed old instances", zap.Error(err))
			return
		}

		h.logger.Debugf("Removed %d old instances", affected)
	}
}
// signalHandover marks us as not responsible and notifies the handover
// channel, but only if we were responsible before.
func (h *HA) signalHandover() {
	if !h.responsible {
		return
	}

	h.responsible = false
	h.handover <- struct{}{}
}
// signalTakeover marks us as responsible and notifies the takeover channel,
// but only if we were not responsible before.
func (h *HA) signalTakeover() {
	if h.responsible {
		return
	}

	h.responsible = true
	h.takeover <- struct{}{}
}

View file

@ -0,0 +1,304 @@
package history
import (
"context"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingadb"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1/history"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/structify"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"reflect"
"sync"
"time"
)
// Sync specifies the source and destination of a history sync.
type Sync struct {
	db     *DB // destination database (note: field type as in SOURCE)
	redis  *icingaredis.Client // source Redis with the icinga:history:stream:* streams
	logger *zap.SugaredLogger
}
// NewSync creates a new Sync.
func NewSync(db *icingadb.DB, redis *icingaredis.Client, logger *zap.SugaredLogger) *Sync {
	s := Sync{db: db, redis: redis, logger: logger}

	return &s
}
// insertedMessage represents a just inserted row. cleanup uses these to track
// when every table derived from one Redis message has been written.
type insertedMessage struct {
	// redisId specifies the origin Redis message.
	redisId string
	// structType represents the table the row was inserted into.
	structType reflect.Type
}
// bulkSize is the channel buffer size and the XRead/XDel batch size (16384).
const bulkSize = 1 << 14
// Sync synchronizes Redis history streams from s.redis to s.db and deletes the original data on success.
//
// Per stream it builds a pipeline: one xRead fans messages out to one
// structifier per destination table; each structifier feeds an upsert worker;
// successfully upserted rows are reported back so cleanup can XDel a message
// once ALL its tables have been written.
func (s Sync) Sync(ctx context.Context) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, hs := range historyStreams {
		// Fan-out targets of xRead, one channel per destination table.
		var redis2structs []chan<- redis.XMessage
		insertedMessages := make(chan insertedMessage, bulkSize)
		// messageProgress are the tables (represented by struct types)
		// with successfully inserted rows by Redis message ID.
		messageProgress := map[string]map[reflect.Type]struct{}{}
		messageProgressMtx := &sync.Mutex{}
		stream := "icinga:history:stream:" + hs.kind
		s.logger.Infof("Syncing %s history", hs.kind)
		for _, structifier := range hs.structifiers {
			redis2struct := make(chan redis.XMessage, bulkSize)
			struct2db := make(chan contracts.Entity, bulkSize)
			succeeded := make(chan contracts.Entity, bulkSize)
			// rowIds are IDs of to be synced Redis messages by database row.
			rowIds := map[contracts.Entity]string{}
			rowIdsMtx := &sync.Mutex{}
			redis2structs = append(redis2structs, redis2struct)
			g.Go(structifyStream(ctx, structifier, redis2struct, struct2db, rowIds, rowIdsMtx))
			g.Go(fwdSucceeded(ctx, insertedMessages, succeeded, rowIds, rowIdsMtx))
			// Upserts from struct2db.
			g.Go(func() error {
				defer close(succeeded)
				return s.db.UpsertStreamed(ctx, struct2db, succeeded)
			})
		}
		g.Go(s.xRead(ctx, redis2structs, stream))
		g.Go(s.cleanup(ctx, hs, insertedMessages, messageProgress, messageProgressMtx, stream))
	}
	return g.Wait()
}
// xRead reads from the Redis stream and broadcasts the data to redis2structs.
// On return it closes all fan-out channels so downstream workers terminate.
func (s Sync) xRead(ctx context.Context, redis2structs []chan<- redis.XMessage, stream string) func() error {
	return func() error {
		defer func() {
			for _, r2s := range redis2structs {
				close(r2s)
			}
		}()
		xra := &redis.XReadArgs{
			Streams: []string{stream, "0-0"}, // start at the beginning of the stream
			Count:   bulkSize,
			Block:   10 * time.Second,
		}
		for {
			cmd := s.redis.XRead(ctx, xra)
			streams, err := cmd.Result()
			// redis.Nil just means the 10s block expired without data; keep polling.
			if err != nil && err != redis.Nil {
				return icingaredis.WrapCmdErr(cmd)
			}
			for _, stream := range streams {
				for _, message := range stream.Messages {
					// Remember the last seen ID so the next XRead resumes after it.
					xra.Streams[1] = message.ID
					// Broadcast the message to every structifier.
					for _, r2s := range redis2structs {
						select {
						case <-ctx.Done():
							return ctx.Err()
						case r2s <- message:
						}
					}
				}
			}
		}
	}
}
// structifyStream converts Redis messages from redis2struct into entities on
// struct2db, remembering each entity's origin message ID in rowIds.
func structifyStream(
	ctx context.Context, structifier structify.MapStructifier, redis2struct <-chan redis.XMessage,
	struct2db chan<- contracts.Entity, rowIds map[contracts.Entity]string, rowIdsMtx *sync.Mutex,
) func() error {
	return func() error {
		defer close(struct2db)

		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case msg, ok := <-redis2struct:
				if !ok {
					// Upstream xRead finished.
					return nil
				}

				ptr, err := structifier(msg.Values)
				if err != nil {
					return err
				}

				entity := ptr.(v1.UpserterEntity)

				// Track which Redis message this row came from, for cleanup.
				rowIdsMtx.Lock()
				rowIds[entity] = msg.ID
				rowIdsMtx.Unlock()

				select {
				case <-ctx.Done():
					return ctx.Err()
				case struct2db <- entity:
				}
			}
		}
	}
}
// fwdSucceeded informs insertedMessages about successfully inserted rows according to succeeded.
// It resolves each row back to its origin Redis message ID via rowIds and
// removes the mapping once consumed.
func fwdSucceeded(
	ctx context.Context, insertedMessages chan<- insertedMessage, succeeded <-chan contracts.Entity,
	rowIds map[contracts.Entity]string, rowIdsMtx *sync.Mutex,
) func() error {
	return func() error {
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case row, open := <-succeeded:
				if !open {
					return nil
				}

				// Look up and drop the row's origin message ID atomically.
				rowIdsMtx.Lock()
				id, found := rowIds[row]
				if found {
					delete(rowIds, row)
				}
				rowIdsMtx.Unlock()

				if !found {
					continue
				}

				select {
				case <-ctx.Done():
					return ctx.Err()
				case insertedMessages <- insertedMessage{id, reflect.TypeOf(row).Elem()}:
				}
			}
		}
	}
}
// cleanup collects completely inserted messages from insertedMessages and deletes them from Redis.
//
// A message counts as complete once a row for every structifier (table) of hs
// has been inserted. Complete IDs are XDel'd in batches: either when bulkSize
// IDs accumulated, or 250ms after the first ID of a batch arrived.
func (s Sync) cleanup(
	ctx context.Context, hs historyStream, insertedMessages <-chan insertedMessage,
	messageProgress map[string]map[reflect.Type]struct{}, messageProgressMtx *sync.Mutex, stream string,
) func() error {
	return func() error {
		var ids []string // current XDel batch
		var count uint64 // inserted entries since the last log line
		var timeout <-chan time.Time // nil unless a partial batch is pending
		const period = 20 * time.Second
		periodically := time.NewTicker(period)
		defer periodically.Stop()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-periodically.C:
				if count > 0 {
					s.logger.Infof("Inserted %d %s history entries in the last %s", count, hs.kind, period)
					count = 0
				}
			case msg := <-insertedMessages:
				// Record msg's table as done; the message is complete when all
				// of hs' tables reported in.
				messageProgressMtx.Lock()
				mp, ok := messageProgress[msg.redisId]
				if !ok {
					mp = map[reflect.Type]struct{}{}
					messageProgress[msg.redisId] = mp
				}
				mp[msg.structType] = struct{}{}
				if ok = len(mp) == len(hs.structifiers); ok {
					delete(messageProgress, msg.redisId)
				}
				messageProgressMtx.Unlock()
				if ok {
					ids = append(ids, msg.redisId)
					count++
					switch len(ids) {
					case 1:
						// First ID of a batch: flush at the latest in 250ms.
						timeout = time.After(time.Second / 4)
					case bulkSize:
						// Batch full: flush now.
						cmd := s.redis.XDel(ctx, stream, ids...)
						if _, err := cmd.Result(); err != nil {
							return icingaredis.WrapCmdErr(cmd)
						}
						ids = nil
						timeout = nil
					}
				}
			case <-timeout:
				// Flush the partial batch that waited long enough.
				cmd := s.redis.XDel(ctx, stream, ids...)
				if _, err := cmd.Result(); err != nil {
					return icingaredis.WrapCmdErr(cmd)
				}
				ids = nil
				timeout = nil
			}
		}
	}
}
// historyStream represents a Redis history stream (icinga:history:stream:<kind>).
type historyStream struct {
	// kind specifies the stream's purpose, e.g. "state" or "downtime".
	kind string
	// structifiers lists the factories of the model structs the stream data shall be copied to,
	// one per destination table.
	structifiers []structify.MapStructifier
}
// historyStreams contains all Redis history streams to sync. Built once at
// package init from a table of stream kinds and their destination struct types.
var historyStreams = func() []historyStream {
	var streams []historyStream
	for _, rhs := range []struct {
		kind       string
		structPtrs []v1.UpserterEntity
	}{
		{"notification", []v1.UpserterEntity{(*v1.NotificationHistory)(nil), (*v1.HistoryNotification)(nil)}},
		{"usernotification", []v1.UpserterEntity{(*v1.UserNotificationHistory)(nil)}},
		{"state", []v1.UpserterEntity{(*v1.StateHistory)(nil), (*v1.HistoryState)(nil)}},
		{"downtime", []v1.UpserterEntity{(*v1.DowntimeHistory)(nil), (*v1.HistoryDowntime)(nil)}},
		{"comment", []v1.UpserterEntity{(*v1.CommentHistory)(nil), (*v1.HistoryComment)(nil)}},
		{"flapping", []v1.UpserterEntity{(*v1.FlappingHistory)(nil), (*v1.HistoryFlapping)(nil)}},
		{"acknowledgement", []v1.UpserterEntity{(*v1.AcknowledgementHistory)(nil), (*v1.HistoryAck)(nil)}},
	} {
		// Derive one structifier per destination struct, mapped via `json` tags.
		var structifiers []structify.MapStructifier
		for _, structPtr := range rhs.structPtrs {
			structifiers = append(structifiers, structify.MakeMapStructifier(reflect.TypeOf(structPtr).Elem(), "json"))
		}
		streams = append(streams, historyStream{rhs.kind, structifiers})
	}
	return streams
}()

View file

@ -0,0 +1,213 @@
package objectpacker
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"reflect"
"sort"
)
// MustPackAny calls PackAny using in and panics if there was an error.
func MustPackAny(in ...interface{}) []byte {
	buf := &bytes.Buffer{}
	if err := PackAny(in, buf); err != nil {
		panic(err)
	}

	return buf.Bytes()
}
// PackAny packs any JSON-encodable value (ex. structs, also ignores interfaces like encoding.TextMarshaler)
// to a BSON-similar format suitable for consistent hashing. Spec:
//
// PackAny(nil)            => 0x0
// PackAny(false)          => 0x1
// PackAny(true)           => 0x2
// PackAny(float64(42))    => 0x3 ieee754_binary64_bigendian(42)
// PackAny("exämple")      => 0x4 uint64_bigendian(len([]byte("exämple"))) []byte("exämple")
// PackAny([]uint8{0x42})  => 0x4 uint64_bigendian(len([]uint8{0x42})) []uint8{0x42}
// PackAny([1]uint8{0x42}) => 0x4 uint64_bigendian(len([1]uint8{0x42})) [1]uint8{0x42}
// PackAny([]T{x,y})       => 0x5 uint64_bigendian(len([]T{x,y})) PackAny(x) PackAny(y)
// PackAny(map[K]V{x:y})   => 0x6 uint64_bigendian(len(map[K]V{x:y})) len(map_key(x)) map_key(x) PackAny(y)
// PackAny((*T)(nil))      => 0x0
// PackAny((*T)(0x42))     => PackAny(*(*T)(0x42))
// PackAny(x)              => panic()
//
// map_key([1]uint8{0x42}) => [1]uint8{0x42}
// map_key(x)              => []byte(fmt.Sprint(x))
//
// Unsupported kinds (ints, structs, channels, funcs, ...) panic, see spec above.
func PackAny(in interface{}, out io.Writer) error {
	return packValue(reflect.ValueOf(in), out)
}
// tByte and tBytes cache the reflect.Types of byte and []byte for the
// byte-sequence special cases in packValue.
var tByte = reflect.TypeOf(byte(0))
var tBytes = reflect.TypeOf([]uint8(nil))
// packValue does the actual job of packAny and just exists for recursion w/o unnecessary reflect.ValueOf calls.
// The byte format it emits is consumed for consistent hashing, so any change
// here changes hashes — see the spec on PackAny.
func packValue(in reflect.Value, out io.Writer) error {
	switch kind := in.Kind(); kind {
	case reflect.Invalid: // nil
		_, err := out.Write([]byte{0})
		return err
	case reflect.Bool:
		if in.Bool() {
			_, err := out.Write([]byte{2})
			return err
		} else {
			_, err := out.Write([]byte{1})
			return err
		}
	case reflect.Float64:
		// Type marker, then the IEEE 754 bits big-endian.
		if _, err := out.Write([]byte{3}); err != nil {
			return err
		}
		return binary.Write(out, binary.BigEndian, in.Float())
	case reflect.Array, reflect.Slice:
		if typ := in.Type(); typ.Elem() == tByte {
			if kind == reflect.Array {
				// Arrays must be addressable before slicing; copy if needed.
				if !in.CanAddr() {
					vNewElem := reflect.New(typ).Elem()
					vNewElem.Set(in)
					in = vNewElem
				}
				in = in.Slice(0, in.Len())
			}
			// Pack []byte as string, not array of numbers.
			return packString(in.Convert(tBytes). // Support types.Binary
								Interface().([]uint8), out)
		}
		if _, err := out.Write([]byte{5}); err != nil {
			return err
		}
		l := in.Len()
		if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil {
			return err
		}
		for i := 0; i < l; i++ {
			if err := packValue(in.Index(i), out); err != nil {
				return err
			}
		}
		// If there aren't any values to pack, ...
		if l < 1 {
			// ... create one and pack it - panics on disallowed type.
			_ = packValue(reflect.Zero(in.Type().Elem()), ioutil.Discard)
		}
		return nil
	case reflect.Interface:
		// Unwrap and pack the dynamic value.
		return packValue(in.Elem(), out)
	case reflect.Map:
		type kv struct {
			key   []byte
			value reflect.Value
		}
		if _, err := out.Write([]byte{6}); err != nil {
			return err
		}
		l := in.Len()
		if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil {
			return err
		}
		// Collect the entries with pre-rendered keys, then sort for a
		// deterministic output order (Go map iteration order is random).
		sorted := make([]kv, 0, l)
		{
			iter := in.MapRange()
			for iter.Next() {
				var packedKey []byte
				if key := iter.Key(); key.Kind() == reflect.Array {
					if typ := key.Type(); typ.Elem() == tByte {
						// Byte-array keys are used verbatim (see map_key in the spec).
						if !key.CanAddr() {
							vNewElem := reflect.New(typ).Elem()
							vNewElem.Set(key)
							key = vNewElem
						}
						packedKey = key.Slice(0, key.Len()).Interface().([]byte)
					} else {
						// Not just stringify the key (below), but also pack it (here) - panics on disallowed type.
						_ = packValue(iter.Key(), ioutil.Discard)
						packedKey = []byte(fmt.Sprint(key.Interface()))
					}
				} else {
					// Not just stringify the key (below), but also pack it (here) - panics on disallowed type.
					_ = packValue(iter.Key(), ioutil.Discard)
					packedKey = []byte(fmt.Sprint(key.Interface()))
				}
				sorted = append(sorted, kv{packedKey, iter.Value()})
			}
		}
		sort.Slice(sorted, func(i, j int) bool { return bytes.Compare(sorted[i].key, sorted[j].key) < 0 })
		for _, kv := range sorted {
			// Per entry: key length, raw key bytes, then the packed value.
			if err := binary.Write(out, binary.BigEndian, uint64(len(kv.key))); err != nil {
				return err
			}
			if _, err := out.Write(kv.key); err != nil {
				return err
			}
			if err := packValue(kv.value, out); err != nil {
				return err
			}
		}
		// If there aren't any key-value pairs to pack, ...
		if l < 1 {
			typ := in.Type()
			// ... create one and pack it - panics on disallowed type.
			_ = packValue(reflect.Zero(typ.Key()), ioutil.Discard)
			_ = packValue(reflect.Zero(typ.Elem()), ioutil.Discard)
		}
		return nil
	case reflect.Ptr:
		if in.IsNil() {
			err := packValue(reflect.Value{}, out)
			// Create a fictive referenced value and pack it - panics on disallowed type.
			_ = packValue(reflect.Zero(in.Type().Elem()), ioutil.Discard)
			return err
		} else {
			return packValue(in.Elem(), out)
		}
	case reflect.String:
		return packString([]byte(in.String()), out)
	default:
		// Ints, structs, chans, funcs etc. are deliberately unsupported.
		panic("bad type: " + in.Kind().String())
	}
}
// packString deduplicates string packing of multiple locations in packValue.
func packString(in []byte, out io.Writer) error {
	// Type marker for strings/byte sequences.
	if _, err := out.Write([]byte{4}); err != nil {
		return err
	}

	// Big-endian length prefix, then the raw bytes.
	if err := binary.Write(out, binary.BigEndian, uint64(len(in))); err != nil {
		return err
	}

	_, err := out.Write(in)

	return err
}

View file

@ -0,0 +1,195 @@
package objectpacker
import (
"bytes"
"github.com/icinga/icingadb/pkg/types"
"io"
"testing"
"unsafe"
)
// limitedWriter allows writing a specific amount of data.
type limitedWriter struct {
	// limit specifies how many bytes to allow to write.
	limit int
}

var _ io.Writer = (*limitedWriter)(nil)

// Write returns io.EOF once lw.limit is exceeded, nil otherwise.
func (lw *limitedWriter) Write(p []byte) (n int, err error) {
	if len(p) > lw.limit {
		// Accept only what fits, then signal end of capacity.
		n, err = lw.limit, io.EOF
		lw.limit = 0

		return
	}

	lw.limit -= len(p)

	return len(p), nil
}
// TestLimitedWriter_Write covers full, exact, truncated, exhausted and empty writes.
func TestLimitedWriter_Write(t *testing.T) {
	for _, tc := range []struct {
		limitBefore int
		p           []byte
		n           int
		err         error
		limitAfter  int
	}{
		{3, []byte{1, 2}, 2, nil, 1},
		{3, []byte{1, 2, 3}, 3, nil, 0},
		{3, []byte{1, 2, 3, 4}, 3, io.EOF, 0},
		{0, []byte{1}, 0, io.EOF, 0},
		{0, nil, 0, nil, 0},
	} {
		assertLimitedWriter_Write(t, tc.limitBefore, tc.p, tc.n, tc.err, tc.limitAfter)
	}
}
// assertLimitedWriter_Write checks one Write call against the expected
// returned n, returned err and remaining limit.
func assertLimitedWriter_Write(t *testing.T, limitBefore int, p []byte, n int, err error, limitAfter int) {
	t.Helper()

	lw := limitedWriter{limitBefore}
	gotN, gotErr := lw.Write(p)

	if gotErr != err {
		t.Errorf("_, err := (&limitedWriter{%d}).Write(%#v); err != %#v", limitBefore, p, err)
	}

	if gotN != n {
		t.Errorf("n, _ := (&limitedWriter{%d}).Write(%#v); n != %d", limitBefore, p, n)
	}

	if lw.limit != limitAfter {
		t.Errorf("lw := limitedWriter{%d}; lw.Write(%#v); lw.limit != %d", limitBefore, p, limitAfter)
	}
}
// TestPackAny pins the exact byte output (or expected panic) of PackAny for
// every supported and unsupported kind per the spec on PackAny.
func TestPackAny(t *testing.T) {
	// Scalars.
	assertPackAny(t, nil, []byte{0})
	assertPackAny(t, false, []byte{1})
	assertPackAny(t, true, []byte{2})
	// All integer kinds are deliberately unsupported.
	assertPackAnyPanic(t, -42, 0)
	assertPackAnyPanic(t, int8(-42), 0)
	assertPackAnyPanic(t, int16(-42), 0)
	assertPackAnyPanic(t, int32(-42), 0)
	assertPackAnyPanic(t, int64(-42), 0)
	assertPackAnyPanic(t, uint(42), 0)
	assertPackAnyPanic(t, uint8(42), 0)
	assertPackAnyPanic(t, uint16(42), 0)
	assertPackAnyPanic(t, uint32(42), 0)
	assertPackAnyPanic(t, uint64(42), 0)
	assertPackAnyPanic(t, uintptr(42), 0)
	assertPackAnyPanic(t, float32(-42.5), 0)
	assertPackAny(t, -42.5, []byte{3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0})
	// Slices: empty ones still probe the element type (9 = marker + length written first).
	assertPackAnyPanic(t, []struct{}(nil), 9)
	assertPackAnyPanic(t, []struct{}{}, 9)
	assertPackAny(t, []interface{}{nil, true, -42.5}, []byte{
		5, 0, 0, 0, 0, 0, 0, 0, 3,
		0,
		2,
		3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0,
	})
	assertPackAny(t, []string{"", "a"}, []byte{
		5, 0, 0, 0, 0, 0, 0, 0, 2,
		4, 0, 0, 0, 0, 0, 0, 0, 0,
		4, 0, 0, 0, 0, 0, 0, 0, 1, 'a',
	})
	assertPackAnyPanic(t, []interface{}{0 + 0i}, 9)
	// Maps: entries are sorted by rendered key; bad key/value types panic.
	assertPackAnyPanic(t, map[struct{}]struct{}(nil), 9)
	assertPackAnyPanic(t, map[struct{}]struct{}{}, 9)
	assertPackAny(t, map[interface{}]interface{}{true: "", "nil": -42.5}, []byte{
		6, 0, 0, 0, 0, 0, 0, 0, 2,
		0, 0, 0, 0, 0, 0, 0, 3, 'n', 'i', 'l',
		3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 4, 't', 'r', 'u', 'e',
		4, 0, 0, 0, 0, 0, 0, 0, 0,
	})
	assertPackAny(t, map[string]float64{"": 42}, []byte{
		6, 0, 0, 0, 0, 0, 0, 0, 1,
		0, 0, 0, 0, 0, 0, 0, 0,
		3, 0x40, 0x45, 0, 0, 0, 0, 0, 0,
	})
	// Byte-array keys are used verbatim, not stringified.
	assertPackAny(t, map[[1]byte]bool{[1]byte{42}: true}, []byte{
		6, 0, 0, 0, 0, 0, 0, 0, 1,
		0, 0, 0, 0, 0, 0, 0, 1, 42,
		2,
	})
	assertPackAnyPanic(t, map[struct{}]struct{}{{}: {}}, 9)
	// Pointers: nil packs as nil but still probes the pointee type.
	assertPackAny(t, (*string)(nil), []byte{0})
	assertPackAnyPanic(t, (*int)(nil), 0)
	assertPackAny(t, new(float64), []byte{3, 0, 0, 0, 0, 0, 0, 0, 0})
	// Strings and byte sequences share the 0x4 encoding.
	assertPackAny(t, "", []byte{4, 0, 0, 0, 0, 0, 0, 0, 0})
	assertPackAny(t, "a", []byte{4, 0, 0, 0, 0, 0, 0, 0, 1, 'a'})
	assertPackAny(t, "ä", []byte{4, 0, 0, 0, 0, 0, 0, 0, 2, 0xc3, 0xa4})
	{
		var binary [256]byte
		for i := range binary {
			binary[i] = byte(i)
		}
		assertPackAny(t, binary, append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...))
		assertPackAny(t, binary[:], append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...))
		assertPackAny(t, types.Binary(binary[:]), append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...))
	}
	{
		// Only element type byte itself gets the string encoding, not named byte types.
		type myByte byte
		assertPackAnyPanic(t, []myByte(nil), 9)
	}
	// Remaining unsupported kinds.
	assertPackAnyPanic(t, complex64(0+0i), 0)
	assertPackAnyPanic(t, 0+0i, 0)
	assertPackAnyPanic(t, make(chan struct{}, 0), 0)
	assertPackAnyPanic(t, func() {}, 0)
	assertPackAnyPanic(t, struct{}{}, 0)
	assertPackAnyPanic(t, unsafe.Pointer(uintptr(0)), 0)
}
// assertPackAny checks that PackAny writes exactly out for in, and that it
// returns io.EOF for every write limit smaller than the full output.
func assertPackAny(t *testing.T, in interface{}, out []byte) {
	t.Helper()

	{
		buf := &bytes.Buffer{}
		if err := PackAny(in, buf); err == nil {
			// staticcheck S1004: use bytes.Equal instead of bytes.Compare != 0.
			if !bytes.Equal(buf.Bytes(), out) {
				t.Errorf("buf := &bytes.Buffer{}; packAny(%#v, buf); !bytes.Equal(buf.Bytes(), %#v)", in, out)
			}
		} else {
			t.Errorf("packAny(%#v, &bytes.Buffer{}) != nil", in)
		}
	}

	// Every truncation point must surface the writer's io.EOF.
	for i := 0; i < len(out); i++ {
		if PackAny(in, &limitedWriter{i}) != io.EOF {
			t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i)
		}
	}
}
// assertPackAnyPanic checks that PackAny panics for the unsupported value in
// once allowed to write allowToWrite bytes, and that every smaller write limit
// yields io.EOF before the panic is reached.
func assertPackAnyPanic(t *testing.T, in interface{}, allowToWrite int) {
	t.Helper()

	for i := 0; i < allowToWrite; i++ {
		if PackAny(in, &limitedWriter{i}) != io.EOF {
			t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i)
		}
	}

	defer func() {
		t.Helper()

		// The deferred recover catches the expected panic; no panic is a failure.
		if r := recover(); r == nil {
			t.Errorf("packAny(%#v, &limitedWriter{%d}) didn't panic", in, allowToWrite)
		}
	}()

	_ = PackAny(in, &limitedWriter{allowToWrite})
}

View file

@ -0,0 +1,282 @@
package overdue
import (
"context"
"fmt"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/icingadb/v1/overdue"
"github.com/icinga/icingadb/pkg/icingaredis"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"strconv"
"sync/atomic"
"time"
)
// Sync specifies the source and destination of an overdue sync.
type Sync struct {
	db     *icingadb.DB        // destination: {host,service}_state#is_overdue
	redis  *icingaredis.Client // source: icinga:nextupdate:* / icingadb:overdue:* sets
	logger *zap.SugaredLogger
}
// NewSync creates a new Sync.
func NewSync(db *icingadb.DB, redis *icingaredis.Client, logger *zap.SugaredLogger) *Sync {
	s := Sync{db: db, redis: redis, logger: logger}

	return &s
}
// factory abstracts overdue.NewHostState and overdue.NewServiceState:
// it builds a state entity for the given object ID with is_overdue set to overdue.
type factory = func(id string, overdue bool) (contracts.Entity, error)
// Sync synchronizes Redis overdue sets from s.redis to s.db. It first refreshes
// both icingadb:overdue sets from the database, then runs the continuous sync
// plus a progress logger per object type.
func (s Sync) Sync(ctx context.Context) error {
	{
		g, ctx := errgroup.WithContext(ctx)

		for _, objectType := range [...]string{"host", "service"} {
			objectType := objectType // pin the loop variable for the closure
			g.Go(func() error {
				return s.initSync(ctx, objectType)
			})
		}

		if err := g.Wait(); err != nil {
			return err
		}
	}

	g, ctx := errgroup.WithContext(ctx)

	hostCounter := new(uint64)
	serviceCounter := new(uint64)

	g.Go(func() error { return s.sync(ctx, "host", overdue.NewHostState, hostCounter) })
	g.Go(func() error { return s.log(ctx, "host", hostCounter) })
	g.Go(func() error { return s.sync(ctx, "service", overdue.NewServiceState, serviceCounter) })
	g.Go(func() error { return s.log(ctx, "service", serviceCounter) })

	return g.Wait()
}
// initSync initializes icingadb:overdue:objectType from the database.
func (s Sync) initSync(ctx context.Context, objectType string) error {
	s.logger.Infof("Refreshing already synced %s overdue indicators", objectType)
	start := time.Now()

	var rows []v1.IdMeta
	query := fmt.Sprintf("SELECT id FROM %s_state WHERE is_overdue='y'", objectType)
	if err := s.db.SelectContext(ctx, &rows, query); err != nil {
		return err
	}

	_, err := s.redis.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		key := "icingadb:overdue:" + objectType

		// Rebuild the set from scratch, re-adding the IDs in batches of 100.
		pipe.Del(ctx, key)

		var batch []interface{}
		for _, row := range rows {
			batch = append(batch, row.Id.String())
			if len(batch) == 100 {
				pipe.SAdd(ctx, key, batch...)
				batch = nil
			}
		}

		if len(batch) > 0 {
			pipe.SAdd(ctx, key, batch...)
		}

		return nil
	})
	if err == nil {
		s.logger.Infof(
			"Refreshing %d already synced %s overdue indicators took %s",
			len(rows), objectType, time.Since(start),
		)
	}

	return err
}
// log periodically logs sync's workload, i.e. how many overdue indicators
// were synced in the last period, swapping the counter back to zero.
func (s Sync) log(ctx context.Context, objectType string, counter *uint64) error {
	const period = 20 * time.Second

	periodically := time.NewTicker(period)
	defer periodically.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-periodically.C:
			count := atomic.SwapUint64(counter, 0)
			if count > 0 {
				s.logger.Infof("Synced %d %s overdue indicators in the last %s", count, objectType, period)
			}
		}
	}
}
// luaGetOverdues takes the following KEYS:
// * either icinga:nextupdate:host or icinga:nextupdate:service
// * either icingadb:overdue:host or icingadb:overdue:service
// * a random one (used as a temporary scratch set, deleted again before returning)
//
// It takes the following ARGV:
// * the current date and time as *nix timestamp float in seconds
//
// It returns the following:
// * overdue monitored objects not yet marked overdue
// * not overdue monitored objects not yet unmarked overdue
var luaGetOverdues = redis.NewScript(`

local icingaNextupdate = KEYS[1]
local icingadbOverdue = KEYS[2]
local tempOverdue = KEYS[3]
local now = ARGV[1]

redis.call('DEL', tempOverdue)

local zrbs = redis.call('ZRANGEBYSCORE', icingaNextupdate, '-inf', '(' .. now)
for i = 1, #zrbs do
	redis.call('SADD', tempOverdue, zrbs[i])
end
zrbs = nil

local res = {redis.call('SDIFF', tempOverdue, icingadbOverdue), redis.call('SDIFF', icingadbOverdue, tempOverdue)}

redis.call('DEL', tempOverdue)

return res

`)
// sync synchronizes Redis overdue sets from s.redis to s.db for objectType.
// Every two seconds it runs luaGetOverdues and applies both diffs (to mark and
// to unmark) concurrently via updateOverdue.
func (s Sync) sync(ctx context.Context, objectType string, factory factory, counter *uint64) error {
	s.logger.Infof("Syncing %s overdue indicators", objectType)

	// KEYS for luaGetOverdues; the third is a random scratch key.
	keys := [3]string{"icinga:nextupdate:" + objectType, "icingadb:overdue:" + objectType, ""}
	if rand, err := uuid.NewRandom(); err == nil {
		keys[2] = rand.String()
	} else {
		return err
	}

	const period = 2 * time.Second
	periodically := time.NewTicker(period)
	defer periodically.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-periodically.C:
			overdues, err := luaGetOverdues.Run(
				ctx, s.redis, keys[:], strconv.FormatInt(time.Now().Unix(), 10),
			).Result()
			if err != nil {
				return err
			}

			// res[0]: newly overdue, res[1]: no longer overdue (see luaGetOverdues).
			root := overdues.([]interface{})
			g, ctx := errgroup.WithContext(ctx)
			g.Go(func() error {
				return s.updateOverdue(ctx, objectType, factory, counter, root[0].([]interface{}), true)
			})
			g.Go(func() error {
				return s.updateOverdue(ctx, objectType, factory, counter, root[1].([]interface{}), false)
			})

			if err := g.Wait(); err != nil {
				return err
			}

			// For the case that syncing has taken some time, delay the next sync.
			periodically.Reset(period)

			select {
			case <-periodically.C: // Clean up periodically.C after reset...
			default: // ... unless it's already clean.
			}
		}
	}
}
// updateOverdue sets objectType_state#is_overdue for ids to overdue
// and updates icingadb:overdue:objectType respectively.
func (s Sync) updateOverdue(
	ctx context.Context, objectType string, factory factory, counter *uint64, ids []interface{}, overdue bool,
) error {
	if len(ids) == 0 {
		return nil
	}

	// Database first, then Redis, so the set reflects what's persisted.
	if err := s.updateDb(ctx, factory, ids, overdue); err != nil {
		return err
	}

	atomic.AddUint64(counter, uint64(len(ids)))

	op := s.redis.SRem
	if overdue {
		op = s.redis.SAdd
	}

	_, err := op(ctx, "icingadb:overdue:"+objectType, ids...).Result()

	return err
}
// updateDb sets objectType_state#is_overdue for ids to overdue.
// One goroutine builds the state entities, another streams them into the database.
func (s Sync) updateDb(ctx context.Context, factory factory, ids []interface{}, overdue bool) error {
	g, ctx := errgroup.WithContext(ctx)
	entities := make(chan contracts.Entity, 1<<10)

	g.Go(func() error {
		defer close(entities)

		for _, id := range ids {
			entity, err := factory(id.(string), overdue)
			if err != nil {
				return err
			}

			select {
			case <-ctx.Done():
				return ctx.Err()
			case entities <- entity:
			}
		}

		return nil
	})

	g.Go(func() error {
		return s.db.UpdateStreamed(ctx, entities)
	})

	return g.Wait()
}

View file

@ -0,0 +1,173 @@
package icingadb
import (
"context"
"fmt"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/structify"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"reflect"
)
// RuntimeUpdates specifies the source and destination of runtime updates.
type RuntimeUpdates struct {
	db     *DB                 // destination database
	redis  *icingaredis.Client // source Redis with the icinga:runtime stream
	logger *zap.SugaredLogger
}
// NewRuntimeUpdates creates a new RuntimeUpdates.
func NewRuntimeUpdates(db *DB, redis *icingaredis.Client, logger *zap.SugaredLogger) *RuntimeUpdates {
	ru := RuntimeUpdates{db: db, redis: redis, logger: logger}

	return &ru
}
// bulkSize is the channel buffer size and the XRead batch size (16384).
const bulkSize = 1 << 14
// Sync synchronizes Redis runtime streams from s.redis to s.db and deletes the original data on success.
// Note that Sync must only be called after configuration synchronization has been completed.
func (r *RuntimeUpdates) Sync(ctx context.Context, factoryFuncs []contracts.EntityFactoryFunc, lastStreamId string) error {
	g, ctx := errgroup.WithContext(ctx)

	stream := "icinga:runtime"
	// Route messages to the right per-type pipeline by their "redis_key" field.
	updateMessagesByKey := make(map[string]chan<- redis.XMessage)

	for _, factoryFunc := range factoryFuncs {
		// NOTE(review): method-value assignment; presumably wraps the factory so
		// produced entities are initialized — confirm against contracts.EntityFactoryFunc.
		factoryFunc = factoryFunc.WithInit

		updateMessages := make(chan redis.XMessage, bulkSize)
		upsertEntities := make(chan contracts.Entity, bulkSize)
		deleteIds := make(chan interface{}, bulkSize)

		v := factoryFunc()
		name := utils.Name(v)
		updateMessagesByKey[fmt.Sprintf("icinga:%s", utils.Key(name, ':'))] = updateMessages

		r.logger.Debugf("Syncing runtime updates of %s", name)

		g.Go(structifyStream(ctx, updateMessages, upsertEntities, deleteIds, structify.MakeMapStructifier(reflect.TypeOf(v).Elem(), "json")))

		g.Go(func() error {
			stmt, _ := r.db.BuildUpsertStmt(v)

			// Updates must be executed in order, ensure this by using a semaphore with maximum 1.
			sem := semaphore.NewWeighted(1)

			// TODO(nh) Currently not possible to increase the count here: https://github.com/jmoiron/sqlx/issues/694
			return r.db.NamedBulkExec(ctx, stmt, 1, sem, upsertEntities, nil)
		})

		g.Go(func() error {
			return r.db.DeleteStreamed(ctx, v, deleteIds)
		})
	}

	g.Go(r.xRead(ctx, updateMessagesByKey, stream, lastStreamId))

	return g.Wait()
}
// xRead reads from the Redis stream and sends the data to the corresponding updateMessages channel.
// The updateMessages channel is determined by a "redis_key" on each redis message.
// On return it closes all routed channels so downstream workers terminate.
func (r *RuntimeUpdates) xRead(ctx context.Context, updateMessagesByKey map[string]chan<- redis.XMessage, stream string, lastStreamId string) func() error {
	return func() error {
		defer func() {
			for _, updateMessages := range updateMessagesByKey {
				close(updateMessages)
			}
		}()

		for {
			xra := &redis.XReadArgs{
				Streams: []string{stream, lastStreamId},
				Count:   bulkSize,
				Block:   0, // block indefinitely until new messages arrive
			}

			cmd := r.redis.XRead(ctx, xra)
			streams, err := cmd.Result()
			if err != nil {
				return icingaredis.WrapCmdErr(cmd)
			}

			for _, stream := range streams {
				for _, message := range stream.Messages {
					// Remember the last seen ID so the next XRead resumes after it.
					lastStreamId = message.ID

					redisKey := message.Values["redis_key"]
					if redisKey == nil {
						return fmt.Errorf("stream message missing 'redis_key' key: %v", message.Values)
					}

					updateMessages := updateMessagesByKey[redisKey.(string)]
					if updateMessages == nil {
						return fmt.Errorf("no object type for redis key %s found", redisKey)
					}

					select {
					case updateMessages <- message:
					case <-ctx.Done():
						return ctx.Err()
					}
				}
			}
		}
	}
}
// structifyStream gets Redis stream messages (redis.XMessage) via the updateMessages channel and converts
// those messages into Icinga DB entities (contracts.Entity) using the provided structifier.
// Converted entities are inserted into the upsertEntities or deleteIds channel depending on the "runtime_type" message field.
func structifyStream(ctx context.Context, updateMessages <-chan redis.XMessage, upsertEntities chan contracts.Entity, deleteIds chan interface{}, structifier structify.MapStructifier) func() error {
	return func() error {
		// Always close the downstream channels so consumers terminate as well.
		defer func() {
			close(upsertEntities)
			close(deleteIds)
		}()

		for {
			select {
			case msg, ok := <-updateMessages:
				if !ok {
					// Upstream closed the channel; we are done.
					return nil
				}

				val, err := structifier(msg.Values)
				if err != nil {
					return err
				}
				entity := val.(contracts.Entity)

				runtimeType := msg.Values["runtime_type"]
				if runtimeType == nil {
					return fmt.Errorf("stream message missing 'runtime_type' key: %v", msg.Values)
				}

				switch runtimeType {
				case "upsert":
					select {
					case upsertEntities <- entity:
					case <-ctx.Done():
						return ctx.Err()
					}
				case "delete":
					select {
					case deleteIds <- entity.ID():
					case <-ctx.Done():
						return ctx.Err()
					}
				default:
					return fmt.Errorf("invalid runtime type: %s", runtimeType)
				}
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
}

158
pkg/icingadb/sync.go Normal file
View file

@ -0,0 +1,158 @@
package icingadb
import (
"context"
"fmt"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"runtime"
"time"
)
// Sync implements a rendezvous point for Icinga DB and Redis to synchronize their entities.
type Sync struct {
	db     *DB
	redis  *icingaredis.Client
	logger *zap.SugaredLogger
}

// NewSync returns a new Sync for the given database and Redis clients.
func NewSync(db *DB, redis *icingaredis.Client, logger *zap.SugaredLogger) *Sync {
	return &Sync{
		db:     db,
		redis:  redis,
		logger: logger,
	}
}
// SyncAfterDump waits for a config dump to finish (using the dump parameter) and then starts a sync for the given
// sync subject using the Sync function.
// While waiting, a log message is emitted every 20 seconds; ctx cancellation aborts the wait.
func (s Sync) SyncAfterDump(ctx context.Context, subject *common.SyncSubject, dump *DumpSignals) error {
	typeName := utils.Name(subject.Entity())
	key := "icinga:" + utils.Key(typeName, ':')

	startTime := time.Now()
	logTicker := time.NewTicker(20 * time.Second)
	loggedWaiting := false
	defer logTicker.Stop()

	for {
		select {
		case <-logTicker.C:
			// time.Since replaces time.Now().Sub (staticcheck S1012).
			s.logger.Infow("Waiting for dump done signal",
				zap.String("type", typeName),
				zap.String("key", key),
				zap.Duration("duration", time.Since(startTime)))
			loggedWaiting = true
		case <-dump.Done(key):
			// If we already logged that we were waiting, announce the start at info
			// level too; otherwise keep it at debug to avoid noise.
			logFn := s.logger.Debugw
			if loggedWaiting {
				logFn = s.logger.Infow
			}
			logFn("Starting sync",
				zap.String("type", typeName),
				zap.String("key", key),
				zap.Duration("waited", time.Since(startTime)))
			return s.Sync(ctx, subject)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// Sync synchronizes entities between Icinga DB and Redis created with the specified sync subject.
// This function does not respect dump signals. For this, use SyncAfterDump.
func (s Sync) Sync(ctx context.Context, subject *common.SyncSubject) error {
	s.logger.Infof("Syncing %s", utils.Key(utils.Name(subject.Entity()), ' '))
	g, ctx := errgroup.WithContext(ctx)
	// Desired state comes from Redis, actual state from the database.
	desired, redisErrs := s.redis.YieldAll(ctx, subject)
	// Let errors from Redis cancel our group.
	com.ErrgroupReceive(g, redisErrs)
	actual, dbErrs := s.db.YieldAll(
		ctx, subject.Factory(), s.db.BuildSelectStmt(subject.Entity(), subject.Entity().Fingerprint()))
	// Let errors from DB cancel our group.
	com.ErrgroupReceive(g, dbErrs)
	// Compute the difference between both states and apply it to the database.
	g.Go(func() error {
		return s.ApplyDelta(ctx, NewDelta(ctx, actual, desired, subject, s.logger))
	})
	return g.Wait()
}
// ApplyDelta applies all changes from Delta to the database:
// inserts delta.Create, updates delta.Update and deletes delta.Delete.
func (s Sync) ApplyDelta(ctx context.Context, delta *Delta) error {
	// The delta must be fully computed before we can act on it.
	if err := delta.Wait(); err != nil {
		return err
	}
	g, ctx := errgroup.WithContext(ctx)
	// Create
	if len(delta.Create) > 0 {
		var entities <-chan contracts.Entity
		if delta.Subject.WithChecksum() {
			// Fetch the full rows from Redis and attach the checksums carried by the delta.
			pairs, errs := s.redis.HMYield(
				ctx,
				fmt.Sprintf("icinga:%s", utils.Key(utils.Name(delta.Subject.Entity()), ':')),
				delta.Create.Keys()...)
			// Let errors from Redis cancel our group.
			com.ErrgroupReceive(g, errs)
			entitiesWithoutChecksum, errs := icingaredis.CreateEntities(ctx, delta.Subject.Factory(), pairs, runtime.NumCPU())
			// Let errors from CreateEntities cancel our group.
			com.ErrgroupReceive(g, errs)
			entities, errs = icingaredis.SetChecksums(ctx, entitiesWithoutChecksum, delta.Create, runtime.NumCPU())
			// Let errors from SetChecksums cancel our group.
			com.ErrgroupReceive(g, errs)
		} else {
			entities = delta.Create.Entities(ctx)
		}
		g.Go(func() error {
			return s.db.CreateStreamed(ctx, entities)
		})
	}
	// Update
	if len(delta.Update) > 0 {
		s.logger.Infof("Updating %d rows of type %s", len(delta.Update), utils.Key(utils.Name(delta.Subject.Entity()), ' '))
		// Fetch the full rows from Redis, as the delta only carries ids and checksums.
		pairs, errs := s.redis.HMYield(
			ctx,
			fmt.Sprintf("icinga:%s", utils.Key(utils.Name(delta.Subject.Entity()), ':')),
			delta.Update.Keys()...)
		// Let errors from Redis cancel our group.
		com.ErrgroupReceive(g, errs)
		entitiesWithoutChecksum, errs := icingaredis.CreateEntities(ctx, delta.Subject.Factory(), pairs, runtime.NumCPU())
		// Let errors from CreateEntities cancel our group.
		com.ErrgroupReceive(g, errs)
		entities, errs := icingaredis.SetChecksums(ctx, entitiesWithoutChecksum, delta.Update, runtime.NumCPU())
		// Let errors from SetChecksums cancel our group.
		com.ErrgroupReceive(g, errs)
		g.Go(func() error {
			// TODO (el): This is very slow in high latency scenarios.
			// Use strings.Repeat() on the query and create a stmt
			// with a size near the default value of max_allowed_packet.
			return s.db.UpdateStreamed(ctx, entities)
		})
	}
	// Delete
	if len(delta.Delete) > 0 {
		s.logger.Infof("Deleting %d rows of type %s", len(delta.Delete), utils.Key(utils.Name(delta.Subject.Entity()), ' '))
		g.Go(func() error {
			return s.db.Delete(ctx, delta.Subject.Entity(), delta.Delete.IDs())
		})
	}
	return g.Wait()
}

View file

@ -0,0 +1,46 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// Checkable holds the configuration fields common to checkable objects
// (check scheduling, notification, flapping and command settings);
// it is embedded by Host (see host.go).
type Checkable struct {
	EntityWithChecksum    `json:",inline"`
	EnvironmentMeta       `json:",inline"`
	NameCiMeta            `json:",inline"`
	ActionUrlId           types.Binary `json:"action_url_id"`
	ActiveChecksEnabled   types.Bool   `json:"active_checks_enabled"`
	CheckInterval         float64      `json:"check_interval"`
	CheckTimeperiod       string       `json:"check_timeperiod"`
	CheckTimeperiodId     types.Binary `json:"check_timeperiod_id"`
	CheckRetryInterval    float64      `json:"check_retry_interval"`
	CheckTimeout          float64      `json:"check_timeout"`
	Checkcommand          string       `json:"checkcommand"`
	CheckcommandId        types.Binary `json:"checkcommand_id"`
	CommandEndpoint       string       `json:"command_endpoint"`
	CommandEndpointId     types.Binary `json:"command_endpoint_id"`
	DisplayName           string       `json:"display_name"`
	EventHandlerEnabled   types.Bool   `json:"event_handler_enabled"`
	Eventcommand          string       `json:"eventcommand"`
	EventcommandId        types.Binary `json:"eventcommand_id"`
	FlappingEnabled       types.Bool   `json:"flapping_enabled"`
	FlappingThresholdHigh float64      `json:"flapping_threshold_high"`
	FlappingThresholdLow  float64      `json:"flapping_threshold_low"`
	IconImageAlt          string       `json:"icon_image_alt"`
	IconImageId           types.Binary `json:"icon_image_id"`
	IsVolatile            types.Bool   `json:"is_volatile"`
	MaxCheckAttempts      float64      `json:"max_check_attempts"`
	Notes                 string       `json:"notes"`
	NotesUrlId            types.Binary `json:"notes_url_id"`
	NotificationsEnabled  types.Bool   `json:"notifications_enabled"`
	PassiveChecksEnabled  types.Bool   `json:"passive_checks_enabled"`
	PerfdataEnabled       types.Bool   `json:"perfdata_enabled"`
	Zone                  string       `json:"zone"`
	ZoneId                types.Binary `json:"zone_id"`
}

// Assert interface compliance.
var (
	_ contracts.Initer = (*Checkable)(nil)
)

166
pkg/icingadb/v1/command.go Normal file
View file

@ -0,0 +1,166 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// Command is the shared base of check, event and notification commands.
type Command struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	NameCiMeta         `json:",inline"`
	ZoneId             types.Binary `json:"zone_id"`
	Command            string       `json:"command"`
	Timeout            uint32       `json:"timeout"`
}

// CommandArgument represents a single command-line argument of a Command.
type CommandArgument struct {
	EntityWithChecksum  `json:",inline"`
	EnvironmentMeta     `json:",inline"`
	CommandId           types.Binary `json:"command_id"`
	ArgumentKey         string       `json:"argument_key"`
	ArgumentValue       types.String `json:"value"`
	ArgumentOrder       types.Int    `json:"order"`
	Description         types.String `json:"description"`
	ArgumentKeyOverride types.String `json:"key"`
	RepeatKey           types.Bool   `json:"repeat_key"`
	Required            types.Bool   `json:"required"`
	SetIf               types.String `json:"set_if"`
	SkipKey             types.Bool   `json:"skip_key"`
}
// Init implements the contracts.Initer interface.
// It presets RepeatKey, Required and SkipKey with valid default values.
func (ca *CommandArgument) Init() {
	ca.RepeatKey = types.Bool{Bool: true, Valid: true}
	ca.Required = types.Bool{Bool: false, Valid: true}
	ca.SkipKey = types.Bool{Bool: false, Valid: true}
}
// CommandEnvvar represents a single environment variable of a Command.
type CommandEnvvar struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	CommandId          types.Binary `json:"command_id"`
	EnvvarKey          string       `json:"envvar_key"`
	EnvvarValue        string       `json:"value"`
}

// CommandCustomvar links a custom variable to a Command.
type CommandCustomvar struct {
	CustomvarMeta `json:",inline"`
	CommandId     types.Binary `json:"command_id"`
}

// Checkcommand is a Command used for checks.
type Checkcommand struct {
	Command `json:",inline"`
}

// CheckcommandArgument is a CommandArgument of a Checkcommand.
type CheckcommandArgument struct {
	CommandArgument `json:",inline"`
}

// CheckcommandEnvvar is a CommandEnvvar of a Checkcommand.
type CheckcommandEnvvar struct {
	CommandEnvvar `json:",inline"`
}

// CheckcommandCustomvar is a CommandCustomvar of a Checkcommand.
type CheckcommandCustomvar struct {
	CommandCustomvar `json:",inline"`
}

// Eventcommand is a Command used for event handlers.
type Eventcommand struct {
	Command `json:",inline"`
}

// EventcommandArgument is a CommandArgument of an Eventcommand.
type EventcommandArgument struct {
	CommandArgument `json:",inline"`
}

// EventcommandEnvvar is a CommandEnvvar of an Eventcommand.
type EventcommandEnvvar struct {
	CommandEnvvar `json:",inline"`
}

// EventcommandCustomvar is a CommandCustomvar of an Eventcommand.
type EventcommandCustomvar struct {
	CommandCustomvar `json:",inline"`
}

// Notificationcommand is a Command used for notifications.
type Notificationcommand struct {
	Command `json:",inline"`
}

// NotificationcommandArgument is a CommandArgument of a Notificationcommand.
type NotificationcommandArgument struct {
	CommandArgument `json:",inline"`
}

// NotificationcommandEnvvar is a CommandEnvvar of a Notificationcommand.
type NotificationcommandEnvvar struct {
	CommandEnvvar `json:",inline"`
}

// NotificationcommandCustomvar is a CommandCustomvar of a Notificationcommand.
type NotificationcommandCustomvar struct {
	CommandCustomvar `json:",inline"`
}
// NewCheckcommand creates a new Checkcommand.
func NewCheckcommand() contracts.Entity {
	return new(Checkcommand)
}

// NewCheckcommandArgument creates a new CheckcommandArgument.
func NewCheckcommandArgument() contracts.Entity {
	return new(CheckcommandArgument)
}

// NewCheckcommandEnvvar creates a new CheckcommandEnvvar.
func NewCheckcommandEnvvar() contracts.Entity {
	return new(CheckcommandEnvvar)
}

// NewCheckcommandCustomvar creates a new CheckcommandCustomvar.
func NewCheckcommandCustomvar() contracts.Entity {
	return new(CheckcommandCustomvar)
}

// NewEventcommand creates a new Eventcommand.
func NewEventcommand() contracts.Entity {
	return new(Eventcommand)
}

// NewEventcommandArgument creates a new EventcommandArgument.
func NewEventcommandArgument() contracts.Entity {
	return new(EventcommandArgument)
}

// NewEventcommandEnvvar creates a new EventcommandEnvvar.
func NewEventcommandEnvvar() contracts.Entity {
	return new(EventcommandEnvvar)
}

// NewEventcommandCustomvar creates a new EventcommandCustomvar.
func NewEventcommandCustomvar() contracts.Entity {
	return new(EventcommandCustomvar)
}

// NewNotificationcommand creates a new Notificationcommand.
func NewNotificationcommand() contracts.Entity {
	return new(Notificationcommand)
}

// NewNotificationcommandArgument creates a new NotificationcommandArgument.
func NewNotificationcommandArgument() contracts.Entity {
	return new(NotificationcommandArgument)
}

// NewNotificationcommandEnvvar creates a new NotificationcommandEnvvar.
func NewNotificationcommandEnvvar() contracts.Entity {
	return new(NotificationcommandEnvvar)
}

// NewNotificationcommandCustomvar creates a new NotificationcommandCustomvar.
func NewNotificationcommandCustomvar() contracts.Entity {
	return new(NotificationcommandCustomvar)
}
// Assert interface compliance at compile time.
var (
	_ contracts.Initer = (*Command)(nil)
	_ contracts.Initer = (*CommandArgument)(nil)
	_ contracts.Initer = (*Checkcommand)(nil)
	_ contracts.Initer = (*Eventcommand)(nil)
	_ contracts.Initer = (*Notificationcommand)(nil)
)

View file

@ -0,0 +1,27 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// Comment defines the fields of an Icinga comment object.
type Comment struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	NameMeta           `json:",inline"`
	ObjectType         string            `json:"object_type"`
	HostId             types.Binary      `json:"host_id"`
	ServiceId          types.Binary      `json:"service_id"`
	Author             string            `json:"author"`
	Text               string            `json:"text"`
	EntryType          types.CommentType `json:"entry_type"`
	EntryTime          types.UnixMilli   `json:"entry_time"`
	IsPersistent       types.Bool        `json:"is_persistent"`
	IsSticky           types.Bool        `json:"is_sticky"`
	ExpireTime         types.UnixMilli   `json:"expire_time"`
	ZoneId             types.Binary      `json:"zone_id"`
}

// NewComment creates a new Comment.
func NewComment() contracts.Entity {
	return &Comment{}
}

View file

@ -0,0 +1,101 @@
package v1
import (
"context"
"encoding/json"
"fmt"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/flatten"
"github.com/icinga/icingadb/pkg/icingadb/objectpacker"
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"golang.org/x/sync/errgroup"
"runtime"
)
// Customvar is a raw custom variable; Value holds the JSON-encoded value.
type Customvar struct {
	EntityWithoutChecksum `json:",inline"`
	EnvironmentMeta       `json:",inline"`
	NameMeta              `json:",inline"`
	Value                 string `json:"value"`
}

// CustomvarFlat is one flattened path/value pair derived from a Customvar
// (see FlattenCustomvars).
type CustomvarFlat struct {
	CustomvarMeta    `json:",inline"`
	Flatname         string       `json:"flatname"`
	FlatnameChecksum types.Binary `json:"flatname_checksum"`
	Flatvalue        string       `json:"flatvalue"`
}

// NewCustomvar creates a new Customvar.
func NewCustomvar() contracts.Entity {
	return &Customvar{}
}

// NewCustomvarFlat creates a new CustomvarFlat.
func NewCustomvarFlat() contracts.Entity {
	return &CustomvarFlat{}
}
// FlattenCustomvars creates and yields flat custom variables from the provided custom variables.
// Each Customvar's JSON value is unmarshalled and flattened into path/value pairs,
// processed by runtime.NumCPU() workers. Errors are reported via the second channel.
func FlattenCustomvars(ctx context.Context, cvs <-chan contracts.Entity) (<-chan contracts.Entity, <-chan error) {
	cvFlats := make(chan contracts.Entity)
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error {
		defer close(cvFlats)
		// Inner group so all workers can be awaited before cvFlats is closed;
		// its derived context is unused, cancellation flows through the outer ctx.
		g, _ := errgroup.WithContext(ctx)
		for i := 0; i < runtime.NumCPU(); i++ {
			g.Go(func() error {
				for entity := range cvs {
					var value interface{}
					customvar := entity.(*Customvar)
					if err := json.Unmarshal([]byte(customvar.Value), &value); err != nil {
						return err
					}
					flattened := flatten.Flatten(value, customvar.Name)
					for flatname, flatvalue := range flattened {
						var fv string
						if flatvalue == nil {
							fv = "null"
						} else {
							fv = fmt.Sprintf("%v", flatvalue)
						}
						select {
						case cvFlats <- &CustomvarFlat{
							CustomvarMeta: CustomvarMeta{
								EntityWithoutChecksum: EntityWithoutChecksum{
									IdMeta: IdMeta{
										// TODO(el): Schema comment is wrong.
										// Without customvar.Id we would produce duplicate keys here.
										Id: utils.Checksum(objectpacker.MustPackAny(customvar.EnvironmentId, customvar.Id, flatname, flatvalue)),
									},
								},
								EnvironmentMeta: EnvironmentMeta{
									EnvironmentId: customvar.EnvironmentId,
								},
								CustomvarId: customvar.Id,
							},
							Flatname:         flatname,
							FlatnameChecksum: utils.Checksum(flatname),
							Flatvalue:        fv,
						}:
						case <-ctx.Done():
							return ctx.Err()
						}
					}
				}
				return nil
			})
		}
		return g.Wait()
	})
	return cvFlats, com.WaitAsync(g)
}

View file

@ -0,0 +1,31 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// Downtime defines the fields of an Icinga downtime object.
type Downtime struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	NameMeta           `json:",inline"`
	TriggeredById      types.Binary    `json:"triggered_by_id"`
	ObjectType         string          `json:"object_type"`
	HostId             types.Binary    `json:"host_id"`
	ServiceId          types.Binary    `json:"service_id"`
	Author             string          `json:"author"`
	Comment            string          `json:"comment"`
	EntryTime          types.UnixMilli `json:"entry_time"`
	ScheduledStartTime types.UnixMilli `json:"scheduled_start_time"`
	ScheduledEndTime   types.UnixMilli `json:"scheduled_end_time"`
	FlexibleDuration   uint64          `json:"flexible_duration"`
	IsFlexible         types.Bool      `json:"is_flexible"`
	IsInEffect         types.Bool      `json:"is_in_effect"`
	StartTime          types.UnixMilli `json:"start_time"`
	EndTime            types.UnixMilli `json:"end_time"`
	ZoneId             types.Binary    `json:"zone_id"`
}

// NewDowntime creates a new Downtime.
func NewDowntime() contracts.Entity {
	return &Downtime{}
}

View file

@ -0,0 +1,36 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// Endpoint defines the fields of an Icinga endpoint object.
type Endpoint struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	NameCiMeta         `json:",inline"`
	ZoneId             types.Binary `json:"zone_id"`
}

// Zone defines the fields of an Icinga zone object.
type Zone struct {
	EntityWithChecksum `json:",inline"`
	EnvironmentMeta    `json:",inline"`
	NameCiMeta         `json:",inline"`
	IsGlobal           types.Bool   `json:"is_global"`
	ParentId           types.Binary `json:"parent_id"`
	// Depth is the zone's distance from the root of the zone hierarchy.
	Depth uint8 `json:"depth"`
}

// NewEndpoint creates a new Endpoint.
func NewEndpoint() contracts.Entity {
	return &Endpoint{}
}

// NewZone creates a new Zone.
func NewZone() contracts.Entity {
	return &Zone{}
}

// Assert interface compliance.
var (
	_ contracts.Initer = (*Endpoint)(nil)
	_ contracts.Initer = (*Zone)(nil)
)

24
pkg/icingadb/v1/entity.go Normal file
View file

@ -0,0 +1,24 @@
package v1
import "github.com/icinga/icingadb/pkg/contracts"
// EntityWithoutChecksum represents entities without a checksum.
type EntityWithoutChecksum struct {
	IdMeta `json:",inline"`
}

// Fingerprint implements the contracts.Fingerprinter interface.
// The entity itself (i.e. its id) serves as its own fingerprint.
func (e EntityWithoutChecksum) Fingerprint() contracts.Fingerprinter {
	return e
}

// EntityWithChecksum represents entities with a checksum.
type EntityWithChecksum struct {
	EntityWithoutChecksum `json:",inline"`
	ChecksumMeta          `json:",inline"`
}

// Fingerprint implements the contracts.Fingerprinter interface.
// The entity itself (id plus checksum) serves as its own fingerprint.
func (e EntityWithChecksum) Fingerprint() contracts.Fingerprinter {
	return e
}

View file

@ -0,0 +1,82 @@
package history
import (
"database/sql/driver"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/types"
)
// AckHistoryUpserter holds the columns written when an acknowledgement is cleared.
type AckHistoryUpserter struct {
	ClearTime types.UnixMilli `json:"clear_time"`
	ClearedBy types.String    `json:"cleared_by"`
}

// Upsert implements the contracts.Upserter interface:
// on conflict, only the clear-related columns above are updated.
func (ahu *AckHistoryUpserter) Upsert() interface{} {
	return ahu
}

// AcknowledgementHistory is a row of the acknowledgement history table.
type AcknowledgementHistory struct {
	v1.EntityWithoutChecksum `json:",inline"`
	HistoryTableMeta         `json:",inline"`
	AckHistoryUpserter       `json:",inline"`
	SetTime                  types.UnixMilli `json:"set_time"`
	Author                   string          `json:"author"`
	Comment                  types.String    `json:"comment"`
	ExpireTime               types.UnixMilli `json:"expire_time"`
	IsPersistent             types.Bool      `json:"is_persistent"`
	IsSticky                 types.Bool      `json:"is_sticky"`
}

// HistoryAck is the acknowledgement-specific row of the common history table.
type HistoryAck struct {
	HistoryMeta              `json:",inline"`
	AcknowledgementHistoryId types.Binary `json:"id"`
	// Idea: read SetTime and ClearTime from Redis and let EventTime decide which of them to write to MySQL.
	// So EventTime doesn't have to be read from Redis (json:"-")
	// and the others don't have to be written to MySQL (db:"-").
	SetTime   types.UnixMilli `json:"set_time" db:"-"`
	ClearTime types.UnixMilli `json:"clear_time" db:"-"`
	EventTime AckEventTime    `json:"-"`
}

// Init implements the contracts.Initer interface.
// It wires EventTime back to its enclosing row so Value() can inspect it.
func (h *HistoryAck) Init() {
	h.EventTime.History = h
}

// TableName implements the contracts.TableNamer interface.
func (*HistoryAck) TableName() string {
	return "history"
}

// AckEventTime selects the event time of a HistoryAck row based on its event type.
type AckEventTime struct {
	History *HistoryAck `db:"-"`
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (et AckEventTime) Value() (driver.Value, error) {
	if et.History == nil {
		return nil, nil
	}
	switch et.History.EventType {
	case "ack_set":
		return et.History.SetTime.Value()
	case "ack_clear":
		return et.History.ClearTime.Value()
	default:
		// Unknown event type: write SQL NULL.
		return nil, nil
	}
}

// Assert interface compliance.
var (
	_ UpserterEntity       = (*AcknowledgementHistory)(nil)
	_ contracts.Initer     = (*HistoryAck)(nil)
	_ contracts.TableNamer = (*HistoryAck)(nil)
	_ UpserterEntity       = (*HistoryAck)(nil)
	_ driver.Valuer        = AckEventTime{}
)

View file

@ -0,0 +1,120 @@
package history
import (
"database/sql/driver"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// CommentHistoryEntity identifies a comment history row by its comment id.
type CommentHistoryEntity struct {
	CommentId types.Binary `json:"comment_id"`
}

// Fingerprint implements part of the contracts.Entity interface.
func (che CommentHistoryEntity) Fingerprint() contracts.Fingerprinter {
	return che
}

// ID implements part of the contracts.Entity interface.
func (che CommentHistoryEntity) ID() contracts.ID {
	return che.CommentId
}

// SetID implements part of the contracts.Entity interface.
func (che *CommentHistoryEntity) SetID(id contracts.ID) {
	che.CommentId = id.(types.Binary)
}

// CommentHistoryUpserter holds the columns written when a comment is removed.
type CommentHistoryUpserter struct {
	RemovedBy      types.String    `json:"removed_by"`
	RemoveTime     types.UnixMilli `json:"remove_time"`
	HasBeenRemoved types.Bool      `json:"has_been_removed"`
}

// Upsert implements the contracts.Upserter interface:
// on conflict, only the removal-related columns above are updated.
func (chu *CommentHistoryUpserter) Upsert() interface{} {
	return chu
}

// CommentHistory is a row of the comment history table.
type CommentHistory struct {
	CommentHistoryEntity   `json:",inline"`
	HistoryTableMeta       `json:",inline"`
	CommentHistoryUpserter `json:",inline"`
	EntryTime              types.UnixMilli   `json:"entry_time"`
	Author                 string            `json:"author"`
	Comment                string            `json:"comment"`
	EntryType              types.CommentType `json:"entry_type"`
	IsPersistent           types.Bool        `json:"is_persistent"`
	IsSticky               types.Bool        `json:"is_sticky"`
	ExpireTime             types.UnixMilli   `json:"expire_time"`
}

// Init implements the contracts.Initer interface.
// New rows default to "not removed" until an update says otherwise.
func (ch *CommentHistory) Init() {
	ch.HasBeenRemoved = types.Bool{
		Bool:  false,
		Valid: true,
	}
}

// HistoryComment is the comment-specific row of the common history table.
type HistoryComment struct {
	HistoryMeta      `json:",inline"`
	CommentHistoryId types.Binary `json:"comment_id"`
	// Idea: read EntryTime, RemoveTime and ExpireTime from Redis
	// and let EventTime decide which of them to write to MySQL.
	// So EventTime doesn't have to be read from Redis (json:"-")
	// and the others don't have to be written to MySQL (db:"-").
	EntryTime  types.UnixMilli  `json:"entry_time" db:"-"`
	RemoveTime types.UnixMilli  `json:"remove_time" db:"-"`
	ExpireTime types.UnixMilli  `json:"expire_time" db:"-"`
	EventTime  CommentEventTime `json:"-"`
}

// Init implements the contracts.Initer interface.
// It wires EventTime back to its enclosing row so Value() can inspect it.
func (h *HistoryComment) Init() {
	h.EventTime.History = h
}

// TableName implements the contracts.TableNamer interface.
func (*HistoryComment) TableName() string {
	return "history"
}

// CommentEventTime selects the event time of a HistoryComment row based on its event type.
type CommentEventTime struct {
	History *HistoryComment `db:"-"`
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (et CommentEventTime) Value() (driver.Value, error) {
	if et.History == nil {
		return nil, nil
	}
	switch et.History.EventType {
	case "comment_add":
		return et.History.EntryTime.Value()
	case "comment_remove":
		// Fall back to ExpireTime when RemoveTime is NULL
		// (presumably a comment that expired rather than being removed — confirm upstream).
		v, err := et.History.RemoveTime.Value()
		if err == nil && v == nil {
			return et.History.ExpireTime.Value()
		}
		return v, err
	default:
		// Unknown event type: write SQL NULL.
		return nil, nil
	}
}

// Assert interface compliance.
var (
	_ contracts.Entity     = (*CommentHistoryEntity)(nil)
	_ contracts.Upserter   = (*CommentHistoryUpserter)(nil)
	_ contracts.Initer     = (*CommentHistory)(nil)
	_ UpserterEntity       = (*CommentHistory)(nil)
	_ contracts.Initer     = (*HistoryComment)(nil)
	_ contracts.TableNamer = (*HistoryComment)(nil)
	_ UpserterEntity       = (*HistoryComment)(nil)
	_ driver.Valuer        = CommentEventTime{}
)

View file

@ -0,0 +1,119 @@
package history
import (
"database/sql/driver"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// DowntimeHistoryEntity identifies a downtime history row by its downtime id.
type DowntimeHistoryEntity struct {
	DowntimeId types.Binary `json:"downtime_id"`
}

// Fingerprint implements part of the contracts.Entity interface.
func (dhe DowntimeHistoryEntity) Fingerprint() contracts.Fingerprinter {
	return dhe
}

// ID implements part of the contracts.Entity interface.
func (dhe DowntimeHistoryEntity) ID() contracts.ID {
	return dhe.DowntimeId
}

// SetID implements part of the contracts.Entity interface.
func (dhe *DowntimeHistoryEntity) SetID(id contracts.ID) {
	dhe.DowntimeId = id.(types.Binary)
}

// DowntimeHistoryUpserter holds the columns written when a downtime is cancelled.
type DowntimeHistoryUpserter struct {
	CancelledBy      types.String    `json:"cancelled_by"`
	HasBeenCancelled types.Bool      `json:"has_been_cancelled"`
	CancelTime       types.UnixMilli `json:"cancel_time"`
}

// Upsert implements the contracts.Upserter interface:
// on conflict, only the cancellation-related columns above are updated.
func (dhu *DowntimeHistoryUpserter) Upsert() interface{} {
	return dhu
}

// DowntimeHistory is a row of the downtime history table.
type DowntimeHistory struct {
	DowntimeHistoryEntity   `json:",inline"`
	HistoryTableMeta        `json:",inline"`
	DowntimeHistoryUpserter `json:",inline"`
	TriggeredById           types.Binary    `json:"triggered_by_id"`
	EntryTime               types.UnixMilli `json:"entry_time"`
	Author                  string          `json:"author"`
	Comment                 string          `json:"comment"`
	IsFlexible              types.Bool      `json:"is_flexible"`
	FlexibleDuration        uint64          `json:"flexible_duration"`
	ScheduledStartTime      types.UnixMilli `json:"scheduled_start_time"`
	ScheduledEndTime        types.UnixMilli `json:"scheduled_end_time"`
	StartTime               types.UnixMilli `json:"start_time"`
	EndTime                 types.UnixMilli `json:"end_time"`
	TriggerTime             types.UnixMilli `json:"trigger_time"`
}

// HistoryDowntime is the downtime-specific row of the common history table.
type HistoryDowntime struct {
	HistoryMeta       `json:",inline"`
	DowntimeHistoryId types.Binary `json:"downtime_id"`
	// Idea: read StartTime, CancelTime, EndTime and HasBeenCancelled from Redis
	// and let EventTime decide based on HasBeenCancelled which of the others to write to MySQL.
	// So EventTime doesn't have to be read from Redis (json:"-")
	// and the others don't have to be written to MySQL (db:"-").
	StartTime        types.UnixMilli   `json:"start_time" db:"-"`
	CancelTime       types.UnixMilli   `json:"cancel_time" db:"-"`
	EndTime          types.UnixMilli   `json:"end_time" db:"-"`
	HasBeenCancelled types.Bool        `json:"has_been_cancelled" db:"-"`
	EventTime        DowntimeEventTime `json:"-"`
}

// Init implements the contracts.Initer interface.
// It wires EventTime back to its enclosing row so Value() can inspect it.
func (h *HistoryDowntime) Init() {
	h.EventTime.History = h
}

// TableName implements the contracts.TableNamer interface.
func (*HistoryDowntime) TableName() string {
	return "history"
}
// DowntimeEventTime selects the event time of a HistoryDowntime row based on its event type.
type DowntimeEventTime struct {
	History *HistoryDowntime `db:"-"`
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (et DowntimeEventTime) Value() (driver.Value, error) {
	h := et.History
	if h == nil {
		return nil, nil
	}

	switch h.EventType {
	case "downtime_start":
		return h.StartTime.Value()
	case "downtime_end":
		// Without a valid cancellation flag we cannot decide which time applies.
		if !h.HasBeenCancelled.Valid {
			return nil, nil
		}

		if h.HasBeenCancelled.Bool {
			return h.CancelTime.Value()
		}

		return h.EndTime.Value()
	default:
		return nil, nil
	}
}
// Assert interface compliance at compile time.
var (
	_ contracts.Entity     = (*DowntimeHistoryEntity)(nil)
	_ contracts.Upserter   = (*DowntimeHistoryUpserter)(nil)
	_ UpserterEntity       = (*DowntimeHistory)(nil)
	_ contracts.Initer     = (*HistoryDowntime)(nil)
	_ contracts.TableNamer = (*HistoryDowntime)(nil)
	_ UpserterEntity       = (*HistoryDowntime)(nil)
	_ driver.Valuer        = DowntimeEventTime{}
)

View file

@ -0,0 +1,80 @@
package history
import (
"database/sql/driver"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/types"
)
// FlappingHistoryUpserter holds the columns written when a flapping period ends.
type FlappingHistoryUpserter struct {
	EndTime               types.UnixMilli `json:"end_time"`
	PercentStateChangeEnd types.Float     `json:"percent_state_change_end"`
	FlappingThresholdLow  float32         `json:"flapping_threshold_low"`
	FlappingThresholdHigh float32         `json:"flapping_threshold_high"`
}

// Upsert implements the contracts.Upserter interface:
// on conflict, only the end-related columns above are updated.
func (fhu *FlappingHistoryUpserter) Upsert() interface{} {
	return fhu
}

// FlappingHistory is a row of the flapping history table.
type FlappingHistory struct {
	v1.EntityWithoutChecksum `json:",inline"`
	HistoryTableMeta         `json:",inline"`
	FlappingHistoryUpserter  `json:",inline"`
	StartTime                types.UnixMilli `json:"start_time"`
	PercentStateChangeStart  types.Float     `json:"percent_state_change_start"`
}

// HistoryFlapping is the flapping-specific row of the common history table.
type HistoryFlapping struct {
	HistoryMeta       `json:",inline"`
	FlappingHistoryId types.Binary `json:"id"`
	// Idea: read StartTime and EndTime from Redis and let EventTime decide which of them to write to MySQL.
	// So EventTime doesn't have to be read from Redis (json:"-")
	// and the others don't have to be written to MySQL (db:"-").
	StartTime types.UnixMilli   `json:"start_time" db:"-"`
	EndTime   types.UnixMilli   `json:"end_time" db:"-"`
	EventTime FlappingEventTime `json:"-"`
}

// Init implements the contracts.Initer interface.
// It wires EventTime back to its enclosing row so Value() can inspect it.
func (h *HistoryFlapping) Init() {
	h.EventTime.History = h
}

// TableName implements the contracts.TableNamer interface.
func (*HistoryFlapping) TableName() string {
	return "history"
}

// FlappingEventTime selects the event time of a HistoryFlapping row based on its event type.
type FlappingEventTime struct {
	History *HistoryFlapping `db:"-"`
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (et FlappingEventTime) Value() (driver.Value, error) {
	if et.History == nil {
		return nil, nil
	}
	switch et.History.EventType {
	case "flapping_start":
		return et.History.StartTime.Value()
	case "flapping_end":
		return et.History.EndTime.Value()
	default:
		// Unknown event type: write SQL NULL.
		return nil, nil
	}
}

// Assert interface compliance.
var (
	_ UpserterEntity       = (*FlappingHistory)(nil)
	_ contracts.Initer     = (*HistoryFlapping)(nil)
	_ contracts.TableNamer = (*HistoryFlapping)(nil)
	_ UpserterEntity       = (*HistoryFlapping)(nil)
	_ driver.Valuer        = FlappingEventTime{}
)

View file

@ -0,0 +1,89 @@
package history
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// UpserterEntity is an entity that can also be upserted.
type UpserterEntity interface {
	contracts.Upserter
	contracts.Entity
}

// HistoryTableEntity identifies a row of a per-type history table by a UUID
// serialized under the JSON key "id".
type HistoryTableEntity struct {
	Id types.UUID `json:"id"`
}

// Fingerprint implements part of the contracts.Entity interface.
func (hte HistoryTableEntity) Fingerprint() contracts.Fingerprinter {
	return hte
}

// ID implements part of the contracts.Entity interface.
func (hte HistoryTableEntity) ID() contracts.ID {
	return hte.Id
}

// SetID implements part of the contracts.Entity interface.
func (hte *HistoryTableEntity) SetID(id contracts.ID) {
	hte.Id = id.(types.UUID)
}

// Upsert implements the contracts.Upserter interface.
// Update only the Id (effectively nothing).
func (hte HistoryTableEntity) Upsert() interface{} {
	return hte
}

// HistoryEntity identifies a row of the common history table by a UUID
// serialized under the JSON key "event_id".
type HistoryEntity struct {
	Id types.UUID `json:"event_id"`
}

// Fingerprint implements part of the contracts.Entity interface.
func (he HistoryEntity) Fingerprint() contracts.Fingerprinter {
	return he
}

// ID implements part of the contracts.Entity interface.
func (he HistoryEntity) ID() contracts.ID {
	return he.Id
}

// SetID implements part of the contracts.Entity interface.
func (he *HistoryEntity) SetID(id contracts.ID) {
	he.Id = id.(types.UUID)
}

// Upsert implements the contracts.Upserter interface.
// Update only the Id (effectively nothing).
func (he HistoryEntity) Upsert() interface{} {
	return he
}

// HistoryTableMeta carries the object reference columns shared by the per-type history tables.
type HistoryTableMeta struct {
	EnvironmentId types.Binary `json:"environment_id"`
	EndpointId    types.Binary `json:"endpoint_id"`
	ObjectType    string       `json:"object_type"`
	HostId        types.Binary `json:"host_id"`
	ServiceId     types.Binary `json:"service_id"`
}

// HistoryMeta carries the columns shared by all rows of the common history table.
type HistoryMeta struct {
	HistoryEntity `json:",inline"`
	EnvironmentId types.Binary `json:"environment_id"`
	EndpointId    types.Binary `json:"endpoint_id"`
	ObjectType    string       `json:"object_type"`
	HostId        types.Binary `json:"host_id"`
	ServiceId     types.Binary `json:"service_id"`
	EventType     string       `json:"event_type"`
}

// Assert interface compliance.
var (
	_ contracts.Entity   = (*HistoryTableEntity)(nil)
	_ contracts.Upserter = HistoryTableEntity{}
	_ contracts.Entity   = (*HistoryEntity)(nil)
	_ contracts.Upserter = HistoryEntity{}
	_ contracts.Entity   = (*HistoryMeta)(nil)
	_ contracts.Upserter = (*HistoryMeta)(nil)
)

View file

@ -0,0 +1,45 @@
package history
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// NotificationHistory is a row of the notification history table.
type NotificationHistory struct {
	HistoryTableEntity `json:",inline"`
	HistoryTableMeta   `json:",inline"`
	NotificationId     types.Binary           `json:"notification_id"`
	Type               types.NotificationType `json:"type"`
	SendTime           types.UnixMilli        `json:"send_time"`
	State              uint8                  `json:"state"`
	PreviousHardState  uint8                  `json:"previous_hard_state"`
	Author             string                 `json:"author"`
	Text               string                 `json:"text"`
	UsersNotified      uint16                 `json:"users_notified"`
}

// UserNotificationHistory links a notified user to a NotificationHistory row.
type UserNotificationHistory struct {
	HistoryTableEntity    `json:",inline"`
	EnvironmentId         types.Binary `json:"environment_id"`
	NotificationHistoryId types.UUID   `json:"notification_history_id"`
	UserId                types.Binary `json:"user_id"`
}

// HistoryNotification is the notification-specific row of the common history table;
// its event time is the notification's send time.
type HistoryNotification struct {
	HistoryMeta           `json:",inline"`
	NotificationHistoryId types.UUID      `json:"id"`
	EventTime             types.UnixMilli `json:"send_time"`
}

// TableName implements the contracts.TableNamer interface.
func (*HistoryNotification) TableName() string {
	return "history"
}

// Assert interface compliance.
var (
	_ UpserterEntity       = (*NotificationHistory)(nil)
	_ UpserterEntity       = (*UserNotificationHistory)(nil)
	_ contracts.TableNamer = (*HistoryNotification)(nil)
	_ UpserterEntity       = (*HistoryNotification)(nil)
)

View file

@ -0,0 +1,40 @@
package history
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
type StateHistory struct {
HistoryTableEntity `json:",inline"`
HistoryTableMeta `json:",inline"`
EventTime types.UnixMilli `json:"event_time"`
StateType types.StateType `json:"state_type"`
SoftState uint8 `json:"soft_state"`
HardState uint8 `json:"hard_state"`
PreviousSoftState uint8 `json:"previous_soft_state"`
PreviousHardState uint8 `json:"previous_hard_state"`
Attempt uint8 `json:"attempt"`
Output types.String `json:"output"`
LongOutput types.String `json:"long_output"`
MaxCheckAttempts uint32 `json:"max_check_attempts"`
CheckSource types.String `json:"check_source"`
}
type HistoryState struct {
HistoryMeta `json:",inline"`
StateHistoryId types.UUID `json:"id"`
EventTime types.UnixMilli `json:"event_time"`
}
// TableName implements the contracts.TableNamer interface.
func (*HistoryState) TableName() string {
return "history"
}
// Assert interface compliance.
var (
_ UpserterEntity = (*StateHistory)(nil)
_ contracts.TableNamer = (*HistoryState)(nil)
_ UpserterEntity = (*HistoryState)(nil)
)

122
pkg/icingadb/v1/host.go Normal file
View file

@ -0,0 +1,122 @@
package v1
import (
"bytes"
"database/sql/driver"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
"net"
)
type Host struct {
Checkable `json:",inline"`
Address string `json:"address"`
Address6 string `json:"address6"`
AddressBin AddressBin `json:"-"`
Address6Bin Address6Bin `json:"-"`
}
// Init implements the contracts.Initer interface.
func (h *Host) Init() {
h.Checkable.Init()
h.AddressBin.Host = h
h.Address6Bin.Host = h
}
type AddressBin struct {
Host *Host `db:"-"`
}
var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
// Value implements the driver.Valuer interface.
// It yields the host's IPv4 address in its 4-byte binary form,
// or SQL NULL if the address is absent or not an IPv4 address.
func (ab AddressBin) Value() (driver.Value, error) {
	if ab.Host == nil {
		return nil, nil
	}

	ip := net.ParseIP(ab.Host.Address)
	if ip == nil {
		return nil, nil
	}

	// net.ParseIP returns IPv4 addresses in their 16-byte
	// IPv4-in-IPv6 form; strip that prefix to get the 4-byte form.
	v4 := bytes.TrimPrefix(ip, v4InV6Prefix)
	if len(v4) != 4 {
		return nil, nil
	}

	return []byte(v4), nil
}
type Address6Bin struct {
Host *Host `db:"-"`
}
// Value implements the driver.Valuer interface.
// It yields the host's IPv6 address in binary form,
// or SQL NULL if the address is absent or unparsable.
func (ab Address6Bin) Value() (driver.Value, error) {
	if ab.Host == nil {
		return nil, nil
	}

	ip := net.ParseIP(ab.Host.Address6)
	if ip == nil {
		return nil, nil
	}

	return []byte(ip), nil
}
type HostCustomvar struct {
CustomvarMeta `json:",inline"`
HostId types.Binary `json:"host_id"`
}
type HostState struct {
State `json:",inline"`
HostId types.Binary `json:"host_id"`
}
type Hostgroup struct {
GroupMeta `json:",inline"`
}
type HostgroupCustomvar struct {
CustomvarMeta `json:",inline"`
HostgroupId types.Binary `json:"hostgroup_id"`
}
type HostgroupMember struct {
MemberMeta `json:",inline"`
HostId types.Binary `json:"host_id"`
HostgroupId types.Binary `json:"hostgroup_id"`
}
func NewHost() contracts.Entity {
return &Host{}
}
func NewHostCustomvar() contracts.Entity {
return &HostCustomvar{}
}
func NewHostState() contracts.Entity {
return &HostState{}
}
func NewHostgroup() contracts.Entity {
return &Hostgroup{}
}
func NewHostgroupCustomvar() contracts.Entity {
return &HostgroupCustomvar{}
}
func NewHostgroupMember() contracts.Entity {
return &HostgroupMember{}
}
// Assert interface compliance.
var (
_ contracts.Initer = (*Host)(nil)
_ driver.Valuer = AddressBin{}
_ driver.Valuer = Address6Bin{}
_ contracts.Initer = (*Hostgroup)(nil)
)

View file

@ -0,0 +1,21 @@
package v1
import (
"github.com/icinga/icingadb/pkg/types"
)
// IcingadbInstance represents a row of the icingadb_instance table:
// one Icinga DB daemon plus the Icinga 2 state from its last heartbeat.
type IcingadbInstance struct {
	EntityWithoutChecksum `json:",inline"`
	EnvironmentMeta       `json:",inline"`
	EndpointId            types.Binary    `json:"endpoint_id"`
	Heartbeat             types.UnixMilli `json:"heartbeat"`
	Responsible           types.Bool      `json:"responsible"`
	Icinga2Version        string          `json:"icinga2_version"`
	// Tag normalized: was "icinga2_start_Time" (capital T), inconsistent
	// with the snake_case tags of every other field.
	Icinga2StartTime                  types.UnixMilli `json:"icinga2_start_time"`
	Icinga2NotificationsEnabled       types.Bool      `json:"icinga2_notifications_enabled"`
	Icinga2ActiveServiceChecksEnabled types.Bool      `json:"icinga2_active_service_checks_enabled"`
	Icinga2ActiveHostChecksEnabled    types.Bool      `json:"icinga2_active_host_checks_enabled"`
	Icinga2EventHandlersEnabled       types.Bool      `json:"icinga2_event_handlers_enabled"`
	Icinga2FlapDetectionEnabled       types.Bool      `json:"icinga2_flap_detection_enabled"`
	Icinga2PerformanceDataEnabled     types.Bool      `json:"icinga2_performance_data_enabled"`
}

83
pkg/icingadb/v1/meta.go Normal file
View file

@ -0,0 +1,83 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
// ChecksumMeta is embedded by every type with a checksum.
type ChecksumMeta struct {
PropertiesChecksum types.Binary `json:"checksum"`
}
// Checksum implements part of the contracts.Checksumer interface.
func (m ChecksumMeta) Checksum() contracts.Checksum {
return m.PropertiesChecksum
}
// SetChecksum implements part of the contracts.Checksumer interface.
func (m *ChecksumMeta) SetChecksum(checksum contracts.Checksum) {
m.PropertiesChecksum = checksum.(types.Binary)
}
// EnvironmentMeta is embedded by every type which belongs to an environment.
type EnvironmentMeta struct {
EnvironmentId types.Binary `json:"environment_id"`
}
// IdMeta is embedded by every type Icinga DB should synchronize.
type IdMeta struct {
Id types.Binary `json:"id"`
}
// ID implements part of the contracts.IDer interface.
func (m IdMeta) ID() contracts.ID {
return m.Id
}
// SetID implements part of the contracts.IDer interface.
func (m *IdMeta) SetID(id contracts.ID) {
m.Id = id.(types.Binary)
}
// NameMeta is embedded by every type with a name.
type NameMeta struct {
Name string `json:"name"`
NameChecksum types.Binary `json:"name_checksum"`
}
// NameCiMeta is embedded by every type with a case insensitive name.
type NameCiMeta struct {
NameMeta `json:",inline"`
NameCi *string `json:"-"`
}
// Init implements the contracts.Initer interface.
func (n *NameCiMeta) Init() {
n.NameCi = &n.Name
}
type CustomvarMeta struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
CustomvarId types.Binary `json:"customvar_id"`
}
type GroupMeta struct {
EntityWithChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NameCiMeta `json:",inline"`
DisplayName string `json:"display_name"`
ZoneId types.Binary `json:"zone_id"`
}
type MemberMeta struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
}
// Assert interface compliance.
var (
_ contracts.Initer = (*NameCiMeta)(nil)
_ contracts.Initer = (*GroupMeta)(nil)
)

View file

@ -0,0 +1,71 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
type Notification struct {
EntityWithChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NameCiMeta `json:",inline"`
HostId types.Binary `json:"host_id"`
ServiceId types.Binary `json:"service_id"`
CommandId types.Binary `json:"command_id"`
TimesBegin types.Int `json:"times_begin"`
TimesEnd types.Int `json:"times_end"`
NotificationInterval uint32 `json:"notification_interval"`
TimeperiodId types.Binary `json:"timeperiod_id"`
States types.NotificationStates `json:"states"`
Types types.NotificationTypes `json:"types"`
ZoneId types.Binary `json:"zone_id"`
}
type NotificationUser struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NotificationId types.Binary `json:"notification_id"`
UserId types.Binary `json:"user_id"`
}
type NotificationUsergroup struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NotificationId types.Binary `json:"notification_id"`
UsergroupId types.Binary `json:"usergroup_id"`
}
type NotificationRecipient struct {
NotificationUser `json:",inline"`
UsergroupId types.Binary `json:"usergroup_id"`
}
type NotificationCustomvar struct {
CustomvarMeta `json:",inline"`
NotificationId types.Binary `json:"notification_id"`
}
func NewNotification() contracts.Entity {
return &Notification{}
}
func NewNotificationUser() contracts.Entity {
return &NotificationUser{}
}
func NewNotificationUsergroup() contracts.Entity {
return &NotificationUsergroup{}
}
func NewNotificationRecipient() contracts.Entity {
return &NotificationRecipient{}
}
func NewNotificationCustomvar() contracts.Entity {
return &NotificationCustomvar{}
}
// Assert interface compliance.
var (
_ contracts.Initer = (*Notification)(nil)
)

View file

@ -0,0 +1,26 @@
package overdue
import (
"github.com/icinga/icingadb/pkg/contracts"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/types"
)
type HostState struct {
v1.EntityWithoutChecksum
IsOverdue types.Bool `json:"is_overdue"`
}
// NewHostState creates an overdue indicator for the host with the given
// textual ID. The returned error, if any, stems from parsing that ID.
func NewHostState(id string, overdue bool) (contracts.Entity, error) {
	state := &HostState{
		IsOverdue: types.Bool{Bool: overdue, Valid: true},
	}
	err := state.Id.UnmarshalText([]byte(id))

	return state, err
}
// Assert interface compliance.
var (
_ contracts.Entity = (*HostState)(nil)
)

View file

@ -0,0 +1,26 @@
package overdue
import (
"github.com/icinga/icingadb/pkg/contracts"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/types"
)
type ServiceState struct {
v1.EntityWithoutChecksum
IsOverdue types.Bool `json:"is_overdue"`
}
// NewServiceState creates an overdue indicator for the service with the given
// textual ID. The returned error, if any, stems from parsing that ID.
func NewServiceState(id string, overdue bool) (contracts.Entity, error) {
	state := &ServiceState{
		IsOverdue: types.Bool{Bool: overdue, Valid: true},
	}
	err := state.Id.UnmarshalText([]byte(id))

	return state, err
}
// Assert interface compliance.
var (
_ contracts.Entity = (*ServiceState)(nil)
)

View file

@ -0,0 +1,66 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
type Service struct {
Checkable `json:",inline"`
HostId types.Binary `json:"host_id"`
}
type ServiceCustomvar struct {
CustomvarMeta `json:",inline"`
ServiceId types.Binary `json:"service_id"`
}
type ServiceState struct {
State `json:",inline"`
ServiceId types.Binary `json:"service_id"`
}
type Servicegroup struct {
GroupMeta `json:",inline"`
}
type ServicegroupCustomvar struct {
CustomvarMeta `json:",inline"`
ServicegroupId types.Binary `json:"servicegroup_id"`
}
type ServicegroupMember struct {
MemberMeta `json:",inline"`
ServiceId types.Binary `json:"service_id"`
ServicegroupId types.Binary `json:"servicegroup_id"`
}
func NewService() contracts.Entity {
return &Service{}
}
func NewServiceCustomvar() contracts.Entity {
return &ServiceCustomvar{}
}
func NewServiceState() contracts.Entity {
return &ServiceState{}
}
func NewServicegroup() contracts.Entity {
return &Servicegroup{}
}
func NewServicegroupCustomvar() contracts.Entity {
return &ServicegroupCustomvar{}
}
func NewServicegroupMember() contracts.Entity {
return &ServicegroupMember{}
}
// Assert interface compliance.
var (
_ contracts.Initer = (*Service)(nil)
_ contracts.Initer = (*Servicegroup)(nil)
)

35
pkg/icingadb/v1/state.go Normal file
View file

@ -0,0 +1,35 @@
package v1
import (
"github.com/icinga/icingadb/pkg/types"
)
// State is embedded by HostState and ServiceState (see host.go and
// service.go) and carries the check-result and runtime-state columns
// shared by both object types.
type State struct {
	EntityWithChecksum       `json:",inline"`
	EnvironmentMeta          `json:",inline"`
	AcknowledgementCommentId types.Binary               `json:"acknowledgement_comment_id"`
	Attempt                  uint8                      `json:"check_attempt"`
	CheckCommandline         types.String               `json:"commandline"`
	CheckSource              types.String               `json:"check_source"`
	ExecutionTime            float64                    `json:"execution_time"`
	HardState                uint8                      `json:"hard_state"`
	InDowntime               types.Bool                 `json:"in_downtime"`
	IsAcknowledged           types.AcknowledgementState `json:"acknowledgement"`
	IsFlapping               types.Bool                 `json:"is_flapping"`
	IsHandled                types.Bool                 `json:"is_handled"`
	IsProblem                types.Bool                 `json:"is_problem"`
	IsReachable              types.Bool                 `json:"is_reachable"`
	LastStateChange          types.UnixMilli            `json:"last_state_change"`
	LastUpdate               types.UnixMilli            `json:"last_update"`
	Latency                  float64                    `json:"latency"`
	LongOutput               types.String               `json:"long_output"`
	NextCheck                types.UnixMilli            `json:"next_check"`
	NextUpdate               types.UnixMilli            `json:"next_update"`
	Output                   types.String               `json:"output"`
	PerformanceData          types.String               `json:"performance_data"`
	PreviousHardState        uint8                      `json:"previous_hard_state"`
	Severity                 uint16                     `json:"severity"`
	SoftState                uint8                      `json:"state"`
	StateType                types.StateType            `json:"state_type"`
	Timeout                  float64                    `json:"check_timeout"`
}

View file

@ -0,0 +1,67 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
type Timeperiod struct {
EntityWithChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NameCiMeta `json:",inline"`
DisplayName string `json:"display_name"`
PreferIncludes types.Bool `json:"prefer_includes"`
ZoneId types.Binary `json:"zone_id"`
}
type TimeperiodRange struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
TimeperiodId types.Binary `json:"timeperiod_id"`
RangeKey string `json:"range_key"`
RangeValue string `json:"range_value"`
}
type TimeperiodOverrideInclude struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
TimeperiodId types.Binary `json:"timeperiod_id"`
OverrideId types.Binary `json:"include_id"`
}
type TimeperiodOverrideExclude struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
TimeperiodId types.Binary `json:"timeperiod_id"`
OverrideId types.Binary `json:"exclude_id"`
}
type TimeperiodCustomvar struct {
CustomvarMeta `json:",inline"`
TimeperiodId types.Binary `json:"timeperiod_id"`
}
func NewTimeperiod() contracts.Entity {
return &Timeperiod{}
}
func NewTimeperiodRange() contracts.Entity {
return &TimeperiodRange{}
}
func NewTimeperiodOverrideInclude() contracts.Entity {
return &TimeperiodOverrideInclude{}
}
func NewTimeperiodOverrideExclude() contracts.Entity {
return &TimeperiodOverrideExclude{}
}
func NewTimeperiodCustomvar() contracts.Entity {
return &TimeperiodCustomvar{}
}
// Assert interface compliance.
var (
_ contracts.Initer = (*Timeperiod)(nil)
)

33
pkg/icingadb/v1/url.go Normal file
View file

@ -0,0 +1,33 @@
package v1
import "github.com/icinga/icingadb/pkg/contracts"
type ActionUrl struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
ActionUrl string `json:"action_url"`
}
type NotesUrl struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NotesUrl string `json:"notes_url"`
}
type IconImage struct {
EntityWithoutChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
IconImage string `json:"icon_image"`
}
func NewActionUrl() contracts.Entity {
return &ActionUrl{}
}
func NewNotesUrl() contracts.Entity {
return &NotesUrl{}
}
func NewIconImage() contracts.Entity {
return &IconImage{}
}

66
pkg/icingadb/v1/user.go Normal file
View file

@ -0,0 +1,66 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
)
type User struct {
EntityWithChecksum `json:",inline"`
EnvironmentMeta `json:",inline"`
NameCiMeta `json:",inline"`
DisplayName string `json:"display_name"`
Email string `json:"email"`
Pager string `json:"pager"`
NotificationsEnabled types.Bool `json:"notifications_enabled"`
TimeperiodId types.Binary `json:"timeperiod_id"`
States types.NotificationStates `json:"states"`
Types types.NotificationTypes `json:"types"`
ZoneId types.Binary `json:"zone_id"`
}
type UserCustomvar struct {
CustomvarMeta `json:",inline"`
UserId types.Binary `json:"user_id"`
}
type Usergroup struct {
GroupMeta `json:",inline"`
}
type UsergroupCustomvar struct {
CustomvarMeta `json:",inline"`
UsergroupId types.Binary `json:"usergroup_id"`
}
type UsergroupMember struct {
MemberMeta `json:",inline"`
UserId types.Binary `json:"user_id"`
UsergroupId types.Binary `json:"usergroup_id"`
}
func NewUser() contracts.Entity {
return &User{}
}
func NewUserCustomvar() contracts.Entity {
return &UserCustomvar{}
}
func NewUsergroup() contracts.Entity {
return &Usergroup{}
}
func NewUsergroupCustomvar() contracts.Entity {
return &UsergroupCustomvar{}
}
func NewUsergroupMember() contracts.Entity {
return &UsergroupMember{}
}
// Assert interface compliance.
var (
_ contracts.Initer = (*User)(nil)
_ contracts.Initer = (*Usergroup)(nil)
)

54
pkg/icingadb/v1/v1.go Normal file
View file

@ -0,0 +1,54 @@
package v1
import (
"github.com/icinga/icingadb/pkg/contracts"
)
// Factories lists the contracts.EntityFactoryFunc constructors for all
// v1 configuration types declared in this package, sorted alphabetically.
var Factories = []contracts.EntityFactoryFunc{
	NewActionUrl,
	NewCheckcommand,
	NewCheckcommandArgument,
	NewCheckcommandCustomvar,
	NewCheckcommandEnvvar,
	NewComment,
	NewDowntime,
	NewEndpoint,
	NewEventcommand,
	NewEventcommandArgument,
	NewEventcommandCustomvar,
	NewEventcommandEnvvar,
	NewHost,
	NewHostCustomvar,
	NewHostState,
	NewHostgroup,
	NewHostgroupCustomvar,
	NewHostgroupMember,
	NewIconImage,
	NewNotesUrl,
	NewNotification,
	NewNotificationcommand,
	NewNotificationcommandArgument,
	NewNotificationcommandCustomvar,
	NewNotificationcommandEnvvar,
	NewNotificationCustomvar,
	NewNotificationRecipient,
	NewNotificationUser,
	NewNotificationUsergroup,
	NewService,
	NewServiceCustomvar,
	NewServiceState,
	NewServicegroup,
	NewServicegroupCustomvar,
	NewServicegroupMember,
	NewTimeperiod,
	NewTimeperiodCustomvar,
	NewTimeperiodOverrideExclude,
	NewTimeperiodOverrideInclude,
	NewTimeperiodRange,
	NewUser,
	NewUserCustomvar,
	NewUsergroup,
	NewUsergroupCustomvar,
	NewUsergroupMember,
	NewZone,
}

200
pkg/icingaredis/client.go Normal file
View file

@ -0,0 +1,200 @@
package icingaredis
import (
"context"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"runtime"
"time"
)
// Client is a wrapper around redis.Client with
// streaming and logging capabilities.
type Client struct {
*redis.Client
logger *zap.SugaredLogger
options *Options
}
type Options struct {
Timeout time.Duration `yaml:"timeout" default:"30s"`
MaxHMGetConnections int `yaml:"max_hmget_connections" default:"4096"`
HMGetCount int `yaml:"hmget_count" default:"4096"`
HScanCount int `yaml:"hscan_count" default:"4096"`
}
// NewClient returns a new icingaredis.Client wrapper for a pre-existing *redis.Client.
func NewClient(client *redis.Client, logger *zap.SugaredLogger, options *Options) *Client {
return &Client{Client: client, logger: logger, options: options}
}
// HPair defines Redis hashes field-value pairs.
type HPair struct {
Field string
Value string
}
// HYield yields HPair field-value pairs for all fields in the hash stored at key.
// Pairs are produced by HSCAN in pages of c.options.HScanCount; each page is
// forwarded by its own goroutine while the next page is already being fetched.
// The first error cancels the iteration and is sent on the returned error channel.
func (c *Client) HYield(ctx context.Context, key string) (<-chan HPair, <-chan error) {
	pairs := make(chan HPair)
	g, ctx := errgroup.WithContext(ctx)

	c.logger.Infof("Syncing %s", key)

	g.Go(func() error {
		var cnt com.Counter

		defer close(pairs)
		defer utils.Timed(time.Now(), func(elapsed time.Duration) {
			c.logger.Infof("Fetched %d elements of %s in %s", cnt.Val(), key, elapsed)
		})

		var cursor uint64
		var err error
		var page []string

		// Inner group, deliberately shadowing g and ctx: one goroutine per
		// HSCAN page forwards that page's pairs to the channel.
		g, ctx := errgroup.WithContext(ctx)

		for {
			cmd := c.HScan(ctx, key, cursor, "", int64(c.options.HScanCount))
			page, cursor, err = cmd.Result()

			if err != nil {
				return WrapCmdErr(cmd)
			}

			// Bind the current page via parameter, as the loop variable is reused.
			g.Go(func(page []string) func() error {
				return func() error {
					// HSCAN returns a flat field, value, field, value, ... list.
					for i := 0; i < len(page); i += 2 {
						select {
						case pairs <- HPair{
							Field: page[i],
							Value: page[i+1],
						}:
							cnt.Inc()
						case <-ctx.Done():
							return ctx.Err()
						}
					}

					return nil
				}
			}(page))

			// A cursor of 0 marks the end of the scan.
			if cursor == 0 {
				break
			}
		}

		return g.Wait()
	})

	return pairs, com.WaitAsync(g)
}
// HMYield yields HPair field-value pairs for the specified fields in the hash stored at key.
// Fields are fetched in batches of c.options.HMGetCount; at most
// c.options.MaxHMGetConnections HMGET commands run concurrently.
// Missing fields are logged and skipped. The first error cancels the
// iteration and is sent on the returned error channel.
func (c *Client) HMYield(ctx context.Context, key string, fields ...string) (<-chan HPair, <-chan error) {
	pairs := make(chan HPair)
	g, ctx := errgroup.WithContext(ctx)
	// Use context from group.
	batches := utils.BatchSliceOfStrings(ctx, fields, c.options.HMGetCount)

	g.Go(func() error {
		defer close(pairs)

		// sem bounds the number of concurrently running HMGET commands.
		sem := semaphore.NewWeighted(int64(c.options.MaxHMGetConnections))

		// Inner group, deliberately shadowing g and ctx.
		g, ctx := errgroup.WithContext(ctx)

		for batch := range batches {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}

			// Bind the current batch via parameter, as the loop variable is reused.
			g.Go(func(batch []string) func() error {
				return func() error {
					defer sem.Release(1)

					cmd := c.HMGet(ctx, key, batch...)
					vals, err := cmd.Result()

					if err != nil {
						return WrapCmdErr(cmd)
					}

					// Forward the results from a separate goroutine so that
					// the semaphore is released once HMGET itself finished,
					// not once all pairs have been consumed.
					g.Go(func() error {
						for i, v := range vals {
							if v == nil {
								c.logger.Warnf("HMGET %s: field %#v missing", key, batch[i])
								continue
							}

							select {
							case pairs <- HPair{
								Field: batch[i],
								Value: v.(string),
							}:
							case <-ctx.Done():
								return ctx.Err()
							}
						}

						return nil
					})

					return nil
				}
			}(batch))
		}

		return g.Wait()
	})

	return pairs, com.WaitAsync(g)
}
// StreamLastId fetches the last message of a stream and returns its ID.
// If the stream is empty, the zero ID "0-0" is returned.
func (c *Client) StreamLastId(ctx context.Context, stream string) (string, error) {
	cmd := c.XRevRangeN(ctx, stream, "+", "-", 1)

	messages, err := cmd.Result()
	if err != nil {
		return "", WrapCmdErr(cmd)
	}

	if len(messages) > 0 {
		return messages[len(messages)-1].ID, nil
	}

	return "0-0", nil
}
// YieldAll yields all entities from Redis that belong to the specified SyncSubject.
// Errors from the underlying scan and from entity creation are merged into
// the returned error channel.
//
// Pointer receiver for consistency with the other Client methods
// (the value receiver needlessly copied the Client on every call).
func (c *Client) YieldAll(ctx context.Context, subject *common.SyncSubject) (<-chan contracts.Entity, <-chan error) {
	// Checksum-only state lives under "icinga:checksum:<type>",
	// full objects under "icinga:<type>".
	key := utils.Key(utils.Name(subject.Entity()), ':')
	if subject.WithChecksum() {
		key = "icinga:checksum:" + key
	} else {
		key = "icinga:" + key
	}

	pairs, errs := c.HYield(ctx, key)
	g, ctx := errgroup.WithContext(ctx)
	// Let errors from HYield cancel the group.
	com.ErrgroupReceive(g, errs)

	desired, errs := CreateEntities(ctx, subject.Factory(), pairs, runtime.NumCPU())
	// Let errors from CreateEntities cancel the group.
	com.ErrgroupReceive(g, errs)

	return desired, com.WaitAsync(g)
}

View file

@ -0,0 +1,150 @@
package icingaredis
import (
"context"
"github.com/go-redis/redis/v8"
v1 "github.com/icinga/icingadb/pkg/icingaredis/v1"
"github.com/icinga/icingadb/pkg/utils"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"sync"
"time"
)
// timeout defines how long a heartbeat may be absent before it counts as lost.
// Heartbeats are expected every second (see controller), so this is generous.
var timeout = 60 * time.Second

// Heartbeat reads Icinga 2 heartbeat messages from the icinga:stats Redis
// stream and relays them via Beat. A missing heartbeat is signaled via Lost.
type Heartbeat struct {
	ctx    context.Context
	cancel context.CancelFunc
	client *Client
	logger *zap.SugaredLogger
	active bool // whether a heartbeat was seen recently; only touched by controller
	beat   chan v1.StatsMessage
	lost   chan struct{}
	// done is what Close waits on. NOTE(review): no close(done) is visible
	// in this file — verify the controller closes it, otherwise Close hangs.
	done chan struct{}
	mu   *sync.Mutex // guards err
	err  error
}
func NewHeartbeat(ctx context.Context, client *Client, logger *zap.SugaredLogger) *Heartbeat {
ctx, cancel := context.WithCancel(ctx)
heartbeat := &Heartbeat{
ctx: ctx,
cancel: cancel,
client: client,
logger: logger,
beat: make(chan v1.StatsMessage),
lost: make(chan struct{}),
done: make(chan struct{}),
mu: &sync.Mutex{},
}
go heartbeat.controller()
return heartbeat
}
// Close implements the io.Closer interface.
// It stops the controller loop, waits for it to end and
// returns its terminal error, if any.
func (h Heartbeat) Close() error {
	// Cancel ctx.
	h.cancel()
	// Wait until the controller loop ended.
	<-h.Done()
	// And return an error, if any.
	return h.Err()
}

// Done returns the channel Close waits on for the controller loop to end.
func (h Heartbeat) Done() <-chan struct{} {
	return h.done
}

// Err returns the controller loop's terminal error, if any.
// Safe for concurrent use.
func (h Heartbeat) Err() error {
	h.mu.Lock()
	defer h.mu.Unlock()

	return h.err
}

// Beat returns the channel over which received heartbeat messages are relayed.
func (h Heartbeat) Beat() <-chan v1.StatsMessage {
	return h.beat
}

// Lost returns the channel over which lost heartbeats are signaled.
func (h Heartbeat) Lost() <-chan struct{} {
	return h.lost
}
// controller loop.
//
// Runs a producer goroutine reading heartbeat messages from the icinga:stats
// stream and a state goroutine relaying them / detecting their loss, until
// ctx is canceled or an error occurs.
func (h Heartbeat) controller() {
	// Close() blocks on h.Done(); signal completion once this loop ends.
	defer close(h.done)

	messages := make(chan v1.StatsMessage)
	defer close(messages)

	g, ctx := errgroup.WithContext(h.ctx)

	// Message producer loop
	g.Go(func() error {
		// We expect heartbeats every second but only read them every 3 seconds.
		// time.NewTicker instead of time.Tick so the ticker can be stopped
		// on return instead of leaking.
		throttle := time.NewTicker(time.Second * 3)
		defer throttle.Stop()

		for {
			cmd := h.client.XRead(ctx, &redis.XReadArgs{
				Streams: []string{"icinga:stats", "$"},
				Block:   0, // TODO(el): Might make sense to use a non-blocking variant here
			})

			streams, err := cmd.Result()
			if err != nil {
				return WrapCmdErr(cmd)
			}

			select {
			case messages <- streams[0].Messages[0].Values:
			case <-ctx.Done():
				return ctx.Err()
			}

			<-throttle.C
		}
	})

	// State loop
	g.Go(func() error {
		for {
			select {
			case m := <-messages:
				if !h.active {
					s, err := m.IcingaStatus()
					if err != nil {
						return err
					}

					h.logger.Infow("Received first Icinga 2 heartbeat", zap.String("environment", s.Environment))
					h.active = true
				}

				// Relay the heartbeat, but never block past cancellation
				// (an unconditional send could stall shutdown forever).
				select {
				case h.beat <- m:
				case <-ctx.Done():
					return ctx.Err()
				}
			case <-time.After(timeout):
				if h.active {
					h.logger.Warn("Lost Icinga 2 heartbeat", zap.Duration("timeout", timeout))

					select {
					case h.lost <- struct{}{}:
					case <-ctx.Done():
						return ctx.Err()
					}

					h.active = false
				} else {
					h.logger.Warn("Waiting for Icinga 2 heartbeat")
				}
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	})

	// Since the goroutines of the group actually run endlessly,
	// we wait here forever, unless an error occurs.
	if err := g.Wait(); err != nil && !utils.IsContextCanceled(err) {
		// Do not propagate context-aborted errors here,
		// as this is to be expected when Close was called.
		h.setError(err)
	}
}
func (h *Heartbeat) setError(err error) {
h.mu.Lock()
h.err = err
h.mu.Unlock()
}

104
pkg/icingaredis/utils.go Normal file
View file

@ -0,0 +1,104 @@
package icingaredis
import (
"context"
"encoding/json"
"github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// CreateEntities decodes the given Redis field-value pairs into entities using
// the given amount of concurrent goroutines: the field is parsed as the entity
// ID, the value as its JSON representation. Both returned channels are closed
// once all pairs have been processed or an error occurred.
func CreateEntities(ctx context.Context, factoryFunc contracts.EntityFactoryFunc, pairs <-chan HPair, concurrent int) (<-chan contracts.Entity, <-chan error) {
	// Unbuffered; make(chan T, 0) was flagged by staticcheck (S1019).
	entities := make(chan contracts.Entity)
	g, ctx := errgroup.WithContext(ctx)

	g.Go(func() error {
		defer close(entities)

		// Inner group, deliberately shadowing g and ctx.
		g, ctx := errgroup.WithContext(ctx)

		for i := 0; i < concurrent; i++ {
			g.Go(func() error {
				for pair := range pairs {
					var id types.Binary

					if err := id.UnmarshalText([]byte(pair.Field)); err != nil {
						return errors.Wrapf(err, "can't parse ID from field %#v", pair.Field)
					}

					e := factoryFunc()
					if err := json.Unmarshal([]byte(pair.Value), e); err != nil {
						return errors.Wrapf(err, "can't unmarshal value of field %#v", pair.Field)
					}
					e.SetID(id)

					select {
					case entities <- e:
					case <-ctx.Done():
						return ctx.Err()
					}
				}

				return nil
			})
		}

		return g.Wait()
	})

	return entities, com.WaitAsync(g)
}
// SetChecksums copies each entity's checksum from its counterpart in the
// checksums map (keyed by ID) using the given amount of concurrent goroutines.
// An entity without a counterpart is an error, which is published via the
// returned error channel. Both channels are closed once done.
func SetChecksums(ctx context.Context, entities <-chan contracts.Entity, checksums map[string]contracts.Entity, concurrent int) (<-chan contracts.Entity, <-chan error) {
	// Unbuffered; make(chan T, 0) was flagged by staticcheck (S1019).
	entitiesWithChecksum := make(chan contracts.Entity)
	g, ctx := errgroup.WithContext(ctx)

	g.Go(func() error {
		defer close(entitiesWithChecksum)

		// Inner group, deliberately shadowing g and ctx.
		g, ctx := errgroup.WithContext(ctx)

		for i := 0; i < concurrent; i++ {
			g.Go(func() error {
				for entity := range entities {
					checksumer, ok := checksums[entity.ID().String()]
					if !ok {
						// Previously a leftover debugging panic made this
						// error return unreachable; return it instead so it
						// propagates through the error channel.
						return errors.Errorf("no checksum for entity %s", entity.ID())
					}
					entity.(contracts.Checksumer).SetChecksum(checksumer.(contracts.Checksumer).Checksum())

					select {
					case entitiesWithChecksum <- entity:
					case <-ctx.Done():
						return ctx.Err()
					}
				}

				return nil
			})
		}

		return g.Wait()
	})

	return entitiesWithChecksum, com.WaitAsync(g)
}
// WrapCmdErr adds the command itself and the stack of the current goroutine to the command's error if any.
func WrapCmdErr(cmd redis.Cmder) error {
	err := cmd.Err()
	if err == nil {
		return nil
	}

	// Re-rendering the command via redis.NewCmd omits the error,
	// in contrast to cmd.String().
	desc := utils.Ellipsize(redis.NewCmd(context.Background(), cmd.Args()).String(), 100)

	return errors.Wrap(err, "can't perform "+desc)
}

View file

@ -0,0 +1,26 @@
package v1
import (
"crypto/sha1"
"github.com/icinga/icingadb/pkg/types"
)
type IcingaStatus struct {
Environment string `json:"environment"`
NodeName string `json:"node_name"`
Version string `json:"version"`
ProgramStart types.UnixMilli `json:"program_start"`
EndpointId types.Binary `json:"endpoint_id"`
NotificationsEnabled types.Bool `json:"enable_notifications"`
ActiveServiceChecksEnabled types.Bool `json:"enable_service_checks"`
ActiveHostChecksEnabled types.Bool `json:"enable_host_checks"`
EventHandlersEnabled types.Bool `json:"enable_event_handlers"`
FlapDetectionEnabled types.Bool `json:"enable_flapping"`
PerformanceDataEnabled types.Bool `json:"enable_perfdata"`
}
func (s *IcingaStatus) EnvironmentID() types.Binary {
chksm := sha1.Sum([]byte(s.Environment))
return chksm[:]
}

View file

@ -0,0 +1,48 @@
package v1
import (
"encoding/json"
"errors"
"github.com/icinga/icingadb/pkg/types"
)
// StatsMessage represents a message from the Redis stream icinga:stats.
type StatsMessage map[string]interface{}
func (m StatsMessage) Raw() map[string]interface{} {
return m
}
// IcingaStatus extracts the IcingaApplication status embedded in the message.
// It returns an error if the expected JSON string payload is missing.
func (m StatsMessage) IcingaStatus() (*IcingaStatus, error) {
	s, ok := m["IcingaApplication"].(string)
	if !ok {
		return nil, errors.New("bad message")
	}

	var envelope struct {
		Status struct {
			IcingaApplication struct {
				IcingaStatus `json:"app"`
			} `json:"icingaapplication"`
		} `json:"status"`
	}

	if err := json.Unmarshal([]byte(s), &envelope); err != nil {
		return nil, err
	}

	return &envelope.Status.IcingaApplication.IcingaStatus, nil
}
// Time extracts the message's timestamp.
// It returns an error if the expected JSON string payload is missing.
func (m StatsMessage) Time() (*types.UnixMilli, error) {
	s, ok := m["timestamp"].(string)
	if !ok {
		return nil, errors.New("bad message")
	}

	var t types.UnixMilli
	if err := json.Unmarshal([]byte(s), &t); err != nil {
		return nil, err
	}

	return &t, nil
}

59
pkg/retry/retry.go Normal file
View file

@ -0,0 +1,59 @@
package retry
import (
"context"
"errors"
"github.com/icinga/icingadb/pkg/backoff"
"time"
)
// RetryableFunc is a retryable function.
type RetryableFunc func(context.Context) error
// IsRetryable checks whether a new attempt can be started based on the error passed.
type IsRetryable func(error) bool
// WithBackoff retries the passed function if it fails and the error allows it to retry.
// The specified backoff policy is used to determine how long to sleep between attempts.
// Once the specified timeout (if >0) elapses, WithBackoff gives up.
func WithBackoff(
	ctx context.Context, retryableFunc RetryableFunc, retryable IsRetryable, b backoff.Backoff, timeout time.Duration,
) (err error) {
	if timeout > 0 {
		// Bound all attempts together via a deadline context.
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	for attempt := 0; ; /* true */ attempt++ {
		prevErr := err

		if err = retryableFunc(ctx); err == nil {
			// No error.
			return
		}

		// Decide retryability on the fresh error before it is possibly
		// replaced by prevErr below.
		isRetryable := retryable(err)

		// If the context expired mid-attempt, prefer the more informative
		// error of the previous attempt over the bare context error.
		if prevErr != nil && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) {
			err = prevErr
		}

		if !isRetryable {
			// Not retryable.
			return
		}

		sleep := b(uint64(attempt))
		select {
		case <-ctx.Done():
			// Context canceled. Return last known error.
			// NOTE(review): err is always non-nil at this point,
			// so this fallback looks unreachable — verify.
			if err == nil {
				err = ctx.Err()
			}

			return
		case <-time.After(sleep):
			// Wait for backoff duration and continue.
		}
	}
}

160
pkg/structify/structify.go Normal file
View file

@ -0,0 +1,160 @@
package structify
import (
"encoding"
"fmt"
"github.com/icinga/icingadb/pkg/contracts"
"reflect"
"strconv"
"unsafe"
)
// structBranch represents either a leaf or a subTree.
type structBranch struct {
// field specifies the struct field index.
field int
// leaf specifies the map key to parse the struct field from.
leaf string
// subTree specifies the struct field's inner tree.
subTree []structBranch
}
type MapStructifier = func(map[string]interface{}) (interface{}, error)
// MakeMapStructifier builds a function which parses a map's string values into a new struct of type t
// and returns a pointer to it. tag specifies which tag connects struct fields to map keys.
// MakeMapStructifier panics if it detects an unsupported type (suitable for usage in init() or global vars).
func MakeMapStructifier(t reflect.Type, tag string) MapStructifier {
tree := buildStructTree(t, tag)
return func(kv map[string]interface{}) (interface{}, error) {
vPtr := reflect.New(t)
ptr := vPtr.Interface()
if initer, ok := ptr.(contracts.Initer); ok {
initer.Init()
}
return ptr, structifyMapByTree(kv, tree, vPtr.Elem())
}
}
// buildStructTree assembles a tree which represents the struct t based on tag.
// Only exported fields (PkgPath == "") are considered. A tag value of "" or
// "-" skips the field, ",inline" recurses into the embedded struct, and any
// other value maps the field to that map key.
func buildStructTree(t reflect.Type, tag string) []structBranch {
	var tree []structBranch
	numFields := t.NumField()

	for i := 0; i < numFields; i++ {
		if field := t.Field(i); field.PkgPath == "" {
			switch tagValue := field.Tag.Get(tag); tagValue {
			case "", "-":
			case ",inline":
				if subTree := buildStructTree(field.Type, tag); subTree != nil {
					tree = append(tree, structBranch{i, "", subTree})
				}
			default:
				// If parseString doesn't support *T, it'll panic.
				// This eagerly surfaces unsupported field types at build time.
				_ = parseString("", reflect.New(field.Type).Interface())

				tree = append(tree, structBranch{i, tagValue, nil})
			}
		}
	}

	return tree
}
// structifyMapByTree parses src's string values into the struct dest according to tree's specification.
// Missing keys and non-string values are silently skipped; the first parse
// error aborts the walk.
func structifyMapByTree(src map[string]interface{}, tree []structBranch, dest reflect.Value) error {
	for _, branch := range tree {
		if branch.subTree != nil {
			if err := structifyMapByTree(src, branch.subTree, dest.Field(branch.field)); err != nil {
				return err
			}

			continue
		}

		raw, found := src[branch.leaf]
		if !found {
			continue
		}

		s, isString := raw.(string)
		if !isString {
			continue // only string values are parsed
		}

		if err := parseString(s, dest.Field(branch.field).Addr().Interface()); err != nil {
			return err
		}
	}

	return nil
}
// parseString parses src into *dest.
//
// Supported destinations: encoding.TextUnmarshaler, *string, **string and
// pointers to all fixed-size integer and float types. The bit size for each
// numeric parse is derived from the destination via unsafe.Sizeof, so values
// are range-checked against the exact target width. Any other type panics.
func parseString(src string, dest interface{}) error {
	switch v := dest.(type) {
	case encoding.TextUnmarshaler:
		return v.UnmarshalText([]byte(src))
	case *string:
		*v = src
		return nil
	case **string:
		*v = &src
		return nil
	case *uint8:
		i, err := strconv.ParseUint(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = uint8(i)
		return nil
	case *uint16:
		i, err := strconv.ParseUint(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = uint16(i)
		return nil
	case *uint32:
		i, err := strconv.ParseUint(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = uint32(i)
		return nil
	case *uint64:
		i, err := strconv.ParseUint(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = i
		return nil
	case *int8:
		i, err := strconv.ParseInt(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = int8(i)
		return nil
	case *int16:
		i, err := strconv.ParseInt(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = int16(i)
		return nil
	case *int32:
		i, err := strconv.ParseInt(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = int32(i)
		return nil
	case *int64:
		i, err := strconv.ParseInt(src, 10, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = i
		return nil
	case *float32:
		f, err := strconv.ParseFloat(src, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = float32(f)
		return nil
	case *float64:
		f, err := strconv.ParseFloat(src, int(unsafe.Sizeof(*v)*8))
		if err != nil {
			return err
		}

		*v = f
		return nil
	default:
		panic(fmt.Sprintf("unsupported type: %T", dest))
	}
}

View file

@ -0,0 +1,66 @@
package types
import (
"database/sql/driver"
"encoding"
"encoding/json"
"fmt"
)
// AcknowledgementState specifies an acknowledgement state (yes, no, sticky).
type AcknowledgementState uint8

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Delegates to UnmarshalJSON, as both encodings are a plain number.
func (as *AcknowledgementState) UnmarshalText(text []byte) error {
	return as.UnmarshalJSON(text)
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Rejects numbers that are not declared acknowledgement states.
func (as *AcknowledgementState) UnmarshalJSON(data []byte) error {
	var parsed uint8
	if err := json.Unmarshal(data, &parsed); err != nil {
		return err
	}

	state := AcknowledgementState(parsed)
	if _, known := acknowledgementStates[state]; !known {
		return BadAcknowledgementState{data}
	}

	*as = state
	return nil
}

// Value implements the driver.Valuer interface.
func (as AcknowledgementState) Value() (driver.Value, error) {
	v, known := acknowledgementStates[as]
	if !known {
		return nil, BadAcknowledgementState{as}
	}

	return v, nil
}

// BadAcknowledgementState complains about a syntactically, but not semantically valid AcknowledgementState.
type BadAcknowledgementState struct {
	State interface{}
}

// Error implements the error interface.
func (bas BadAcknowledgementState) Error() string {
	return fmt.Sprintf("bad acknowledgement state: %#v", bas.State)
}

// acknowledgementStates maps all valid AcknowledgementState values to their SQL representation.
var acknowledgementStates = map[AcknowledgementState]string{
	0: "n",
	1: "y",
	2: "sticky",
}

// Assert interface compliance.
var (
	_ error                    = BadAcknowledgementState{}
	_ encoding.TextUnmarshaler = (*AcknowledgementState)(nil)
	_ json.Unmarshaler         = (*AcknowledgementState)(nil)
	_ driver.Valuer            = AcknowledgementState(0)
)

136
pkg/types/binary.go Normal file
View file

@ -0,0 +1,136 @@
package types
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/icinga/icingadb/pkg/contracts"
)
// Binary nullable byte string. Hex as JSON.
// The nil/empty slice represents NULL/null.
type Binary []byte

// nullBinary for validating whether a Binary is valid.
var nullBinary Binary

// Equal returns whether the binaries are the same length and
// contain the same bytes. Panics if equaler is not a Binary.
func (binary Binary) Equal(equaler contracts.Equaler) bool {
	b, ok := equaler.(Binary)
	if !ok {
		panic("bad Binary type assertion")
	}

	return bytes.Equal(binary, b)
}

// Valid returns whether the Binary is valid, i.e. non-empty.
func (binary Binary) Valid() bool {
	return !bytes.Equal(binary, nullBinary)
}

// String returns the hex string representation form of the Binary.
func (binary Binary) String() string {
	return hex.EncodeToString(binary)
}

// MarshalText implements a custom marshal function to encode
// the Binary as hex. MarshalText implements the
// encoding.TextMarshaler interface.
func (binary Binary) MarshalText() ([]byte, error) {
	return []byte(binary.String()), nil
}

// UnmarshalText implements a custom unmarshal function to decode
// hex into a Binary. UnmarshalText implements the
// encoding.TextUnmarshaler interface.
func (binary *Binary) UnmarshalText(text []byte) error {
	b := make([]byte, hex.DecodedLen(len(text)))
	_, err := hex.Decode(b, text)
	if err != nil {
		return err
	}

	*binary = b
	return nil
}

// MarshalJSON implements a custom marshal function to encode the Binary
// as a hex string. MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (binary Binary) MarshalJSON() ([]byte, error) {
	if !binary.Valid() {
		// Bug fix: returning (nil, nil) here made encoding/json fail with
		// "unexpected end of JSON input"; an invalid Binary must encode
		// as the JSON literal null instead.
		return []byte("null"), nil
	}

	return json.Marshal(binary.String())
}

// UnmarshalJSON implements a custom unmarshal function to decode
// a JSON hex string into a Binary. UnmarshalJSON implements the
// json.Unmarshaler interface. Supports JSON null.
func (binary *Binary) UnmarshalJSON(data []byte) error {
	if string(data) == "null" || len(data) == 0 {
		return nil
	}

	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	b, err := hex.DecodeString(s)
	if err != nil {
		return err
	}

	*binary = b
	return nil
}

// Scan implements the sql.Scanner interface.
// Supports SQL NULL.
func (binary *Binary) Scan(src interface{}) error {
	switch src := src.(type) {
	case nil:
		// SQL NULL: leave *binary untouched (stays invalid).
		return nil

	case []byte:
		if len(src) == 0 {
			return nil
		}

		// Copy, as the driver may reuse src after Scan returns.
		b := make([]byte, len(src))
		copy(b, src)
		*binary = b

	default:
		return fmt.Errorf("unable to scan type %T into Binary", src)
	}

	return nil
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (binary Binary) Value() (driver.Value, error) {
	if !binary.Valid() {
		return nil, nil
	}

	return []byte(binary), nil
}

// Assert interface compliance.
var (
	_ contracts.ID             = (*Binary)(nil)
	_ encoding.TextMarshaler   = (*Binary)(nil)
	_ encoding.TextUnmarshaler = (*Binary)(nil)
	_ json.Marshaler           = (*Binary)(nil)
	_ json.Unmarshaler         = (*Binary)(nil)
	_ sql.Scanner              = (*Binary)(nil)
	_ driver.Valuer            = (*Binary)(nil)
)

116
pkg/types/bool.go Normal file
View file

@ -0,0 +1,116 @@
package types
import (
"database/sql"
"database/sql/driver"
"encoding"
"encoding/json"
"errors"
"strconv"
)
var (
	// Yes is the valid Bool true.
	Yes = Bool{
		Bool:  true,
		Valid: true,
	}

	// No is the valid Bool false.
	No = Bool{
		Bool:  false,
		Valid: true,
	}
)

var (
	// enum maps the two bool values to their SQL ENUM representation.
	enum = map[bool]string{
		true:  "y",
		false: "n",
	}
)

// Bool represents a bool for ENUM ('y', 'n'), which can be NULL.
type Bool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (b Bool) MarshalJSON() ([]byte, error) {
	if !b.Valid {
		// Bug fix: returning (nil, nil) here made encoding/json fail with
		// "unexpected end of JSON input"; NULL must encode as JSON null.
		return []byte("null"), nil
	}

	return json.Marshal(b.Bool)
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Expects an unsigned number; any non-zero value is true.
func (b *Bool) UnmarshalText(text []byte) error {
	parsed, err := strconv.ParseUint(string(text), 10, 64)
	if err != nil {
		return err
	}

	*b = Bool{parsed != 0, true}
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Supports JSON null (leaves b untouched).
func (b *Bool) UnmarshalJSON(data []byte) error {
	if string(data) == "null" || len(data) == 0 {
		return nil
	}

	if err := json.Unmarshal(data, &b.Bool); err != nil {
		return err
	}

	b.Valid = true
	return nil
}

// Scan implements the sql.Scanner interface.
// Expects the ENUM bytes 'y'/'n'. Supports SQL NULL.
func (b *Bool) Scan(src interface{}) error {
	if src == nil {
		b.Bool, b.Valid = false, false
		return nil
	}

	v, ok := src.([]byte)
	if !ok {
		return errors.New("bad []byte type assertion")
	}

	switch string(v) {
	case "y":
		b.Bool = true
	case "n":
		b.Bool = false
	default:
		return errors.New("bad bool")
	}

	b.Valid = true
	return nil
}

// Value implements the driver.Valuer interface.
// Supports SQL NULL.
func (b Bool) Value() (driver.Value, error) {
	if !b.Valid {
		return nil, nil
	}

	return enum[b.Bool], nil
}

// Assert interface compliance.
var (
	_ json.Marshaler           = (*Bool)(nil)
	_ encoding.TextUnmarshaler = (*Bool)(nil)
	_ json.Unmarshaler         = (*Bool)(nil)
	_ sql.Scanner              = (*Bool)(nil)
	_ driver.Valuer            = (*Bool)(nil)
)

84
pkg/types/comment_type.go Normal file
View file

@ -0,0 +1,84 @@
package types
import (
"database/sql/driver"
"encoding"
"encoding/json"
"fmt"
"strconv"
)
// CommentType specifies a comment's origin's kind.
type CommentType uint8

// UnmarshalJSON implements the json.Unmarshaler interface.
// Rejects numbers that are not declared comment types.
func (ct *CommentType) UnmarshalJSON(data []byte) error {
	var parsed uint8
	if err := json.Unmarshal(data, &parsed); err != nil {
		return err
	}

	typ := CommentType(parsed)
	if _, known := commentTypes[typ]; !known {
		return BadCommentType{data}
	}

	*ct = typ
	return nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (ct *CommentType) UnmarshalText(data []byte) error {
	text := string(data)

	parsed, err := strconv.ParseUint(text, 10, 64)
	if err != nil {
		return err
	}

	typ := CommentType(parsed)
	if uint64(typ) != parsed {
		// Got truncated by the conversion above, i.e. overflows uint8.
		return BadCommentType{text}
	}

	if _, known := commentTypes[typ]; !known {
		return BadCommentType{text}
	}

	*ct = typ
	return nil
}

// Value implements the driver.Valuer interface.
func (ct CommentType) Value() (driver.Value, error) {
	v, known := commentTypes[ct]
	if !known {
		return nil, BadCommentType{ct}
	}

	return v, nil
}

// BadCommentType complains about a syntactically, but not semantically valid CommentType.
type BadCommentType struct {
	Type interface{}
}

// Error implements the error interface.
func (bct BadCommentType) Error() string {
	return fmt.Sprintf("bad comment type: %#v", bct.Type)
}

// commentTypes maps all valid CommentType values to their SQL representation.
var commentTypes = map[CommentType]string{
	1: "comment",
	4: "ack",
}

// Assert interface compliance.
var (
	_ error                    = BadCommentType{}
	_ json.Unmarshaler         = (*CommentType)(nil)
	_ encoding.TextUnmarshaler = (*CommentType)(nil)
	_ driver.Valuer            = CommentType(0)
)

66
pkg/types/float.go Normal file
View file

@ -0,0 +1,66 @@
package types
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding"
"encoding/json"
"strconv"
)
// Float adds JSON support to sql.NullFloat64.
type Float struct {
	sql.NullFloat64
}

// MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (f Float) MarshalJSON() ([]byte, error) {
	if !f.Valid {
		return json.Marshal(nil)
	}

	return json.Marshal(f.Float64)
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (f *Float) UnmarshalText(text []byte) error {
	parsed, err := strconv.ParseFloat(string(text), 64)
	if err != nil {
		return err
	}

	f.NullFloat64 = sql.NullFloat64{Float64: parsed, Valid: true}
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Supports JSON null (leaves f untouched).
func (f *Float) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if bytes.HasPrefix(data, []byte{'n'}) {
		return nil
	}

	if err := json.Unmarshal(data, &f.Float64); err != nil {
		return err
	}

	f.Valid = true
	return nil
}

// Assert interface compliance.
var (
	_ json.Marshaler           = Float{}
	_ encoding.TextUnmarshaler = (*Float)(nil)
	_ json.Unmarshaler         = (*Float)(nil)
	_ driver.Valuer            = Float{}
	_ sql.Scanner              = (*Float)(nil)
)

66
pkg/types/int.go Normal file
View file

@ -0,0 +1,66 @@
package types
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding"
"encoding/json"
"strconv"
)
// Int adds JSON support to sql.NullInt64.
type Int struct {
	sql.NullInt64
}

// MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (i Int) MarshalJSON() ([]byte, error) {
	if !i.Valid {
		return json.Marshal(nil)
	}

	return json.Marshal(i.Int64)
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (i *Int) UnmarshalText(text []byte) error {
	parsed, err := strconv.ParseInt(string(text), 10, 64)
	if err != nil {
		return err
	}

	i.NullInt64 = sql.NullInt64{Int64: parsed, Valid: true}
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Supports JSON null (leaves i untouched).
func (i *Int) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if bytes.HasPrefix(data, []byte{'n'}) {
		return nil
	}

	if err := json.Unmarshal(data, &i.Int64); err != nil {
		return err
	}

	i.Valid = true
	return nil
}

// Assert interface compliance.
var (
	_ json.Marshaler           = Int{}
	_ json.Unmarshaler         = (*Int)(nil)
	_ encoding.TextUnmarshaler = (*Int)(nil)
	_ driver.Valuer            = Int{}
	_ sql.Scanner              = (*Int)(nil)
)

View file

@ -0,0 +1,83 @@
package types
import (
"database/sql/driver"
"encoding"
"encoding/json"
"fmt"
)
// NotificationStates specifies the set of states a notification may be sent for.
// It is a bitmask of the values in notificationStateNames.
type NotificationStates uint8

// UnmarshalJSON implements the json.Unmarshaler interface.
// Expects a JSON array of state names, e.g. ["OK","Down"].
func (nst *NotificationStates) UnmarshalJSON(data []byte) error {
	var names []string
	if err := json.Unmarshal(data, &names); err != nil {
		return err
	}

	var states NotificationStates
	for _, name := range names {
		state, known := notificationStateNames[name]
		if !known {
			return BadNotificationStates{names}
		}

		states |= state
	}

	*nst = states
	return nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (nst *NotificationStates) UnmarshalText(text []byte) error {
	return nst.UnmarshalJSON(text)
}

// Value implements the driver.Valuer interface.
func (nst NotificationStates) Value() (driver.Value, error) {
	if invalid := nst &^ allNotificationStates; invalid != 0 {
		return nil, BadNotificationStates{nst}
	}

	return int64(nst), nil
}

// BadNotificationStates complains about syntactically, but not semantically valid NotificationStates.
type BadNotificationStates struct {
	States interface{}
}

// Error implements the error interface.
func (bns BadNotificationStates) Error() string {
	return fmt.Sprintf("bad notification states: %#v", bns.States)
}

// notificationStateNames maps all valid NotificationStates values to their SQL representation.
var notificationStateNames = map[string]NotificationStates{
	"OK":       1,
	"Warning":  2,
	"Critical": 4,
	"Unknown":  8,
	"Up":       16,
	"Down":     32,
}

// allNotificationStates is the largest valid NotificationStates value.
var allNotificationStates = func() NotificationStates {
	var all NotificationStates
	for _, state := range notificationStateNames {
		all |= state
	}

	return all
}()

// Assert interface compliance.
var (
	_ error                    = BadNotificationStates{}
	_ json.Unmarshaler         = (*NotificationStates)(nil)
	_ encoding.TextUnmarshaler = (*NotificationStates)(nil)
	_ driver.Valuer            = NotificationStates(0)
)

View file

@ -0,0 +1,73 @@
package types
import (
"database/sql/driver"
"encoding"
"fmt"
"strconv"
)
// NotificationType specifies the reason of a sent notification.
type NotificationType uint16

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (nt *NotificationType) UnmarshalText(data []byte) error {
	text := string(data)

	parsed, err := strconv.ParseUint(text, 10, 64)
	if err != nil {
		return err
	}

	typ := NotificationType(parsed)
	if uint64(typ) != parsed {
		// Got truncated by the conversion above, i.e. overflows uint16.
		return BadNotificationType{text}
	}

	if _, known := notificationTypes[typ]; !known {
		return BadNotificationType{text}
	}

	*nt = typ
	return nil
}

// Value implements the driver.Valuer interface.
func (nt NotificationType) Value() (driver.Value, error) {
	v, known := notificationTypes[nt]
	if !known {
		return nil, BadNotificationType{nt}
	}

	return v, nil
}

// BadNotificationType complains about a syntactically, but not semantically valid NotificationType.
type BadNotificationType struct {
	Type interface{}
}

// Error implements the error interface.
func (bnt BadNotificationType) Error() string {
	return fmt.Sprintf("bad notification type: %#v", bnt.Type)
}

// notificationTypes maps all valid NotificationType values to their SQL representation.
var notificationTypes = map[NotificationType]string{
	1:   "downtime_start",
	2:   "downtime_end",
	4:   "downtime_removed",
	8:   "custom",
	16:  "acknowledgement",
	32:  "problem",
	64:  "recovery",
	128: "flapping_start",
	256: "flapping_end",
}

// Assert interface compliance.
var (
	_ error                    = BadNotificationType{}
	_ encoding.TextUnmarshaler = (*NotificationType)(nil)
	_ driver.Valuer            = NotificationType(0)
)

View file

@ -0,0 +1,86 @@
package types
import (
"database/sql/driver"
"encoding"
"encoding/json"
"fmt"
)
// NotificationTypes specifies the set of reasons a notification may be sent for.
// It is a bitmask of the values in notificationTypeNames.
type NotificationTypes uint16

// UnmarshalJSON implements the json.Unmarshaler interface.
// Expects a JSON array of type names, e.g. ["Custom","Problem"].
func (nt *NotificationTypes) UnmarshalJSON(data []byte) error {
	var names []string
	if err := json.Unmarshal(data, &names); err != nil {
		return err
	}

	var types NotificationTypes
	for _, name := range names {
		typ, known := notificationTypeNames[name]
		if !known {
			return BadNotificationTypes{names}
		}

		types |= typ
	}

	*nt = types
	return nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (nt *NotificationTypes) UnmarshalText(text []byte) error {
	return nt.UnmarshalJSON(text)
}

// Value implements the driver.Valuer interface.
func (nt NotificationTypes) Value() (driver.Value, error) {
	if invalid := nt &^ allNotificationTypes; invalid != 0 {
		return nil, BadNotificationTypes{nt}
	}

	return int64(nt), nil
}

// BadNotificationTypes complains about syntactically, but not semantically valid NotificationTypes.
type BadNotificationTypes struct {
	Types interface{}
}

// Error implements the error interface.
func (bnt BadNotificationTypes) Error() string {
	return fmt.Sprintf("bad notification types: %#v", bnt.Types)
}

// notificationTypeNames maps all valid NotificationTypes values to their SQL representation.
var notificationTypeNames = map[string]NotificationTypes{
	"DowntimeStart":   1,
	"DowntimeEnd":     2,
	"DowntimeRemoved": 4,
	"Custom":          8,
	"Acknowledgement": 16,
	"Problem":         32,
	"Recovery":        64,
	"FlappingStart":   128,
	"FlappingEnd":     256,
}

// allNotificationTypes is the largest valid NotificationTypes value.
var allNotificationTypes = func() NotificationTypes {
	var all NotificationTypes
	for _, typ := range notificationTypeNames {
		all |= typ
	}

	return all
}()

// Assert interface compliance.
var (
	_ error                    = BadNotificationTypes{}
	_ json.Unmarshaler         = (*NotificationTypes)(nil)
	_ encoding.TextUnmarshaler = (*NotificationTypes)(nil)
	_ driver.Valuer            = NotificationTypes(0)
)

65
pkg/types/state_type.go Normal file
View file

@ -0,0 +1,65 @@
package types
import (
"database/sql/driver"
"encoding"
"encoding/json"
"fmt"
)
// StateType specifies a state's hardness.
type StateType uint8

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Delegates to UnmarshalJSON, as both encodings are a plain number.
func (st *StateType) UnmarshalText(text []byte) error {
	return st.UnmarshalJSON(text)
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Rejects numbers that are not declared state types.
func (st *StateType) UnmarshalJSON(data []byte) error {
	var parsed uint8
	if err := json.Unmarshal(data, &parsed); err != nil {
		return err
	}

	typ := StateType(parsed)
	if _, known := stateTypes[typ]; !known {
		return BadStateType{data}
	}

	*st = typ
	return nil
}

// Value implements the driver.Valuer interface.
func (st StateType) Value() (driver.Value, error) {
	v, known := stateTypes[st]
	if !known {
		return nil, BadStateType{st}
	}

	return v, nil
}

// BadStateType complains about a syntactically, but not semantically valid StateType.
type BadStateType struct {
	Type interface{}
}

// Error implements the error interface.
func (bst BadStateType) Error() string {
	return fmt.Sprintf("bad state type: %#v", bst.Type)
}

// stateTypes maps all valid StateType values to their SQL representation.
var stateTypes = map[StateType]string{
	0: "soft",
	1: "hard",
}

// Assert interface compliance.
var (
	_ error                    = BadStateType{}
	_ encoding.TextUnmarshaler = (*StateType)(nil)
	_ json.Unmarshaler         = (*StateType)(nil)
	_ driver.Valuer            = StateType(0)
)

60
pkg/types/string.go Normal file
View file

@ -0,0 +1,60 @@
package types
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding"
"encoding/json"
)
// String adds JSON support to sql.NullString.
type String struct {
	sql.NullString
}

// MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (s String) MarshalJSON() ([]byte, error) {
	if !s.Valid {
		return json.Marshal(nil)
	}

	return json.Marshal(s.String)
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Any text, including the empty one, yields a valid String.
func (s *String) UnmarshalText(text []byte) error {
	s.NullString = sql.NullString{String: string(text), Valid: true}
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Supports JSON null (leaves s untouched).
func (s *String) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if bytes.HasPrefix(data, []byte{'n'}) {
		return nil
	}

	if err := json.Unmarshal(data, &s.String); err != nil {
		return err
	}

	s.Valid = true
	return nil
}

// Assert interface compliance.
var (
	_ json.Marshaler           = String{}
	_ encoding.TextUnmarshaler = (*String)(nil)
	_ json.Unmarshaler         = (*String)(nil)
	_ driver.Valuer            = String{}
	_ sql.Scanner              = (*String)(nil)
)

94
pkg/types/unix_milli.go Normal file
View file

@ -0,0 +1,94 @@
package types
import (
"database/sql"
"database/sql/driver"
"encoding"
"encoding/json"
"errors"
"github.com/icinga/icingadb/pkg/utils"
"strconv"
"time"
)
// UnixMilli is a nullable millisecond UNIX timestamp in databases and JSON.
// The zero time.Time value represents NULL/null.
type UnixMilli time.Time

// Time returns the time.Time conversion of UnixMilli.
func (t UnixMilli) Time() time.Time {
	return time.Time(t)
}

// MarshalJSON implements the json.Marshaler interface.
// Marshals to milliseconds. Supports JSON null.
func (t UnixMilli) MarshalJSON() ([]byte, error) {
	if time.Time(t).IsZero() {
		// Bug fix: returning (nil, nil) here made encoding/json fail with
		// "unexpected end of JSON input"; the zero time must encode
		// as the JSON literal null instead.
		return []byte("null"), nil
	}

	return []byte(strconv.FormatInt(utils.UnixMilli(time.Time(t)), 10)), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Accepts (fractional) milliseconds; the fraction is truncated.
func (t *UnixMilli) UnmarshalText(text []byte) error {
	parsed, err := strconv.ParseFloat(string(text), 64)
	if err != nil {
		return err
	}

	*t = UnixMilli(utils.FromUnixMilli(int64(parsed)))
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// Unmarshals from milliseconds. Supports JSON null.
func (t *UnixMilli) UnmarshalJSON(data []byte) error {
	if string(data) == "null" || len(data) == 0 {
		return nil
	}

	ms, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}
	*t = UnixMilli(utils.FromUnixMilli(int64(ms)))

	return nil
}

// Scan implements the sql.Scanner interface.
// Scans from milliseconds. Supports SQL NULL.
func (t *UnixMilli) Scan(src interface{}) error {
	if src == nil {
		// SQL NULL: leave *t untouched (stays zero).
		return nil
	}

	v, ok := src.(int64)
	if !ok {
		return errors.New("bad int64 type assertion")
	}
	*t = UnixMilli(utils.FromUnixMilli(v))

	return nil
}

// Value implements the driver.Valuer interface.
// Returns milliseconds. Supports SQL NULL.
func (t UnixMilli) Value() (driver.Value, error) {
	if t.Time().IsZero() {
		return nil, nil
	}

	return utils.UnixMilli(t.Time()), nil
}

// Assert interface compliance.
var (
	_ json.Marshaler           = (*UnixMilli)(nil)
	_ encoding.TextUnmarshaler = (*UnixMilli)(nil)
	_ json.Unmarshaler         = (*UnixMilli)(nil)
	_ sql.Scanner              = (*UnixMilli)(nil)
	_ driver.Valuer            = (*UnixMilli)(nil)
)

24
pkg/types/uuid.go Normal file
View file

@ -0,0 +1,24 @@
package types
import (
"database/sql/driver"
"encoding"
"github.com/google/uuid"
)
// UUID is like uuid.UUID, but marshals itself binarily (not like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) in SQL context.
type UUID struct {
	uuid.UUID
}

// Value implements driver.Valuer.
// It returns the UUID's raw 16 bytes so the database stores the compact
// binary form rather than the 36-character textual form.
func (uuid UUID) Value() (driver.Value, error) {
	return uuid.UUID[:], nil
}

// Assert interface compliance.
// encoding.TextUnmarshaler is satisfied by the embedded uuid.UUID.
var (
	_ encoding.TextUnmarshaler = (*UUID)(nil)
	_ driver.Valuer            = UUID{}
	_ driver.Valuer            = (*UUID)(nil)
)

221
pkg/utils/utils.go Normal file
View file

@ -0,0 +1,221 @@
package utils
import (
"context"
"crypto/sha1"
"errors"
"fmt"
"github.com/go-sql-driver/mysql"
"github.com/google/uuid"
"github.com/icinga/icingadb/pkg/contracts"
"go.uber.org/zap"
"golang.org/x/exp/utf8string"
"io/ioutil"
"math"
"math/rand"
"os"
"strings"
"sync"
"time"
"unicode"
)
// FromUnixMilli creates and returns a time.Time value
// from the given milliseconds since the Unix epoch ms.
func FromUnixMilli(ms int64) time.Time {
	// Integer arithmetic instead of math.Modf on float64, so timestamps
	// beyond float64's 53-bit mantissa don't lose precision. time.Unix
	// normalizes a negative nanosecond remainder itself.
	return time.Unix(ms/1000, ms%1000*int64(time.Millisecond))
}

// UnixMilli returns milliseconds since the Unix epoch of time t.
func UnixMilli(t time.Time) int64 {
	return t.UnixNano() / int64(time.Millisecond)
}
// Name returns the declared name of type t.
// Name is used in combination with Key
// to automatically guess an entity's
// database table and Redis key.
func Name(t interface{}) string {
	// %T yields e.g. "*v1.Host"; strip pointer stars and the package path.
	typeName := strings.TrimLeft(fmt.Sprintf("%T", t), "*")
	if i := strings.LastIndex(typeName, "."); i >= 0 {
		typeName = typeName[i+1:]
	}

	return typeName
}
// TableName returns the table of t: either the type's own declaration
// via contracts.TableNamer, or the snake_cased type name.
func TableName(t interface{}) string {
	if tn, ok := t.(contracts.TableNamer); ok {
		return tn.TableName()
	}

	return Key(Name(t), '_')
}
// Key returns the name with all Unicode letters mapped to lower case letters,
// with an additional separator in front of each original upper case letter,
// e.g. Key("HostGroup", '_') yields "host_group".
// The empty string is returned unchanged instead of panicking.
func Key(name string, sep byte) string {
	if name == "" {
		// Guard: r[0] below would panic on an empty input.
		return ""
	}

	r := []rune(name)
	b := strings.Builder{}
	b.Grow(len(r) + 2) // nominal 2 bytes of extra space for inserted delimiters
	b.WriteRune(unicode.ToLower(r[0]))
	for _, r := range r[1:] {
		if unicode.IsUpper(r) {
			b.WriteByte(sep)
		}
		b.WriteRune(unicode.ToLower(r))
	}
	return b.String()
}
// Timed calls callback with the time elapsed since start.
// Intended for use with defer, e.g.
// defer Timed(time.Now(), func(elapsed time.Duration) { ... }):
// the defer evaluates start immediately, the callback runs at return.
func Timed(start time.Time, callback func(elapsed time.Duration)) {
	callback(time.Since(start))
}
// BatchSliceOfStrings spawns a producer emitting sub-slices of keys with up
// to count elements each on the returned channel. The producer stops early
// once ctx is canceled and closes the channel when done.
func BatchSliceOfStrings(ctx context.Context, keys []string, count int) <-chan []string {
	batches := make(chan []string)

	go func() {
		defer close(batches)

		for len(keys) > 0 {
			size := count
			if size > len(keys) {
				size = len(keys)
			}

			select {
			case batches <- keys[:size]:
				keys = keys[size:]
			case <-ctx.Done():
				return
			}
		}
	}()

	return batches
}
// BatchSliceOfInterfaces spawns a producer emitting sub-slices of keys with
// up to count elements each on the returned channel. The producer stops
// early once ctx is canceled and closes the channel when done.
func BatchSliceOfInterfaces(ctx context.Context, keys []interface{}, count int) <-chan []interface{} {
	batches := make(chan []interface{})

	go func() {
		defer close(batches)

		for len(keys) > 0 {
			size := count
			if size > len(keys) {
				size = len(keys)
			}

			select {
			case batches <- keys[:size]:
				keys = keys[size:]
			case <-ctx.Done():
				return
			}
		}
	}()

	return batches
}
// IsContextCanceled returns whether err is or wraps context.Canceled.
func IsContextCanceled(err error) bool {
	return errors.Is(err, context.Canceled)
}
// CreateOrRead returns the contents of the file name if it already exists.
// Otherwise the file is created (mode 0660) with the bytes produced by
// callback, and those bytes are returned. Errors if name is a directory.
func CreateOrRead(name string, callback func() []byte) ([]byte, error) {
	info, err := os.Stat(name)
	switch {
	case os.IsNotExist(err):
		content := callback()
		if writeErr := ioutil.WriteFile(name, content, 0660); writeErr != nil {
			// Don't leave a partially written file behind.
			defer os.Remove(name)
			return nil, writeErr
		}

		return content, nil
	case err != nil:
		return nil, err
	case info.IsDir():
		return nil, fmt.Errorf("'%s' is a directory", name)
	}

	return ioutil.ReadFile(name)
}
// Uuid generates a new random (version 4) UUID and
// returns its raw 16 bytes.
func Uuid() []byte {
	u := uuid.New()
	return u[:]
}
// Checksum returns the SHA-1 digest of data,
// which must be a string or a []byte; anything else panics.
func Checksum(data interface{}) []byte {
	var sum [sha1.Size]byte

	switch data := data.(type) {
	case string:
		sum = sha1.Sum([]byte(data))
	case []byte:
		sum = sha1.Sum(data)
	default:
		panic(fmt.Sprintf("Unable to create checksum for type %T", data))
	}

	return sum[:]
}
// Fatal aborts by panicking with err.
// TODO(el): Print stacktrace via some recover() magic?
func Fatal(err error) {
	panic(err)
}
// IsDeadlock returns whether the given error signals serialization failure.
// Matches MySQL/MariaDB error numbers 1205 (lock wait timeout exceeded)
// and 1213 (deadlock found when trying to get lock).
func IsDeadlock(err error) bool {
	switch e := err.(type) {
	case *mysql.MySQLError:
		switch e.Number {
		case 1205, 1213:
			return true
		}
	}
	return false
}
// randomSleepSeed ensures the pseudo-random source is seeded only once
// per process.
var randomSleepSeed sync.Once

// RandomSleep sleeps for a random duration of up to 100ms
// and logs the chosen duration via sugar.
func RandomSleep(sugar *zap.SugaredLogger) {
	// Bug fix: the sync.Once was previously a local variable, so Do ran
	// on every call and the generator was re-seeded each time instead
	// of exactly once.
	randomSleepSeed.Do(func() {
		rand.Seed(time.Now().UnixNano())
	})

	n := rand.Intn(100)
	d := time.Duration(n) * time.Millisecond
	sugar.Info("Sleeping for ", d)
	time.Sleep(d)
}
// ellipsis is the shortening indicator appended by Ellipsize.
var ellipsis = utf8string.NewString("...")

// Ellipsize shortens s to <=limit runes and indicates shortening by "...".
// A limit that cannot even fit the ellipsis yields just the ellipsis.
func Ellipsize(s string, limit int) string {
	utf8 := utf8string.NewString(s)
	switch {
	case utf8.RuneCount() <= limit:
		return s
	case limit <= ellipsis.RuneCount():
		// Bug fix: this case previously compared the STRING's rune count
		// against the ellipsis length, so a too-small limit (e.g. 2) with
		// a long string fell through to Slice(0, negative) and panicked.
		return ellipsis.String()
	default:
		return utf8.Slice(0, limit-ellipsis.RuneCount()) + ellipsis.String()
	}
}

15
schema/1.0.0-rc2.sql Normal file
View file

@ -0,0 +1,15 @@
-- Schema migration for 1.0.0-rc2.
--
-- host_state and service_state get a dedicated primary key column `id`
-- (backfilled from host_id/service_id, presumably the previous primary
-- keys) and a `properties_checksum` column. The checksum column is added
-- nullable first, backfilled with a placeholder, and only then made
-- NOT NULL so the statements also work on already populated tables.

ALTER TABLE host_state DROP PRIMARY KEY;
ALTER TABLE host_state ADD COLUMN id binary(20) NOT NULL FIRST;
UPDATE host_state SET id = host_id;
ALTER TABLE host_state ADD PRIMARY KEY (id);
ALTER TABLE host_state ADD COLUMN properties_checksum binary(20) AFTER environment_id;
UPDATE host_state SET properties_checksum = 0;
ALTER TABLE host_state MODIFY COLUMN properties_checksum binary(20) NOT NULL;

-- Same changes, applied to service_state.
ALTER TABLE service_state DROP PRIMARY KEY;
ALTER TABLE service_state ADD COLUMN id binary(20) NOT NULL FIRST;
UPDATE service_state SET id = service_id;
ALTER TABLE service_state ADD PRIMARY KEY (id);
ALTER TABLE service_state ADD COLUMN properties_checksum binary(20) AFTER environment_id;
UPDATE service_state SET properties_checksum = 0;
ALTER TABLE service_state MODIFY COLUMN properties_checksum binary(20) NOT NULL;