first version

This commit is contained in:
2026-02-08 22:29:45 +01:00
commit d1e54774a7
15 changed files with 1355 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
/build
/config.json

54
build.sh Executable file
View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Build script for docker-updater.
# By default builds for the host platform; with --all-target, cross-compiles
# for every platform listed below.
set -euo pipefail

TARGET_CURRENT=true

# Print the command line help.
usage() {
  echo "Usage: $0 [OPTIONS]"
  echo "Options:"
  echo " --all-target   Build for all target available"
}

# Function to handle options and arguments
handle_options() {
  while [ $# -gt 0 ]; do
    case $1 in
      --all-target)
        TARGET_CURRENT=false
        ;;
      *)
        echo "Invalid option: $1" >&2
        usage
        exit 1
        ;;
    esac
    shift
  done
}

# Main script execution
handle_options "$@"

mkdir -p ./build

if [ "$TARGET_CURRENT" == "true" ]; then
  GOOS=$(go env GOOS)
  GOARCH=$(go env GOARCH)
  echo "* Compiling for $GOOS/$GOARCH..."
  CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH GORISCV64=rva22u64 GOAMD64=v3 GOARM64=v8.2 go build -o build/dockerupdater -a
  exit 0
fi

platforms=("linux/amd64" "linux/arm64" "linux/riscv64" "linux/ppc64le" "windows/amd64")
for platform in "${platforms[@]}"; do
  echo "* Compiling for $platform..."
  platform_split=(${platform//\// })
  GOOS=${platform_split[0]}
  GOARCH=${platform_split[1]}
  # Fix: separate the binary name from the platform (previously produced
  # "dockerupdaterlinux_amd64") and give Windows binaries the required .exe.
  output="build/dockerupdater_${GOOS}_${GOARCH}"
  if [ "$GOOS" == "windows" ]; then
    output="${output}.exe"
  fi
  CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH GORISCV64=rva22u64 GOAMD64=v3 GOARM64=v8.2 go build -o "$output" -a
done

125
config/config.go Normal file
View File

@@ -0,0 +1,125 @@
package config
import (
"encoding/json"
"fmt"
"log/slog"
"os"
"github.com/robfig/cron/v3"
)
type (
	// Configuration is the top-level application configuration loaded from
	// the JSON config file (see Load) or built by Default.
	Configuration struct {
		// GlobalContainerConfiguration applies to every container unless
		// overridden by per-container labels.
		GlobalContainerConfiguration ContainerConfiguration `json:"containers"`
		DaemonConfiguration          DaemonConfiguration    `json:"daemon"`
		// IgnoreRunningUnspecifiedContainers: NOTE(review) not read by any
		// code visible in this commit — confirm intended use.
		IgnoreRunningUnspecifiedContainers bool `json:"ignore_running_unspecified_containers"`
		StrictValidation                   bool `json:"strict_validation"`
	}
	// DaemonConfiguration configures the docker watcher.
	DaemonConfiguration struct {
		// PullInterval is the watcher poll interval, in seconds (used as
		// time.Duration(interval) * time.Second by StartWatcher).
		PullInterval uint `json:"pull_interval"`
	}
	// ContainerConfiguration is the per-container update policy.
	ContainerConfiguration struct {
		Enabled  bool   `json:"enabled"`
		Schedule string `json:"schedule"` // standard 5-field cron expression
	}
)
// Load reads and parses the configuration file at configPath.
// If the file does not exist, the default configuration is returned with a
// warning. Any other error (open failure, malformed JSON) is reported.
func Load(configPath string) (Configuration, error) {
	file, err := os.Open(configPath)
	if err != nil {
		// Bug fix: previously every open error (including permission
		// denied) was treated as "file missing" and silently replaced the
		// user's configuration with the defaults.
		if os.IsNotExist(err) {
			slog.Warn("no configuration found, loading the default one", "thread", "main", "path", configPath)
			return Default(), nil
		}
		return Configuration{}, fmt.Errorf("unable to open the configuration file: %s", err)
	}
	defer file.Close()
	var c map[string]any
	d := json.NewDecoder(file)
	if err := d.Decode(&c); err != nil {
		return Configuration{}, fmt.Errorf("unable to parse the configuration file: %s", err)
	}
	return parseConfiguration(c)
}
// Default returns the built-in configuration: automatic updates disabled,
// an every-minute schedule, and a 2-second watcher pull interval.
func Default() Configuration {
	var c Configuration
	c.GlobalContainerConfiguration.Enabled = false
	c.GlobalContainerConfiguration.Schedule = "* * * * *"
	c.DaemonConfiguration.PullInterval = 2
	c.IgnoreRunningUnspecifiedContainers = true
	c.StrictValidation = false
	return c
}
// parseConfiguration overlays the decoded JSON document on top of the
// default configuration, validating the type of every recognized key.
// Unknown keys are ignored; a present key with the wrong type is an error.
func parseConfiguration(currentConfig map[string]any) (Configuration, error) {
	c := Default()
	if rawContainers, ok := currentConfig["containers"]; ok {
		containersData, ok := rawContainers.(map[string]any)
		if !ok {
			return Configuration{}, fmt.Errorf("configuration parsing: invalid containers section, expected an object")
		}
		if schedule, ok := containersData["schedule"]; ok {
			data, ok := schedule.(string)
			if !ok {
				return Configuration{}, fmt.Errorf("configuration parsing: invalid containers.schedule value, expected a string")
			}
			c.GlobalContainerConfiguration.Schedule = data
		}
		if enabled, ok := containersData["enabled"]; ok {
			data, ok := enabled.(bool)
			if !ok {
				return Configuration{}, fmt.Errorf("configuration parsing: invalid containers.enabled value, expected a bool")
			}
			c.GlobalContainerConfiguration.Enabled = data
		}
	}
	if rawDaemon, ok := currentConfig["daemon"]; ok {
		daemonData, ok := rawDaemon.(map[string]any)
		if !ok {
			return Configuration{}, fmt.Errorf("configuration parsing: invalid daemon section, expected an object")
		}
		if pullInterval, ok := daemonData["pull_interval"]; ok {
			// Bug fix: encoding/json decodes JSON numbers into float64 when
			// the target is `any`, so the previous `.(uint)` assertion could
			// never succeed and pull_interval was silently ignored. The old
			// code also errored when pull_interval was *absent* (misplaced
			// else) instead of when it was invalid.
			f, ok := pullInterval.(float64)
			if !ok || f < 0 || f != float64(uint(f)) {
				return Configuration{}, fmt.Errorf("configuration parsing: invalid daemon.pull_interval value, expected an unsigned integer")
			}
			c.DaemonConfiguration.PullInterval = uint(f)
		}
	}
	if ignoreRunningUnspecifiedContainers, ok := currentConfig["ignore_running_unspecified_containers"]; ok {
		data, ok := ignoreRunningUnspecifiedContainers.(bool)
		if !ok {
			return Configuration{}, fmt.Errorf("configuration parsing: invalid ignore_running_unspecified_containers value, expected a bool")
		}
		c.IgnoreRunningUnspecifiedContainers = data
	}
	if strictValidation, ok := currentConfig["strict_validation"]; ok {
		data, ok := strictValidation.(bool)
		if !ok {
			return Configuration{}, fmt.Errorf("configuration parsing: invalid strict_validation value, expected a bool")
		}
		c.StrictValidation = data
	}
	return c, nil
}
// Validate checks that the global container schedule is a parsable standard
// cron expression.
func (c Configuration) Validate() error {
	if _, err := cron.ParseStandard(c.GlobalContainerConfiguration.Schedule); err != nil {
		return fmt.Errorf("failed to validate: invalid schedule: %s", err)
	}
	return nil
}

65
constant/constant.go Normal file
View File

@@ -0,0 +1,65 @@
package constant
import (
"fmt"
"strconv"
)
type (
	// Version is an immutable semantic version (major.minor.patch).
	Version struct {
		patch int
		minor int
		major int
	}
)

// Raw version components, parsed by init into `version`. They default to
// "0"; being declared as strings suggests they are meant to be overridden
// at build time via -ldflags "-X ..." — NOTE(review): the build scripts in
// this commit do not set them, confirm the intended workflow.
var (
	patch string = "0"
	minor string = "0"
	major string = "0"

	// version is the parsed program version, populated once by init.
	version Version
)
func init() {
patchInt, err := strconv.Atoi(patch)
if err != nil {
panic(fmt.Errorf("failed to parse version number, this is a compilation error, try recompile the program"))
}
minorInt, err := strconv.Atoi(minor)
if err != nil {
panic(fmt.Errorf("failed to parse version number, this is a compilation error, try recompile the program"))
}
majorInt, err := strconv.Atoi(major)
if err != nil {
panic(fmt.Errorf("failed to parse version number, this is a compilation error, try recompile the program"))
}
version = Version{
patch: patchInt,
minor: minorInt,
major: majorInt,
}
}
// ProgramVersion returns the version parsed at package initialization.
func ProgramVersion() Version {
	return version
}

// Patch returns the patch component.
func (v Version) Patch() int {
	return v.patch
}

// Minor returns the minor component.
func (v Version) Minor() int {
	return v.minor
}

// Major returns the major component.
func (v Version) Major() int {
	return v.major
}

// String formats the version as "major.minor.patch".
func (v Version) String() string {
	return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch)
}

View File

@@ -0,0 +1,16 @@
package contextutil
import "context"
func WithThreadName(ctx context.Context, threadName string) context.Context {
return context.WithValue(ctx, "context_thread", threadName)
}
func ThreadName(ctx context.Context) string {
value := ctx.Value("context_thread")
if value, ok := value.(string); ok {
return value
}
return "unknown"
}

5
docker-compose.yml Normal file
View File

@@ -0,0 +1,5 @@
services:
  # Runs docker-updater next to the host daemon.
  runner:
    build: ./
    volumes:
      # Read-only access to the docker socket so the updater can talk to
      # the daemon it monitors.
      - "/var/run/docker.sock:/var/run/docker.sock:ro"

490
docker/helper/helper.go Normal file
View File

@@ -0,0 +1,490 @@
package helper
import (
	"context"
	"crypto"
	_ "crypto/md5" // registers MD5 so crypto.MD5.New() does not panic
	"encoding/hex"
	"fmt"
	"io"
	"log/slog"
	"slices"
	"strings"
	"sync"
	"time"

	"docker-updater/config"
	"docker-updater/constant"
	"docker-updater/contextutil"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/moby/moby/api/types/container"
	"github.com/moby/moby/client"
	"github.com/robfig/cron/v3"
)
type (
	// EventType identifies the kind of container event emitted by the watcher.
	EventType int
	// EventContainerFunc is the callback invoked for each container event.
	EventContainerFunc func(ev ContainerEvent)

	// DockerHelper wraps the docker client, the polling watcher and the
	// loaded configuration.
	DockerHelper struct {
		c      *client.Client
		w      watcher
		config config.Configuration
	}

	// watcher polls the daemon and keeps a cache of running containers so
	// that changes can be turned into ContainerEvents.
	watcher struct {
		// running is toggled by StartWatcher and the watcher goroutine.
		// NOTE(review): accessed without synchronization — confirm races.
		running                 bool
		ctx                     context.Context
		cancelFunc              context.CancelFunc
		gracefulStop            chan struct{}
		cache                   cache
		containersEventCallback EventContainerFunc
	}

	// cache is the mutex-guarded container cache, keyed by container name.
	cache struct {
		mu         sync.Mutex
		containers map[string]cacheEntry
	}

	// cacheEntry wraps a cached container snapshot.
	cacheEntry struct {
		c Container
	}

	// Container is a snapshot of a docker container together with the
	// update configuration resolved from its labels.
	Container struct {
		id                   string
		name                 string
		image                Image
		status               container.ContainerState
		labels               Labels
		appliedConfiguration config.ContainerConfiguration
	}

	// Labels holds a container's labels plus a hash used by the watcher to
	// detect label changes.
	Labels struct {
		labels map[string]string
		hash   string
	}

	// Image describes a container image: local ID, reference and digest.
	Image struct {
		id   string
		name string
		hash string
	}

	// ContainerEvent is delivered to the registered callback.
	ContainerEvent struct {
		ctx       context.Context
		EventType EventType
		Data      Container
	}
)
// Container lifecycle events emitted by the watcher.
const (
	// NewContainer: a container appeared since the last poll.
	NewContainer EventType = iota
	// DeletedContainer: a cached container is no longer running.
	DeletedContainer
	// UpdatedContainer: a running container's label hash changed.
	UpdatedContainer
)

const (
	// ua is the User-Agent template; %s is filled with the program version.
	ua string = "com.thelilfrog.docker-updater/%s"
)
// Open connects to the docker daemon using the environment configuration
// and returns a helper with an empty watcher cache and a no-op event
// callback (replace it with SetContainersEventCallback).
func Open(configuration config.Configuration) (*DockerHelper, error) {
	userAgent := fmt.Sprintf(ua, constant.ProgramVersion())
	cli, err := client.New(client.FromEnv, client.WithUserAgent(userAgent))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to docker daemon: %s", err)
	}
	return &DockerHelper{
		c:      cli,
		config: configuration,
		w: watcher{
			running:      false,
			gracefulStop: make(chan struct{}),
			cache: cache{
				containers: make(map[string]cacheEntry),
			},
			containersEventCallback: func(ContainerEvent) {},
		},
	}, nil
}
// Close stops the watcher goroutine (if running) by signaling a graceful
// stop and waiting for its context to be cancelled, then closes the
// underlying docker client.
//
// NOTE(review): w.running is read here and written by the watcher goroutine
// without synchronization — confirm Close cannot race StartWatcher.
func (dh *DockerHelper) Close() error {
	if dh.w.running {
		dh.w.gracefulStop <- struct{}{}
		<-dh.w.ctx.Done()
	}
	return dh.c.Close()
}
// StartWatcher performs one synchronous Watch pass (priming the cache) and
// then launches a goroutine that re-polls the daemon every `interval`
// seconds until the context is cancelled or a graceful stop is requested.
func (dh *DockerHelper) StartWatcher(appCtx context.Context, interval uint) error {
	if dh.w.running {
		return fmt.Errorf("cannot start the watcher: already running")
	}
	ctx, cancelFunc := context.WithCancel(context.Background())
	dh.w.ctx = contextutil.WithThreadName(ctx, "watcher")
	dh.w.cancelFunc = cancelFunc
	// Mark as running before spawning the goroutine so the flag is in a
	// consistent state as soon as the watcher can observe it.
	dh.w.running = true
	// watch a first time
	dh.w.Watch(appCtx, dh)
	go func() {
		for {
			select {
			case <-dh.w.ctx.Done():
				slog.Error("context: watcher closed", "thread", "watcher", "err", dh.w.ctx.Err())
				dh.w.running = false
				return
			case <-dh.w.gracefulStop:
				slog.Info("gracefully stopping the watcher", "thread", "watcher")
				// Bug fix: the graceful-stop path previously left
				// running == true, so the watcher could never be
				// restarted and a later Close() would block forever
				// waiting on gracefulStop.
				dh.w.running = false
				dh.w.cancelFunc()
				return
			case <-time.After(time.Duration(interval) * time.Second):
				dh.w.Watch(dh.w.ctx, dh)
			}
		}
	}()
	return nil
}
// Watch refreshes the container cache from the daemon and emits New,
// Updated (label hash changed) and Deleted events through the registered
// callback. The cache lock is held for the whole pass.
//
// Consistency fix: the body previously mixed `dh.w.*` and `w.*` accesses to
// the same watcher value; it now uses the receiver uniformly.
func (w *watcher) Watch(ctx context.Context, dh *DockerHelper) {
	w.cache.mu.Lock()
	defer w.cache.mu.Unlock()
	runningContainers, err := dh.RunningContainers(ctx)
	if err != nil {
		slog.Error("cannot fetch the list of running containers", "thread", contextutil.ThreadName(ctx), "err", err)
		return
	}
	for _, runningContainer := range runningContainers {
		if foundContainer, ok := w.cache.containers[runningContainer.name]; ok {
			// Known container: only emit an event when its labels changed.
			if runningContainer.labels.hash != foundContainer.c.labels.hash {
				foundContainer.c = runningContainer
				w.cache.containers[runningContainer.name] = foundContainer
				w.containersEventCallback(ContainerEvent{
					ctx:       ctx,
					EventType: UpdatedContainer,
					Data:      runningContainer,
				})
			}
			continue
		}
		// Unknown container: cache it and signal its appearance.
		w.cache.containers[runningContainer.name] = cacheEntry{
			c: runningContainer,
		}
		w.containersEventCallback(ContainerEvent{
			ctx:       ctx,
			EventType: NewContainer,
			Data:      runningContainer,
		})
	}
	// Evict cached containers that are no longer running and report them.
	notFound := make(map[string]Container)
	for containerName, containerData := range w.cache.containers {
		exists := slices.ContainsFunc(runningContainers, func(runningContainer Container) bool {
			return runningContainer.name == containerName
		})
		if !exists {
			notFound[containerName] = containerData.c
		}
	}
	for containerName, containerData := range notFound {
		delete(w.cache.containers, containerName)
		w.containersEventCallback(ContainerEvent{
			ctx:       ctx,
			EventType: DeletedContainer,
			Data:      containerData,
		})
	}
}
// RunningContainers lists the currently running containers and converts
// each one to the internal representation; containers whose metadata cannot
// be parsed are skipped with a warning.
func (dh *DockerHelper) RunningContainers(ctx context.Context) ([]Container, error) {
	// Bug fix: the caller's ctx was previously ignored in favor of
	// context.Background(), so cancellation never reached the API call.
	containers, err := dh.c.ContainerList(ctx, client.ContainerListOptions{
		All: false,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to get the list of running containers: %s", err)
	}
	var res []Container
	for _, container := range containers.Items {
		c, err := dh.parseContainer(ctx, container)
		if err != nil {
			slog.Warn("the container metadata contains errors, skipping this container")
			continue
		}
		res = append(res, c)
	}
	return res, nil
}
// RemoteImageMetadata resolves imageName and fetches its digest from the
// remote registry with a HEAD request (no pull).
func (dh *DockerHelper) RemoteImageMetadata(ctx context.Context, imageName string) (Image, error) {
	ref, err := name.ParseReference(imageName)
	if err != nil {
		return Image{}, fmt.Errorf("failed to parse image reference: %s", err)
	}
	// Bug fix: propagate ctx to the registry call; it was previously
	// ignored, so cancellation never reached the HTTP request.
	image, err := remote.Head(ref, remote.WithContext(ctx))
	if err != nil {
		return Image{}, fmt.Errorf("an error occurred while getting the metadata of the image in the remote: %s", err)
	}
	return Image{
		name: imageName,
		hash: image.Digest.String(),
	}, nil
}
// Container looks up a container (running or not) by its formatted name
// (see formatName). It returns an error when no such container exists.
func (dh *DockerHelper) Container(ctx context.Context, containerName string) (Container, error) {
	// Bug fix: the caller's ctx was previously ignored in favor of
	// context.Background().
	containers, err := dh.c.ContainerList(ctx, client.ContainerListOptions{
		All: true,
	})
	if err != nil {
		return Container{}, fmt.Errorf("unable to get the list of containers: %s", err)
	}
	for _, container := range containers.Items {
		if formatName(container.Names) != containerName {
			continue
		}
		res, err := dh.parseContainer(ctx, container)
		if err != nil {
			return Container{}, fmt.Errorf("failed to get the container: %s", err)
		}
		return res, nil
	}
	// Bug fix: a missing container previously returned a zero Container
	// with a nil error, indistinguishable from success for callers.
	return Container{}, fmt.Errorf("container %q not found", containerName)
}
// StopContainer asks the daemon to stop the given container using the
// default stop options.
func (dh *DockerHelper) StopContainer(ctx context.Context, container Container) error {
	if _, err := dh.c.ContainerStop(ctx, container.id, client.ContainerStopOptions{}); err != nil {
		return fmt.Errorf("failed to stop the container: %s", err)
	}
	return nil
}

// StartContainer asks the daemon to start the given container.
func (dh *DockerHelper) StartContainer(ctx context.Context, container Container) error {
	if _, err := dh.c.ContainerStart(ctx, container.id, client.ContainerStartOptions{}); err != nil {
		return fmt.Errorf("failed to start the container: %s", err)
	}
	return nil
}
// PullImage pulls imageName and drains the daemon's progress stream so the
// pull runs to completion before returning.
func (dh *DockerHelper) PullImage(ctx context.Context, imageName string) error {
	resp, err := dh.c.ImagePull(ctx, imageName, client.ImagePullOptions{})
	if err != nil {
		return fmt.Errorf("failed to pull the image: %s", err)
	}
	defer resp.Close()
	// Bug fix: the previous loop read into a nil (zero-length) buffer,
	// which never consumes any data, and ignored non-EOF errors — both of
	// which could make the loop spin forever. io.Copy drains the stream
	// and surfaces read errors.
	if _, err := io.Copy(io.Discard, resp); err != nil {
		return fmt.Errorf("failed to pull the image: %s", err)
	}
	return nil
}
// ListSimilarContainers returns the running containers that use the same
// image (by reference) as the named container.
func (dh *DockerHelper) ListSimilarContainers(ctx context.Context, containerName string) ([]Container, error) {
	targetContainer, err := dh.Container(ctx, containerName)
	if err != nil {
		return nil, fmt.Errorf("unable to get the target container: %s", err)
	}
	containers, err := dh.RunningContainers(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get running containers: %s", err)
	}
	var res []Container
	for _, container := range containers {
		// Bug fix: this previously compared each container's image name to
		// the target's *container* name, so matches were never image-based.
		if container.image.name == targetContainer.image.name {
			res = append(res, container)
		}
	}
	return res, nil
}
// ID returns the docker container ID.
func (c Container) ID() string {
	return c.id
}

// Name returns the formatted container name (see formatName).
func (c Container) Name() string {
	return c.name
}

// Image returns the container's image metadata.
func (c Container) Image() Image {
	return c.image
}

// Enabled reports whether automatic updates apply to this container.
func (c Container) Enabled() bool {
	return c.appliedConfiguration.Enabled
}

// Schedule returns the cron expression applied to this container.
func (c Container) Schedule() string {
	return c.appliedConfiguration.Schedule
}

// ID returns the local docker image ID.
func (i Image) ID() string {
	return i.id
}

// Name returns the image reference string.
func (i Image) Name() string {
	return i.name
}

// Hash returns the image digest (e.g. "sha256:...").
func (i Image) Hash() string {
	return i.hash
}
// SetContainersEventCallback registers the function the watcher invokes for
// every container event.
//
// NOTE(review): this takes the watcher cache lock, presumably so the
// callback cannot be swapped in the middle of a Watch pass — confirm this
// is the intended guard.
func (dh *DockerHelper) SetContainersEventCallback(fn EventContainerFunc) {
	dh.w.cache.mu.Lock()
	defer dh.w.cache.mu.Unlock()
	dh.w.containersEventCallback = fn
}
// parseContainer converts a docker API container summary into the internal
// Container type, resolving the update configuration from labels and the
// image digest from the locally stored repo digest.
func (dh *DockerHelper) parseContainer(ctx context.Context, container container.Summary) (Container, error) {
	name := formatName(container.Names)
	config, err := dh.parseLocalConfiguration(container.Labels)
	if err != nil {
		slog.Warn("failed to get the local configuration from the labels",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"labels", container.Labels)
		return Container{}, err
	}
	imageMetadata, err := dh.c.ImageInspect(ctx, container.ImageID)
	if err != nil {
		slog.Warn("failed to get local image metadata",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID)
		return Container{}, err
	}
	// Bug fix: the branches below previously returned `err`, which is nil
	// at this point — callers received a zero Container *without* an error
	// and appended it as if parsing had succeeded. Return explicit errors.
	switch {
	case len(imageMetadata.RepoDigests) == 0:
		slog.Warn("no remote digest found, ignoring",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID)
		return Container{}, fmt.Errorf("no remote digest found for image %s", container.Image)
	case len(imageMetadata.RepoDigests) > 1:
		slog.Warn("ambiguous remote image digest",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"repo_digests_count", len(imageMetadata.RepoDigests),
			"repo_digests", imageMetadata.RepoDigests)
		return Container{}, fmt.Errorf("ambiguous remote digest for image %s", container.Image)
	}
	// get the first repo digest; expected shape is "repository@sha256:..."
	hashes := strings.Split(imageMetadata.RepoDigests[0], "@")
	if len(hashes) != 2 {
		slog.Warn("failed to parse remote hash for this image",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"repo_digest", imageMetadata.RepoDigests[0])
		return Container{}, fmt.Errorf("malformed repo digest %q", imageMetadata.RepoDigests[0])
	}
	hash := hashes[1]
	return Container{
		id:                   container.ID,
		name:                 name,
		status:               container.State,
		labels:               convertLabels(container.Labels),
		appliedConfiguration: config,
		image: Image{
			id:   container.ImageID,
			name: container.Image,
			hash: hash,
		},
	}, nil
}
// Context returns the context the watcher attached to the event.
func (ev ContainerEvent) Context() context.Context {
	return ev.ctx
}
// formatName joins all of a container's names with "-" and strips the
// leading "/" that the docker API prepends.
func formatName(names []string) string {
	joined := strings.Join(names, "-")
	return strings.TrimPrefix(joined, "/")
}
// convertLabels wraps a label map together with an MD5 fingerprint of its
// contents, used by the watcher to detect label changes.
func convertLabels(labels map[string]string) Labels {
	// Bug fix: Go map iteration order is randomized, so hashing in
	// iteration order produced different hashes for identical label sets
	// and could trigger spurious UpdatedContainer events. Sort the keys
	// for a deterministic digest.
	keys := make([]string, 0, len(labels))
	for key := range labels {
		keys = append(keys, key)
	}
	slices.Sort(keys)
	var payload strings.Builder
	for _, key := range keys {
		payload.WriteString(key)
		payload.WriteString(":")
		payload.WriteString(labels[key])
	}
	// Bug fix: the previous code called Sum([]byte(p)) without writing the
	// payload, which *appends* the digest of empty input to p instead of
	// hashing p. Also note crypto.MD5.New() panics unless crypto/md5 is
	// linked in (see the blank import in this file's import block).
	digest := crypto.MD5.New()
	digest.Write([]byte(payload.String()))
	return Labels{
		labels: labels,
		hash:   hex.EncodeToString(digest.Sum(nil)),
	}
}
// parseLocalConfiguration derives a container's update configuration by
// overlaying the "com.thelilfrog.image.update.schedule" label, when present
// and valid, on top of the global container configuration.
func (dh *DockerHelper) parseLocalConfiguration(labels map[string]string) (config.ContainerConfiguration, error) {
	result := dh.config.GlobalContainerConfiguration
	schedule, found := labels["com.thelilfrog.image.update.schedule"]
	if !found {
		return result, nil
	}
	if _, err := cron.ParseStandard(schedule); err != nil {
		return config.ContainerConfiguration{}, fmt.Errorf("invalid schedule: %s", err)
	}
	result.Schedule = schedule
	return result, nil
}

15
dockerfile Normal file
View File

@@ -0,0 +1,15 @@
# Build stage: compile the binary and stage the runtime filesystem tree.
FROM golang:1.25.7-alpine3.22 AS build
COPY . /src
RUN cd /src \
&& go build -o dockerupdater \
&& mkdir -p ./fs/var/opt/dockerupdater \
&& mkdir -p ./fs/opt/dockerupdater \
&& cp dockerupdater ./fs/opt/dockerupdater/dockerupdater

# Final stage: copy the staged tree onto an empty base image.
FROM scratch AS prod
COPY --from=build --chmod=755 /src/fs /
ENTRYPOINT [ "/opt/dockerupdater/dockerupdater" ]

40
go.mod Normal file
View File

@@ -0,0 +1,40 @@
module docker-updater
go 1.25
require (
github.com/google/go-containerregistry v0.20.7
github.com/moby/moby/api v1.53.0
github.com/moby/moby/client v0.2.2
github.com/robfig/cron/v3 v3.0.1
)
require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v29.0.3+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/vbatts/tar-split v0.12.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
)

89
go.sum Normal file
View File

@@ -0,0 +1,89 @@
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=

59
main.go Normal file
View File

@@ -0,0 +1,59 @@
//go:build !debug
package main
import (
"context"
"docker-updater/constant"
"log/slog"
"os"
"runtime"
"runtime/debug"
"strings"
)
// main is the release-build entry point (built without the debug tag). It
// installs a JSON logger and a panic handler that reports runtime/build
// details, then hands control to run and exits with its status code.
func main() {
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	slog.SetDefault(logger)
	defer func() {
		if r := recover(); r != nil {
			if info, ok := debug.ReadBuildInfo(); ok {
				var settings []string
				for _, setting := range info.Settings {
					settings = append(settings, setting.Key+":"+setting.Value)
				}
				slog.Error("a panic occurred",
					"err", r,
					"os", runtime.GOOS,
					"arch", runtime.GOARCH,
					"num_cpu", runtime.NumCPU(),
					"num_cgo_call", runtime.NumCgoCall(),
					"num_goroutine", runtime.NumGoroutine(),
					"go_version", info.GoVersion,
					"build_settings", strings.Join(settings, ", "),
				)
			} else {
				slog.Error("a panic occurred, no build info available",
					"err", r,
					"os", runtime.GOOS,
					"arch", runtime.GOARCH,
					"num_cpu", runtime.NumCPU(),
					"num_cgo_call", runtime.NumCgoCall(),
					"num_goroutine", runtime.NumGoroutine(),
				)
			}
			// Bug fix: after recovering, main previously returned normally,
			// so a crash produced exit status 0. Report failure explicitly.
			os.Exit(1)
		}
	}()
	debug.SetTraceback("none")
	slog.Info("docker-updater", "version", constant.ProgramVersion().String(), "os", runtime.GOOS, "arch", runtime.GOARCH)
	os.Exit(run(ctx, false))
}

26
main_debug.go Normal file
View File

@@ -0,0 +1,26 @@
//go:build debug
package main
import (
"context"
"fmt"
"log/slog"
"os"
"runtime"
"runtime/debug"
)
// main is the debug-build entry point (built with -tags debug): it enables
// debug-level logging and full tracebacks, then hands control to run.
func main() {
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()

	slog.SetLogLoggerLevel(slog.LevelDebug)
	debug.SetTraceback("all")

	fmt.Printf("docker-updater -- debug (%s/%s)\n\n", runtime.GOOS, runtime.GOARCH)
	slog.Debug("debug mode enabled", "thread", "main")
	os.Exit(run(ctx, true))
}

12
makefile Normal file
View File

@@ -0,0 +1,12 @@
# Build entry points for docker-updater.
# Bug fix: without .PHONY, the `build` target collides with the ./build
# output directory and becomes a no-op once it exists; declare all targets
# phony since none of them produce a file with the target's name.
.PHONY: debug all clean build

# Run the program with the "debug" build tag.
debug:
	go run -tags "debug" ./...

# Default full rebuild.
all: clean build

clean:
	echo "* Cleaning the output directory"
	rm -rf build

build:
	echo "* Building the binary"
	./build.sh

90
runtime.go Normal file
View File

@@ -0,0 +1,90 @@
package main
import (
"context"
"docker-updater/config"
"docker-updater/contextutil"
"docker-updater/docker/helper"
"docker-updater/runtime"
"flag"
"log/slog"
"os"
"os/signal"
)
// Process exit codes returned by run.
const (
	success int = 0 // clean shutdown
	failure int = 1 // configuration, docker or event-loop error
	// usageError is NOTE(review): not returned anywhere in the visible
	// code — confirm whether flag-parsing failures should use it.
	usageError int = 2
)
// run parses the CLI flags, loads and validates the configuration, connects
// to docker, starts the watcher and executes the event loop until a signal
// or error stops it. It returns the process exit code.
//
// NOTE(review): the debug parameter is currently unused — confirm whether
// it should influence behavior here.
func run(ctx context.Context, debug bool) int {
	var configPath string
	var verbose bool
	flag.StringVar(&configPath, "config", "./config.json", "Specify the configuration file path")
	flag.BoolVar(&verbose, "verbose", false, "Set the log as verbose")
	flag.Parse()
	ctx = contextutil.WithThreadName(ctx, "main")
	if verbose {
		slog.SetLogLoggerLevel(slog.LevelDebug)
	}
	config, err := config.Load(configPath)
	if err != nil {
		slog.Error("failed to load the configuration", "thread", "main", "err", err)
		return failure
	}
	if err := config.Validate(); err != nil {
		slog.Error("failed to validate the configuration", "thread", "main", "err", err)
		return failure
	}
	docker, err := helper.Open(config)
	if err != nil {
		slog.Error("unable to connect to the docker socket", "thread", "main", "err", err)
		return failure
	}
	defer docker.Close()
	if err := docker.StartWatcher(ctx, config.DaemonConfiguration.PullInterval); err != nil {
		// Bug fix: this error was previously logged and then ignored,
		// letting the daemon run without its watcher.
		slog.Error("unable to start the docker watcher", "thread", "main", "err", err)
		return failure
	}
	el, err := runtime.NewEventLoop(ctx, docker)
	if err != nil {
		slog.Error("failed to init the event loop", "thread", "main", "err", err)
		return failure
	}
	defer el.Close()
	go func() {
		c := make(chan os.Signal, 10)
		// Bug fix: signal.Notify(c) with no signal list subscribed to
		// EVERY signal, which disabled the default handling of signals the
		// old switch never acted on (e.g. SIGTERM became a no-op). Only
		// subscribe to the signal we handle. The old os.Kill case was dead
		// code: SIGKILL can never be delivered to a handler.
		signal.Notify(c, os.Interrupt)
		for sig := range c {
			slog.Info("the process received an interruption signal", "thread", "signal_watcher", "signal", sig.String())
			el.Close()
		}
	}()
	slog.Info("everything seems ok, starting the event loop", "thread", "main")
	el.Execute()
	slog.Info("Need to go, byyye 👋", "thread", "main")
	return success
}

267
runtime/runtime.go Normal file
View File

@@ -0,0 +1,267 @@
package runtime
import (
"context"
"docker-updater/contextutil"
"docker-updater/docker/helper"
"fmt"
"log/slog"
"sync"
"github.com/robfig/cron/v3"
)
type (
	// EventLoop schedules container update jobs via cron and serializes the
	// container events received from the docker watcher.
	EventLoop struct {
		mu     sync.Mutex // guards executors and registration (see firstRun)
		docker *helper.DockerHelper
		cr     *cron.Cron
		// executors maps a key (presumably the container name — confirm in
		// register, not visible here) to its cron entry.
		executors map[string]cron.EntryID
		ctx       context.Context

		eventMu sync.Mutex // guards eventBuffer
		// eventBuffer holds watcher events pending processing.
		eventBuffer []helper.ContainerEvent
		// eventTrigger wakes the event-processing goroutine.
		// NOTE(review): its use is in code past this view — confirm.
		eventTrigger chan struct{}
	}
)
// NewEventLoop builds the scheduler that keeps container update jobs in sync
// with the docker daemon. It performs an initial scan of the running
// containers (using the caller's ctx), registers the enabled ones, hooks the
// docker event callback and starts the background consumer goroutine.
//
// The loop itself runs with its own context tagged "event_loop"; ctx is only
// used for the initial scan.
func NewEventLoop(ctx context.Context, docker *helper.DockerHelper) (*EventLoop, error) {
	loop := &EventLoop{
		docker:    docker,
		cr:        cron.New(),
		ctx:       contextutil.WithThreadName(context.Background(), "event_loop"),
		executors: make(map[string]cron.EntryID),
		// Buffered with capacity 1 so a producer can always leave a
		// wake-up token without blocking. The original code left this
		// channel nil, which made every send/receive block forever.
		eventTrigger: make(chan struct{}, 1),
	}
	if err := loop.firstRun(ctx); err != nil {
		return nil, fmt.Errorf("unable to initialize the event loop: %w", err)
	}
	docker.SetContainersEventCallback(loop.onEventReceived)
	go loop.startEventLoop()
	return loop, nil
}
// Close stops the cron scheduler and blocks until every in-flight job has
// finished. It never fails; the error return exists to satisfy io.Closer-like
// call sites.
func (el *EventLoop) Close() error {
	<-el.cr.Stop().Done()
	return nil
}
// Execute runs the cron scheduler in the calling goroutine and only returns
// once the scheduler has been stopped (see Close).
func (el *EventLoop) Execute() {
	el.cr.Run()
	slog.Info("event loop stopped", "thread", "main")
}
// firstRun performs the initial scan: it lists the currently running
// containers and registers every enabled one in the cron scheduler. A
// registration failure for one container is logged and does not abort the
// scan; only the inability to list containers is fatal.
func (el *EventLoop) firstRun(ctx context.Context) error {
	el.mu.Lock()
	defer el.mu.Unlock()
	containers, err := el.docker.RunningContainers(ctx)
	if err != nil {
		return fmt.Errorf("unable to get the list of running containers: %w", err)
	}
	for _, container := range containers {
		if !container.Enabled() {
			slog.Debug("this container is disabled, ignoring",
				"thread", contextutil.ThreadName(ctx),
				"container_name", container.Name(),
				"container_id", container.ID(),
			)
			continue
		}
		if err := el.register(container); err != nil {
			slog.Error("an error occurred while registering a container in the scheduler",
				"thread", contextutil.ThreadName(ctx),
				"container_name", container.Name(),
				"container_id", container.ID(),
				"schedule", container.Schedule(),
				"err", err,
			)
			continue
		}
		slog.Info("a container was scheduled",
			"thread", contextutil.ThreadName(ctx),
			"container_name", container.Name(),
			"container_id", container.ID(),
			"schedule", container.Schedule(),
		)
	}
	return nil
}
// onEventReceived is the callback invoked by the docker helper for every
// container event. It appends the event to the internal buffer and leaves a
// wake-up token for the consumer goroutine.
//
// The token send is non-blocking: the original blocking send was performed
// while holding eventMu, so a busy consumer could stall the docker event
// callback. With a buffered trigger channel, dropping the signal when a token
// is already pending is safe because the consumer re-checks the buffer after
// every wake-up.
func (el *EventLoop) onEventReceived(ev helper.ContainerEvent) {
	el.eventMu.Lock()
	defer el.eventMu.Unlock()
	slog.Debug("event from daemon", "thread", contextutil.ThreadName(ev.Context()), "event_type", ev.EventType, "container_name", ev.Data.Name())
	el.eventBuffer = append(el.eventBuffer, ev)
	if len(el.eventBuffer) > 100 {
		slog.Warn("slow event processing", "thread", contextutil.ThreadName(ev.Context()), "buffer_size", len(el.eventBuffer))
	}
	select {
	case el.eventTrigger <- struct{}{}:
	default:
		// A wake-up token is already pending; the consumer will drain the
		// whole buffer anyway.
	}
}
// register adds a cron entry that periodically updates the container's image
// on its declared schedule, and remembers the entry ID so the container can
// be unregistered later. Update failures are logged by the job itself; only
// an invalid schedule makes register fail.
func (el *EventLoop) register(container helper.Container) error {
	id, err := el.cr.AddFunc(container.Schedule(), func() {
		if err := el.updateImage(container); err != nil {
			slog.Error("failed to update the container image",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
				"image_name", container.Image().Name(),
				"image_id", container.Image().ID(),
				"err", err,
			)
		}
	})
	if err != nil {
		return fmt.Errorf("the registration of the container in the scheduler has failed: %w", err)
	}
	el.executors[container.ID()] = id
	return nil
}
// unregister removes the container's cron entry, if one exists, and forgets
// its entry ID. Calling it for an unknown container is a no-op.
func (el *EventLoop) unregister(container helper.Container) {
	containerID := container.ID()
	if entry, found := el.executors[containerID]; found {
		el.cr.Remove(entry)
		delete(el.executors, containerID)
	}
}
// startEventLoop is the consumer goroutine: it waits for events buffered by
// onEventReceived and processes them one at a time, in arrival order.
//
// The emptiness check is performed under eventMu — the original read
// len(el.eventBuffer) without the lock, which is a data race with the
// producer's append. The check loops because the trigger channel may hold a
// stale token from a burst that has already been drained.
//
// NOTE(review): this goroutine has no shutdown path and lives for the whole
// process lifetime — confirm that is acceptable.
func (el *EventLoop) startEventLoop() {
	for {
		el.eventMu.Lock()
		for len(el.eventBuffer) == 0 {
			el.eventMu.Unlock()
			<-el.eventTrigger
			el.eventMu.Lock()
		}
		ev := el.eventBuffer[0]
		// Copy the tail into a fresh backing array so processed events do
		// not keep the old array (and its event payloads) alive.
		el.eventBuffer = append([]helper.ContainerEvent{}, el.eventBuffer[1:]...)
		el.eventMu.Unlock()
		el.process(ev)
	}
}
// process applies a single container event to the scheduler: newly created
// enabled containers are registered, deleted containers are unregistered and
// updated containers are re-registered so a changed schedule or enabled flag
// takes effect.
func (el *EventLoop) process(ev helper.ContainerEvent) {
	el.mu.Lock()
	defer el.mu.Unlock()
	container := ev.Data
	switch ev.EventType {
	case helper.NewContainer:
		if !container.Enabled() {
			slog.Debug("receiving an event for a disabled container, ignoring",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
			)
			return
		}
		if err := el.register(container); err != nil {
			slog.Error("an error occurred while registering a container in the scheduler",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
				"schedule", container.Schedule(),
				"err", err,
			)
			return
		}
		slog.Info("a new container was scheduled",
			"thread", "event_loop",
			"container_name", container.Name(),
			"container_id", container.ID(),
			"schedule", container.Schedule(),
		)
	case helper.DeletedContainer:
		el.unregister(container)
	case helper.UpdatedContainer:
		// An update may change the schedule or disable the container, so
		// always drop the previous entry before deciding to re-register.
		el.unregister(container)
		if !container.Enabled() {
			slog.Debug("a previously enabled container is now disabled, the container will not be updated in the future",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
			)
			return
		}
		if err := el.register(container); err != nil {
			slog.Error("an error occurred while updating a container in the scheduler",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
				"schedule", container.Schedule(),
				"err", err,
			)
			return
		}
		slog.Info("a container was updated",
			"thread", "event_loop",
			"container_name", container.Name(),
			"container_id", container.ID(),
			"schedule", container.Schedule(),
		)
	}
}
// updateImage brings the container's image up to date: it compares the local
// image hash with the registry's, and when they differ it stops the
// container, pulls the new image and restarts the container. The restart is
// deferred so it also runs when the pull fails, restoring the container on
// its previous image (best effort).
func (el *EventLoop) updateImage(container helper.Container) error {
	image, err := el.docker.RemoteImageMetadata(el.ctx, container.Image().Name())
	if err != nil {
		return fmt.Errorf("unable to get metadata from the registry: %w", err)
	}
	if image.Hash() == container.Image().Hash() {
		slog.Debug("the image is already up-to-date",
			"thread", "event_loop",
			"container_name", container.Name(),
			"container_id", container.ID(),
			"image_name", container.Image().Name(),
			"image_id", container.Image().ID(),
		)
		return nil
	}
	if err := el.docker.StopContainer(el.ctx, container); err != nil {
		return fmt.Errorf("unable to stop the container to update the image: %w", err)
	}
	defer func() {
		if err := el.docker.StartContainer(el.ctx, container); err != nil {
			// The original log dropped the error value entirely; include it
			// so the failure is diagnosable.
			slog.Error("unable to restart the container after the update",
				"thread", "event_loop",
				"container_name", container.Name(),
				"container_id", container.ID(),
				"image_name", container.Image().Name(),
				"image_id", container.Image().ID(),
				"err", err,
			)
		}
	}()
	if err := el.docker.PullImage(el.ctx, container.Image().Name()); err != nil {
		return fmt.Errorf("failed to pull the image from the registry: %w", err)
	}
	return nil
}