first version

docker/helper/helper.go
@@ -0,0 +1,490 @@
package helper

import (
	"context"
	"crypto/md5"
	"docker-updater/config"
	"docker-updater/constant"
	"docker-updater/contextutil"
	"encoding/hex"
	"fmt"
	"io"
	"log/slog"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/moby/moby/api/types/container"
	"github.com/moby/moby/client"
	"github.com/robfig/cron/v3"
)

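// The types below model the helper: DockerHelper wraps the Moby client and a
// polling watcher, while Container, Image and Labels carry the metadata read
// from the Docker daemon for each container.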
type (
	EventType          int
	EventContainerFunc func(ev ContainerEvent)

	DockerHelper struct {
		c      *client.Client
		w      watcher
		config config.Configuration
	}

	watcher struct {
		running                 bool
		ctx                     context.Context
		cancelFunc              context.CancelFunc
		gracefulStop            chan struct{}
		cache                   cache
		containersEventCallback EventContainerFunc
	}

	cache struct {
		mu         sync.Mutex
		containers map[string]cacheEntry
	}

	cacheEntry struct {
		c Container
	}

	Container struct {
		id                   string
		name                 string
		image                Image
		status               container.ContainerState
		labels               Labels
		appliedConfiguration config.ContainerConfiguration
	}

	Labels struct {
		labels map[string]string
		hash   string
	}

	Image struct {
		id   string
		name string
		hash string
	}

	ContainerEvent struct {
		ctx       context.Context
		EventType EventType
		Data      Container
	}
)

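// Event kinds delivered to the containers event callback.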
const (
	NewContainer EventType = iota
	DeletedContainer
	UpdatedContainer
)

const (
	ua string = "com.thelilfrog.docker-updater/%s"
)

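// Open connects to the Docker daemon using the environment configuration and
// returns a DockerHelper ready to watch containers.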
func Open(configuration config.Configuration) (*DockerHelper, error) {
	cli, err := client.New(client.FromEnv, client.WithUserAgent(fmt.Sprintf(ua, constant.ProgramVersion())))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to docker daemon: %s", err)
	}

	dh := &DockerHelper{
		w: watcher{
			cache: cache{
				containers: make(map[string]cacheEntry),
			},
			running:                 false,
			gracefulStop:            make(chan struct{}),
			containersEventCallback: func(ev ContainerEvent) {},
		},
		config: configuration,
		c:      cli,
	}

	return dh, nil
}

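// Close stops the watcher if it is running, then closes the Docker client.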
func (dh *DockerHelper) Close() error {
	if dh.w.running {
		dh.w.gracefulStop <- struct{}{}
		<-dh.w.ctx.Done()
	}
	return dh.c.Close()
}

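// StartWatcher performs an initial scan and then polls the daemon every
// interval seconds in a background goroutine until the watcher context is
// cancelled or Close is called.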
func (dh *DockerHelper) StartWatcher(appCtx context.Context, interval uint) error {
	if dh.w.running {
		return fmt.Errorf("cannot start the watcher: already running")
	}

	ctx, cancelFunc := context.WithCancel(context.Background())

	dh.w.ctx = contextutil.WithThreadName(ctx, "watcher")
	dh.w.cancelFunc = cancelFunc

	// watch a first time
	dh.w.Watch(appCtx, dh)

	go func() {
		for {
			select {
			case <-dh.w.ctx.Done():
				slog.Error("context: watcher closed", "thread", "watcher", "err", dh.w.ctx.Err())
				dh.w.running = false
				return
			case <-dh.w.gracefulStop:
				slog.Info("gracefully stopping the watcher", "thread", "watcher")
				dh.w.cancelFunc()
				return
			case <-time.After(time.Duration(interval) * time.Second):
				dh.w.Watch(dh.w.ctx, dh)
			}
		}
	}()

	dh.w.running = true
	return nil
}

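// Watch reconciles the cache with the containers currently running and emits
// NewContainer, UpdatedContainer and DeletedContainer events through the
// registered callback.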
func (w *watcher) Watch(ctx context.Context, dh *DockerHelper) {
	w.cache.mu.Lock()
	defer w.cache.mu.Unlock()

	runningContainers, err := dh.RunningContainers(ctx)
	if err != nil {
		slog.Error("cannot fetch the list of running containers", "thread", contextutil.ThreadName(ctx), "err", err)
		return
	}

	for _, runningContainer := range runningContainers {
		if foundContainer, ok := w.cache.containers[runningContainer.name]; ok {
			if runningContainer.labels.hash != foundContainer.c.labels.hash {
				foundContainer.c = runningContainer
				w.cache.containers[runningContainer.name] = foundContainer

				w.containersEventCallback(ContainerEvent{
					ctx:       ctx,
					EventType: UpdatedContainer,
					Data:      runningContainer,
				})
			}
			continue
		}
		w.cache.containers[runningContainer.name] = cacheEntry{
			c: runningContainer,
		}

		w.containersEventCallback(ContainerEvent{
			ctx:       ctx,
			EventType: NewContainer,
			Data:      runningContainer,
		})
	}

	notFound := make(map[string]Container)
	for containerName, containerData := range w.cache.containers {
		exists := slices.ContainsFunc(runningContainers, func(runningContainer Container) bool {
			return runningContainer.name == containerName
		})
		if !exists {
			notFound[containerName] = containerData.c
		}
	}

	for containerName, containerData := range notFound {
		delete(w.cache.containers, containerName)
		w.containersEventCallback(ContainerEvent{
			ctx:       ctx,
			EventType: DeletedContainer,
			Data:      containerData,
		})
	}
}

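// RunningContainers returns the currently running containers, skipping any
// container whose metadata cannot be parsed.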
func (dh *DockerHelper) RunningContainers(ctx context.Context) ([]Container, error) {
	containers, err := dh.c.ContainerList(ctx, client.ContainerListOptions{
		All: false,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to get the list of running containers: %s", err)
	}

	var res []Container
	for _, container := range containers.Items {
		c, err := dh.parseContainer(ctx, container)
		if err != nil {
			slog.Warn("the container metadata contains errors, skipping this container", "thread", contextutil.ThreadName(ctx), "err", err)
			continue
		}

		res = append(res, c)
	}

	return res, nil
}

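// RemoteImageMetadata resolves the image reference and fetches its digest
// from the remote registry without pulling the image.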
func (dh *DockerHelper) RemoteImageMetadata(ctx context.Context, imageName string) (Image, error) {
	ref, err := name.ParseReference(imageName)
	if err != nil {
		return Image{}, fmt.Errorf("failed to parse image reference: %s", err)
	}

	image, err := remote.Head(ref, remote.WithContext(ctx))
	if err != nil {
		return Image{}, fmt.Errorf("an error occurred while getting the metadata of the image in the remote: %s", err)
	}

	return Image{
		name: imageName,
		hash: image.Digest.String(),
	}, nil
}

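// Container looks up a container (running or not) by its formatted name.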
func (dh *DockerHelper) Container(ctx context.Context, containerName string) (Container, error) {
	containers, err := dh.c.ContainerList(ctx, client.ContainerListOptions{
		All: true,
	})
	if err != nil {
		return Container{}, fmt.Errorf("unable to get the list of containers: %s", err)
	}

	for _, container := range containers.Items {
		name := formatName(container.Names)
		if name != containerName {
			continue
		}

		res, err := dh.parseContainer(ctx, container)
		if err != nil {
			return Container{}, fmt.Errorf("failed to get the container: %s", err)
		}
		return res, nil
	}

	return Container{}, fmt.Errorf("container %q not found", containerName)
}

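// StopContainer asks the daemon to stop the given container.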
func (dh *DockerHelper) StopContainer(ctx context.Context, container Container) error {
	if _, err := dh.c.ContainerStop(ctx, container.id, client.ContainerStopOptions{}); err != nil {
		return fmt.Errorf("failed to stop the container: %s", err)
	}
	return nil
}

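// StartContainer asks the daemon to start the given container.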
func (dh *DockerHelper) StartContainer(ctx context.Context, container Container) error {
	if _, err := dh.c.ContainerStart(ctx, container.id, client.ContainerStartOptions{}); err != nil {
		return fmt.Errorf("failed to start the container: %s", err)
	}
	return nil
}

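// PullImage pulls the image and drains the progress stream so that the pull
// has completed before the function returns.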
func (dh *DockerHelper) PullImage(ctx context.Context, imageName string) error {
	resp, err := dh.c.ImagePull(ctx, imageName, client.ImagePullOptions{})
	if err != nil {
		return fmt.Errorf("failed to pull the image: %s", err)
	}
	defer resp.Close()

	// drain the progress stream; the pull is only finished once the reader
	// reaches EOF
	if _, err := io.Copy(io.Discard, resp); err != nil {
		return fmt.Errorf("failed to read the pull response: %s", err)
	}

	return nil
}

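// ListSimilarContainers returns the running containers that use the same
// image as the named container.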
func (dh *DockerHelper) ListSimilarContainers(ctx context.Context, containerName string) ([]Container, error) {
	targetContainer, err := dh.Container(ctx, containerName)
	if err != nil {
		return nil, fmt.Errorf("unable to get the target container: %s", err)
	}

	containers, err := dh.RunningContainers(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get running containers: %s", err)
	}

	var res []Container
	for _, container := range containers {
		if container.image.name == targetContainer.image.name {
			res = append(res, container)
		}
	}

	return res, nil
}

func (c Container) ID() string {
	return c.id
}

func (c Container) Name() string {
	return c.name
}

func (c Container) Image() Image {
	return c.image
}

func (c Container) Enabled() bool {
	return c.appliedConfiguration.Enabled
}

func (c Container) Schedule() string {
	return c.appliedConfiguration.Schedule
}

func (i Image) ID() string {
	return i.id
}

func (i Image) Name() string {
	return i.name
}

func (i Image) Hash() string {
	return i.hash
}

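// SetContainersEventCallback registers the function invoked for every
// container event emitted by the watcher.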
func (dh *DockerHelper) SetContainersEventCallback(fn EventContainerFunc) {
	dh.w.cache.mu.Lock()
	defer dh.w.cache.mu.Unlock()

	dh.w.containersEventCallback = fn
}

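// parseContainer builds a Container from a container summary, resolving the
// local configuration from the labels and the pinned remote digest from the
// image's repo digests.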
func (dh *DockerHelper) parseContainer(ctx context.Context, container container.Summary) (Container, error) {
	name := formatName(container.Names)

	config, err := dh.parseLocalConfiguration(container.Labels)
	if err != nil {
		slog.Warn("failed to get the local configuration from the labels",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"labels", container.Labels)
		return Container{}, err
	}

	imageMetadata, err := dh.c.ImageInspect(ctx, container.ImageID)
	if err != nil {
		slog.Warn("failed to get local image metadata",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID)
		return Container{}, err
	}

	switch {
	case len(imageMetadata.RepoDigests) == 0:
		slog.Warn("no remote digest found, ignoring",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID)
		return Container{}, fmt.Errorf("no remote digest found for image %s", container.Image)
	case len(imageMetadata.RepoDigests) > 1:
		slog.Warn("ambiguous remote image digest",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"repo_digests_count", len(imageMetadata.RepoDigests),
			"repo_digests", imageMetadata.RepoDigests)
		return Container{}, fmt.Errorf("ambiguous remote image digest: %d repo digests found", len(imageMetadata.RepoDigests))
	}

	// get the first repo digest
	hashes := strings.Split(imageMetadata.RepoDigests[0], "@")
	if len(hashes) != 2 {
		slog.Warn("failed to parse remote hash for this image",
			"thread", contextutil.ThreadName(ctx),
			"container", name,
			"container_id", container.ID,
			"image_name", container.Image,
			"image_id", container.ImageID,
			"repo_digest", imageMetadata.RepoDigests[0])
		return Container{}, fmt.Errorf("failed to parse the repo digest %q", imageMetadata.RepoDigests[0])
	}
	hash := hashes[1]

	return Container{
		id:                   container.ID,
		name:                 name,
		status:               container.State,
		labels:               convertLabels(container.Labels),
		appliedConfiguration: config,
		image: Image{
			id:   container.ImageID,
			name: container.Image,
			hash: hash,
		},
	}, nil
}

func (ev ContainerEvent) Context() context.Context {
	return ev.ctx
}

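// formatName joins the names reported by the daemon and strips the leading
// slash that Docker adds to container names.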
func formatName(names []string) string {
	name := strings.Join(names, "-")
	if after, ok := strings.CutPrefix(name, "/"); ok {
		name = after
	}
	return name
}

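// convertLabels keeps the raw labels and computes a deterministic MD5 hash of
// them so that label changes can be detected between two watcher runs.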
func convertLabels(labels map[string]string) Labels {
	// sort the keys so the hash is stable regardless of map iteration order
	keys := make([]string, 0, len(labels))
	for key := range labels {
		keys = append(keys, key)
	}
	slices.Sort(keys)

	var p string
	for _, key := range keys {
		p += key + ":" + labels[key]
	}

	sum := md5.Sum([]byte(p))

	return Labels{
		labels: labels,
		hash:   hex.EncodeToString(sum[:]),
	}
}

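// parseLocalConfiguration starts from the global container configuration and
// overrides the schedule when a valid cron expression is provided via the
// com.thelilfrog.image.update.schedule label.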
func (dh *DockerHelper) parseLocalConfiguration(labels map[string]string) (config.ContainerConfiguration, error) {
	c := dh.config.GlobalContainerConfiguration

	if schedule, ok := labels["com.thelilfrog.image.update.schedule"]; ok {
		_, err := cron.ParseStandard(schedule)
		if err != nil {
			return config.ContainerConfiguration{}, fmt.Errorf("invalid schedule: %s", err)
		}
		c.Schedule = schedule
	}

	return c, nil
}