feat(stapel-to-buildah): support user stages and mounts
* Building of the beforeInstall, install, beforeSetup and setup user stages now works.
    * For now, each instruction from werf.yaml runs in a separate shell session.
* Fixed mountpoint cleanup in the 'from' stage.
* Added usage of mounts in user stages.

Signed-off-by: Timofey Kirillov <timofey.kirillov@flant.com>
distorhead committed Mar 30, 2022
1 parent 9e242e9 commit da55b2a
Showing 5 changed files with 74 additions and 31 deletions.
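
For orientation before the diffs: the pieces touched here map onto the shell builder stages and mount directives of werf.yaml. A minimal config along these lines would exercise them (an illustrative sketch only; project name, image, commands and mount targets are hypothetical, following werf's documented shell and mount syntax):

    project: demo
    configVersion: 1
    ---
    image: app
    from: ubuntu:22.04
    shell:
      beforeInstall:
      - apt-get update
      install:
      - apt-get install -y gcc
      beforeSetup:
      - mkdir -p /app
      setup:
      - echo done > /app/setup.flag
    mount:
    - from: build_dir     # service mount, persisted between builds
      to: /app/build
    - fromPath: ~/.ccache # custom mount from a host path
      to: /ccache

With the buildah backend, each list item under shell now runs in its own shell session, and each mount becomes a bind mount applied for the duration of every user command (see buildah_backend.go below).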
pkg/build/builder/ansible.go (3 changes: 1 addition & 2 deletions)
@@ -135,8 +135,7 @@ func (b *Ansible) stage(ctx context.Context, cr container_backend.ContainerBackend
 
         return nil
     } else {
-        // TODO(stapel-to-buildah)
-        panic("not implemented")
+        return fmt.Errorf("ansible builder is not supported when using buildah backend, please use shell builder instead")
     }
 }
 

pkg/build/builder/shell.go (7 changes: 3 additions & 4 deletions)
@@ -87,12 +87,11 @@ func (b *Shell) stage(cr container_backend.ContainerBackend, stageBuilder stage_
         }
 
         container.AddServiceRunCommands(containerTmpScriptFilePath)
-
-        return nil
     } else {
-        // TODO(stapel-to-buildah)
-        panic("not implemented")
+        stageBuilder.StapelStageBuilder().AddUserCommands(b.stageCommands(userStageName)...)
     }
+
+    return nil
 }
 
 func (b *Shell) stageChecksum(ctx context.Context, userStageName string) string {
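
Because each instruction gets its own shell session for now, shell state set by one instruction does not survive into the next. A runnable toy illustration (the commands are made up; the []string{"sh", "-c", cmd} wrapping mirrors the RunCommand change in buildah_backend.go below):

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Two "instructions", each run in its own shell session, the way the
        // buildah backend now runs every user command.
        for _, cmd := range []string{"GREETING=hello; echo first: $GREETING", "echo second: $GREETING"} {
            out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
            if err != nil {
                panic(err)
            }
            fmt.Print(string(out))
        }
        // Prints "first: hello" but only "second:", because GREETING does not
        // carry over into the second session.
    }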
pkg/build/stage/base.go (23 changes: 17 additions & 6 deletions)
@@ -253,13 +253,13 @@ func (s *BaseStage) PrepareImage(ctx context.Context, c Conveyor, cr container_backend.ContainerBackend
 
     serviceMounts := s.getServiceMounts(prevBuiltImage)
     s.addServiceMountsLabels(serviceMounts, c, cr, stageImage)
-    if err := s.addServiceMountsVolumes(serviceMounts, stageImage); err != nil {
+    if err := s.addServiceMountsVolumes(serviceMounts, c, cr, stageImage); err != nil {
         return fmt.Errorf("error adding mounts volumes: %s", err)
     }
 
     customMounts := s.getCustomMounts(prevBuiltImage)
     s.addCustomMountLabels(customMounts, c, cr, stageImage)
-    if err := s.addCustomMountVolumes(customMounts, stageImage); err != nil {
+    if err := s.addCustomMountVolumes(customMounts, c, cr, stageImage); err != nil {
         return fmt.Errorf("error adding mounts volumes: %s", err)
     }
 
@@ -313,7 +313,7 @@ func (s *BaseStage) getServiceMountsFromConfig() map[string][]string {
     return mountpointsByType
 }
 
-func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]string, stageImage *StageImage) error {
+func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]string, c Conveyor, cr container_backend.ContainerBackend, stageImage *StageImage) error {
     for mountType, mountpoints := range mountpointsByType {
         for _, mountpoint := range mountpoints {
             absoluteMountpoint := path.Join("/", mountpoint)
@@ -333,7 +333,12 @@ func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]string, stageImage *StageImage) error {
                 return fmt.Errorf("error creating tmp path %s for mount: %s", absoluteFrom, err)
             }
 
-            stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint))
+            volume := fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint)
+            if c.UseLegacyStapelBuilder(cr) {
+                stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(volume)
+            } else {
+                stageImage.Builder.StapelStageBuilder().AddBuildVolumes(volume)
+            }
         }
     }
 
@@ -406,7 +411,7 @@ func (s *BaseStage) getCustomMountsFromConfig() map[string][]string {
     return mountpointsByFrom
 }
 
-func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string, stageImage *StageImage) error {
+func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string, c Conveyor, cr container_backend.ContainerBackend, stageImage *StageImage) error {
     for from, mountpoints := range mountpointsByFrom {
         absoluteFrom := util.ExpandPath(from)
 
@@ -424,7 +429,13 @@ func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string, stageImage *StageImage) error {
 
         for _, mountpoint := range mountpoints {
             absoluteMountpoint := path.Join("/", mountpoint)
-            stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint))
+
+            volume := fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint)
+            if c.UseLegacyStapelBuilder(cr) {
+                stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(volume)
+            } else {
+                stageImage.Builder.StapelStageBuilder().AddBuildVolumes(volume)
+            }
         }
     }
 
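
Both helpers above now reduce every mount to the same "SOURCE:DESTINATION" volume string before branching between the legacy builder's AddVolume and the new builder's AddBuildVolumes. A self-contained sketch of that normalization (buildVolumeSpec is an invented name; the path handling follows the diff):

    package main

    import (
        "fmt"
        "path"
    )

    // buildVolumeSpec mirrors how addServiceMountsVolumes and addCustomMountVolumes
    // assemble a volume: the mountpoint is forced to an absolute in-container path,
    // then paired with the host-side source directory.
    func buildVolumeSpec(absoluteFrom, mountpoint string) string {
        absoluteMountpoint := path.Join("/", mountpoint)
        return fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint)
    }

    func main() {
        // Hypothetical host directory prepared for a build_dir mount.
        fmt.Println(buildVolumeSpec("/home/user/.werf/tmp/mounts/app", "app/build"))
        // Output: /home/user/.werf/tmp/mounts/app:/app/build
    }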
pkg/build/stage/from.go (5 changes: 4 additions & 1 deletion)
@@ -8,6 +8,7 @@ import (
     "path/filepath"
     "strings"
 
+    "github.com/werf/logboek"
     "github.com/werf/werf/pkg/config"
     "github.com/werf/werf/pkg/container_backend"
     imagePkg "github.com/werf/werf/pkg/image"
@@ -99,7 +100,9 @@ func (s *FromStage) PrepareImage(ctx context.Context, c Conveyor, cr container_backend.ContainerBackend
     } else {
         stageImage.Builder.StapelStageBuilder().AddPrepareContainerActions(container_backend.PrepareContainerActionWith(func(containerRoot string) error {
             for _, mountpoint := range mountpoints {
-                if err := os.RemoveAll(mountpoint); err != nil {
+                logboek.Context(ctx).Info().LogF("Removing mountpoint %q in the container dir: %q\n", mountpoint, filepath.Join(containerRoot, mountpoint))
+
+                if err := os.RemoveAll(filepath.Join(containerRoot, mountpoint)); err != nil {
                     return fmt.Errorf("unable to remove %q: %s", mountpoint, err)
                 }
             }
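
The essence of the mountpoint-cleanup fix: a prepare-container action receives the container root as a directory mounted on the host, so the path to remove must be joined onto that root. The previous os.RemoveAll(mountpoint) targeted the bare path, outside the container filesystem. A toy demonstration with hypothetical paths:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Host directory where buildah mounted the container's root filesystem.
        containerRoot := "/home/user/.local/share/containers/merged-1234"
        mountpoint := "/app/build"

        // Before the fix the removal would hit "/app/build" on the host side;
        // joining onto containerRoot keeps it inside the container.
        fmt.Println(filepath.Join(containerRoot, mountpoint))
        // Output: /home/user/.local/share/containers/merged-1234/app/build
    }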
pkg/container_backend/buildah_backend.go (67 changes: 49 additions & 18 deletions)
@@ -6,6 +6,7 @@ import (
     "strings"
 
     "github.com/google/uuid"
+    "github.com/opencontainers/runtime-spec/specs-go"
 
     "github.com/werf/logboek"
     "github.com/werf/werf/pkg/buildah"
@@ -28,30 +29,32 @@ func (runtime *BuildahBackend) HasStapelBuildSupport() bool {
     return true
 }
 
-// FIXME(stapel-to-buildah): proper deep implementation
-func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage string, opts BuildStapelStageOpts) (string, error) {
-    /*
-        1. Create new temporary build container using 'from' and remain uniq container name.
-        2. Mount container root to host and run all prepare-container-actions, then unmount.
-        3. Run user instructions in container, mount volumes when build.
-        4. Set specified labels into container.
-        5. Save container name as builtID (ideally there is no need to commit an image here, because buildah allows to commit and push directly container, which would happen later).
-    */
+func (runtime *BuildahBackend) getBuildahCommonOpts(ctx context.Context, suppressLog bool) (opts buildah.CommonOpts) {
+    if !suppressLog {
+        opts.LogWriter = logboek.Context(ctx).OutStream()
+    }
 
-    containerID := uuid.New().String()
+    return
+}
 
-    _, err := runtime.buildah.FromCommand(ctx, containerID, baseImage, buildah.FromCommandOpts{})
+func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage string, opts BuildStapelStageOpts) (string, error) {
+    containerID := fmt.Sprintf("werf-stage-build-%s", uuid.New().String())
+
+    _, err := runtime.buildah.FromCommand(ctx, containerID, baseImage, buildah.FromCommandOpts(runtime.getBuildahCommonOpts(ctx, true)))
     if err != nil {
         return "", fmt.Errorf("unable to create container using base image %q: %s", baseImage, err)
     }
 
+    // TODO(stapel-to-buildah): cleanup orphan build containers in werf-host-cleanup procedure
+    // defer runtime.buildah.Rm(ctx, containerID, buildah.RmOpts{CommonOpts: runtime.getBuildahCommonOpts(ctx, true)})
+
     if len(opts.PrepareContainerActions) > 0 {
         err := func() error {
-            containerRoot, err := runtime.buildah.Mount(ctx, containerID, buildah.MountOpts{})
+            containerRoot, err := runtime.buildah.Mount(ctx, containerID, buildah.MountOpts(runtime.getBuildahCommonOpts(ctx, true)))
             if err != nil {
                 return fmt.Errorf("unable to mount container %q root dir: %s", containerID, err)
             }
-            defer runtime.buildah.Umount(ctx, containerRoot, buildah.UmountOpts{})
+            defer runtime.buildah.Umount(ctx, containerRoot, buildah.UmountOpts(runtime.getBuildahCommonOpts(ctx, true)))
 
             for _, action := range opts.PrepareContainerActions {
                 if err := action.PrepareContainer(containerRoot); err != nil {
@@ -67,16 +70,44 @@ func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage string, opts BuildStapelStageOpts) (string, error) {
     }
 
     for _, cmd := range opts.UserCommands {
-        if err := runtime.buildah.RunCommand(ctx, containerID, strings.Fields(cmd), buildah.RunCommandOpts{}); err != nil {
+        var mounts []specs.Mount
+        for _, volume := range opts.BuildVolumes {
+            volumeParts := strings.SplitN(volume, ":", 2)
+            if len(volumeParts) != 2 {
+                panic(fmt.Sprintf("invalid volume %q: expected SOURCE:DESTINATION format", volume))
+            }
+
+            mounts = append(mounts, specs.Mount{
+                Type:        "bind",
+                Source:      volumeParts[0],
+                Destination: volumeParts[1],
+            })
+        }
+
+        // TODO(stapel-to-buildah): Consider support for shell script instead of separate run commands to allow shared
+        // usage of shell variables and functions between multiple commands.
+        // Maybe there is no need of such function, instead provide options to select shell in the werf.yaml.
+        // Is it important to provide compatibility between docker-server-based werf.yaml and buildah-based?
+        if err := runtime.buildah.RunCommand(ctx, containerID, []string{"sh", "-c", cmd}, buildah.RunCommandOpts{
+            CommonOpts: runtime.getBuildahCommonOpts(ctx, false),
+            Mounts:     mounts,
+        }); err != nil {
             return "", fmt.Errorf("unable to run %q: %s", cmd, err)
         }
     }
 
-    // TODO(stapel-to-buildah): use buildah.Change to set labels
-    fmt.Printf("[DEBUG] Setting labels %v for build container %q\n", opts.Labels, containerID)
+    logboek.Context(ctx).Debug().LogF("Setting labels %v for build container %q\n", opts.Labels, containerID)
+    if err := runtime.buildah.Config(ctx, containerID, buildah.ConfigOpts{
+        CommonOpts: runtime.getBuildahCommonOpts(ctx, true),
+        Labels:     opts.Labels,
+    }); err != nil {
+        return "", fmt.Errorf("unable to set container %q config: %s", containerID, err)
+    }
 
-    fmt.Printf("[DEBUG] Committing container %q\n", containerID)
-    imgID, err := runtime.buildah.Commit(ctx, containerID, buildah.CommitOpts{})
+    // TODO(stapel-to-buildah): Save container name as builtID. There is no need to commit an image here,
+    // because buildah allows to commit and push directly container, which would happen later.
+    logboek.Context(ctx).Debug().LogF("committing container %q\n", containerID)
+    imgID, err := runtime.buildah.Commit(ctx, containerID, buildah.CommitOpts{CommonOpts: runtime.getBuildahCommonOpts(ctx, true)})
    if err != nil {
         return "", fmt.Errorf("unable to commit container %q: %s", containerID, err)
     }
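
Tying the pieces together: the volume strings assembled in base.go are decoded here into runtime-spec bind mounts that apply only while a user command runs. A standalone sketch of that decoding (parseBuildVolumes is an invented helper; the SplitN parsing and specs.Mount fields follow the diff, with an error return in place of the diff's panic):

    package main

    import (
        "fmt"
        "strings"

        "github.com/opencontainers/runtime-spec/specs-go"
    )

    // parseBuildVolumes converts "SOURCE:DESTINATION" build volumes into bind
    // mounts, the way BuildStapelStage does before each RunCommand call.
    func parseBuildVolumes(volumes []string) ([]specs.Mount, error) {
        var mounts []specs.Mount
        for _, volume := range volumes {
            volumeParts := strings.SplitN(volume, ":", 2)
            if len(volumeParts) != 2 {
                return nil, fmt.Errorf("invalid volume %q: expected SOURCE:DESTINATION format", volume)
            }
            mounts = append(mounts, specs.Mount{
                Type:        "bind",
                Source:      volumeParts[0],
                Destination: volumeParts[1],
            })
        }
        return mounts, nil
    }

    func main() {
        mounts, err := parseBuildVolumes([]string{"/home/user/.werf/tmp/mounts/app:/app/build"})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", mounts)
    }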
