From a4682754882ca4c84b8de6dfadd565143ad2e327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Paolo=20Chil=C3=A0?= Date: Sat, 3 Feb 2024 16:53:26 +0100 Subject: [PATCH] Use package manifests for upgrade (#4174) * map paths when upgrading using .tar.gz packages * use structured output from unpack to identify old and new agents directories * copy state directories and rotate symlink correctly --- .../pkg/agent/application/upgrade/rollback.go | 30 +- .../agent/application/upgrade/step_mark.go | 99 +++-- .../agent/application/upgrade/step_relink.go | 11 +- .../agent/application/upgrade/step_unpack.go | 348 +++++++++++++++--- .../application/upgrade/step_unpack_test.go | 275 +++++++++++++- .../pkg/agent/application/upgrade/upgrade.go | 60 ++- internal/pkg/agent/cmd/watch.go | 6 +- 7 files changed, 683 insertions(+), 146 deletions(-) diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go index bea5c8c4f23..e3f4a4faf90 100644 --- a/internal/pkg/agent/application/upgrade/rollback.go +++ b/internal/pkg/agent/application/upgrade/rollback.go @@ -33,9 +33,20 @@ const ( ) // Rollback rollbacks to previous version which was functioning before upgrade. -func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentHash string) error { +func Rollback(ctx context.Context, log *logger.Logger, prevVersionedHome, prevHash string, currentHash string) error { + symlinkPath := filepath.Join(paths.Top(), agentName) + + var symlinkTarget string + if prevVersionedHome != "" { + symlinkTarget = paths.BinaryPath(filepath.Join(paths.Top(), prevVersionedHome), agentName) + } else { + // fallback for upgrades that didn't use the manifest and path remapping + hashedDir := fmt.Sprintf("%s-%s", agentName, prevHash) + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. 
+ symlinkTarget = paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) + } // change symlink - if err := ChangeSymlink(ctx, log, prevHash); err != nil { + if err := changeSymlinkInternal(log, symlinkPath, symlinkTarget); err != nil { return err } @@ -51,11 +62,11 @@ func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentH } // cleanup everything except version we're rolling back into - return Cleanup(log, prevHash, true, true) + return Cleanup(log, prevVersionedHome, prevHash, true, true) } // Cleanup removes all artifacts and files related to a specified version. -func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs bool) error { +func Cleanup(log *logger.Logger, currentVersionedHome, currentHash string, removeMarker bool, keepLogs bool) error { log.Infow("Cleaning up upgrade", "hash", currentHash, "remove_marker", removeMarker) <-time.After(afterRestartDelay) @@ -83,7 +94,16 @@ func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs _ = os.Remove(prevSymlink) dirPrefix := fmt.Sprintf("%s-", agentName) - currentDir := fmt.Sprintf("%s-%s", agentName, currentHash) + var currentDir string + if currentVersionedHome != "" { + currentDir, err = filepath.Rel("data", currentVersionedHome) + if err != nil { + return fmt.Errorf("extracting elastic-agent path relative to data directory from %s: %w", currentVersionedHome, err) + } + } else { + currentDir = fmt.Sprintf("%s-%s", agentName, currentHash) + } + for _, dir := range subdirs { if dir == currentDir { continue diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 90bc11dfda6..71834f64a90 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -6,6 +6,7 @@ package upgrade import ( "context" + "fmt" "os" "path/filepath" "time" @@ -24,8 +25,13 @@ const markerFilename = ".update-marker" 
// UpdateMarker is a marker holding necessary information about ongoing upgrade. type UpdateMarker struct { + // Version represents the version the agent is upgraded to + Version string `json:"version" yaml:"version"` // Hash agent is updated to Hash string `json:"hash" yaml:"hash"` + // VersionedHome represents the path where the new agent is located relative to top path + VersionedHome string `json:"versioned_home" yaml:"versioned_home"` + //UpdatedOn marks a date when update happened UpdatedOn time.Time `json:"updated_on" yaml:"updated_on"` @@ -33,6 +39,8 @@ type UpdateMarker struct { PrevVersion string `json:"prev_version" yaml:"prev_version"` // PrevHash is a hash agent is updated from PrevHash string `json:"prev_hash" yaml:"prev_hash"` + // PrevVersionedHome represents the path where the old agent is located relative to top path + PrevVersionedHome string `json:"prev_versioned_home" yaml:"prev_versioned_home"` // Acked is a flag marking whether or not action was acked Acked bool `json:"acked" yaml:"acked"` @@ -83,42 +91,55 @@ func convertToActionUpgrade(a *MarkerActionUpgrade) *fleetapi.ActionUpgrade { } type updateMarkerSerializer struct { - Hash string `yaml:"hash"` - UpdatedOn time.Time `yaml:"updated_on"` - PrevVersion string `yaml:"prev_version"` - PrevHash string `yaml:"prev_hash"` - Acked bool `yaml:"acked"` - Action *MarkerActionUpgrade `yaml:"action"` - Details *details.Details `yaml:"details"` + Version string `yaml:"version"` + Hash string `yaml:"hash"` + VersionedHome string `yaml:"versioned_home"` + UpdatedOn time.Time `yaml:"updated_on"` + PrevVersion string `yaml:"prev_version"` + PrevHash string `yaml:"prev_hash"` + PrevVersionedHome string `yaml:"prev_versioned_home"` + Acked bool `yaml:"acked"` + Action *MarkerActionUpgrade `yaml:"action"` + Details *details.Details `yaml:"details"` } func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { return &updateMarkerSerializer{ - Hash: m.Hash, - UpdatedOn: m.UpdatedOn, - PrevVersion: 
m.PrevVersion, - PrevHash: m.PrevHash, - Acked: m.Acked, - Action: convertToMarkerAction(m.Action), - Details: m.Details, + Version: m.Version, + Hash: m.Hash, + VersionedHome: m.VersionedHome, + UpdatedOn: m.UpdatedOn, + PrevVersion: m.PrevVersion, + PrevHash: m.PrevHash, + PrevVersionedHome: m.PrevVersionedHome, + Acked: m.Acked, + Action: convertToMarkerAction(m.Action), + Details: m.Details, } } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action *fleetapi.ActionUpgrade, upgradeDetails *details.Details) error { - prevVersion := release.Version() +func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, version, hash, versionedHome string, action *fleetapi.ActionUpgrade, upgradeDetails *details.Details) error { + prevVersion := release.VersionWithSnapshot() prevHash := release.Commit() + prevVersionedHome, err := filepath.Rel(paths.Top(), paths.Home()) + if err != nil { + return fmt.Errorf("calculating home path relative to top, home: %q top: %q : %w", paths.Home(), paths.Top(), err) + } if len(prevHash) > hashLen { prevHash = prevHash[:hashLen] } marker := &UpdateMarker{ - Hash: hash, - UpdatedOn: time.Now(), - PrevVersion: prevVersion, - PrevHash: prevHash, - Action: action, - Details: upgradeDetails, + Version: version, + Hash: hash, + VersionedHome: versionedHome, + UpdatedOn: time.Now(), + PrevVersion: prevVersion, + PrevHash: prevHash, + PrevVersionedHome: prevVersionedHome, + Action: action, + Details: upgradeDetails, } markerBytes, err := yaml.Marshal(newMarkerSerializer(marker)) @@ -183,13 +204,16 @@ func loadMarker(markerFile string) (*UpdateMarker, error) { } return &UpdateMarker{ - Hash: marker.Hash, - UpdatedOn: marker.UpdatedOn, - PrevVersion: marker.PrevVersion, - PrevHash: marker.PrevHash, - Acked: marker.Acked, - Action: convertToActionUpgrade(marker.Action), - Details: marker.Details, + Version: marker.Version, + Hash: 
marker.Hash, + VersionedHome: marker.VersionedHome, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + PrevVersionedHome: marker.PrevVersionedHome, + Acked: marker.Acked, + Action: convertToActionUpgrade(marker.Action), + Details: marker.Details, }, nil } @@ -198,13 +222,16 @@ func loadMarker(markerFile string) (*UpdateMarker, error) { // file is immediately flushed to persistent storage. func SaveMarker(marker *UpdateMarker, shouldFsync bool) error { makerSerializer := &updateMarkerSerializer{ - Hash: marker.Hash, - UpdatedOn: marker.UpdatedOn, - PrevVersion: marker.PrevVersion, - PrevHash: marker.PrevHash, - Acked: marker.Acked, - Action: convertToMarkerAction(marker.Action), - Details: marker.Details, + Version: marker.Version, + Hash: marker.Hash, + VersionedHome: marker.VersionedHome, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + PrevVersionedHome: marker.PrevVersionedHome, + Acked: marker.Acked, + Action: convertToMarkerAction(marker.Action), + Details: marker.Details, } markerBytes, err := yaml.Marshal(makerSerializer) if err != nil { diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index 13c49693062..f272083bfe9 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -32,21 +32,26 @@ func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) e // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. 
newPath := paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) + return changeSymlinkInternal(log, symlinkPath, newPath) +} + +func changeSymlinkInternal(log *logger.Logger, symlinkPath, newTarget string) error { + // handle windows suffixes if runtime.GOOS == windows { symlinkPath += exe - newPath += exe + newTarget += exe } prevNewPath := prevSymlinkPath() - log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newPath, "prev_path", prevNewPath) + log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newTarget, "prev_path", prevNewPath) // remove symlink to avoid upgrade failures if err := os.Remove(prevNewPath); !os.IsNotExist(err) { return err } - if err := os.Symlink(newPath, prevNewPath); err != nil { + if err := os.Symlink(newTarget, prevNewPath); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update agent symlink") } diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 4f5b0bd7440..b9af3ed5620 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -7,52 +7,104 @@ package upgrade import ( "archive/tar" "archive/zip" + "bytes" "compress/gzip" + goerrors "errors" "fmt" "io" "io/fs" "os" + "path" "path/filepath" "runtime" "strings" - "github.com/hashicorp/go-multierror" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" "github.com/elastic/elastic-agent/pkg/core/logger" ) +// UnpackResult contains the location and hash of the unpacked agent files +type UnpackResult struct { + // Hash contains the unpacked agent commit hash, limited to a length of 6 for backward compatibility + Hash string `json:"hash" yaml:"hash"` + // VersionedHome indicates the path (forward slash separated) where to find the unpacked agent files + // The value depends on the mappings specified in 
manifest.yaml, if no manifest is found it assumes the legacy data/elastic-agent- format + VersionedHome string `json:"versioned-home" yaml:"versioned-home"` +} + // unpack unpacks archive correctly, skips root (symlink, config...) unpacks data/* -func (u *Upgrader) unpack(version, archivePath, dataDir string) (string, error) { +func (u *Upgrader) unpack(version, archivePath, dataDir string) (UnpackResult, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested - var hash string + var unpackRes UnpackResult var err error if runtime.GOOS == windows { - hash, err = unzip(u.log, archivePath, dataDir) + unpackRes, err = unzip(u.log, archivePath, dataDir) } else { - hash, err = untar(u.log, version, archivePath, dataDir) + unpackRes, err = untar(u.log, version, archivePath, dataDir) } if err != nil { - u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "hash", hash) - return "", err + u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "unpackResult", unpackRes) + return UnpackResult{}, err } - u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "hash", hash) - return hash, nil + u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "unpackResult", unpackRes) + return unpackRes, nil } -func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { +func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { - return "", err + return UnpackResult{}, err } defer r.Close() fileNamePrefix := strings.TrimSuffix(filepath.Base(archivePath), ".zip") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename + pm := pathMapper{} + versionedHome := "" + + // Load manifest, the use of 
path.Join is intentional since in .zip file paths use slash ('/') as separator + manifestFile, err := r.Open(path.Join(fileNamePrefix, "manifest.yaml")) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + // we got a real error looking up the manifest + return UnpackResult{}, fmt.Errorf("looking up manifest in package: %w", err) + } + if err == nil { + // load manifest + defer manifestFile.Close() + manifest, err := v1.ParseManifest(manifestFile) + if err != nil { + return UnpackResult{}, fmt.Errorf("parsing package manifest: %w", err) + } + pm.mappings = manifest.Package.PathMappings + versionedHome = path.Clean(pm.Map(manifest.Package.VersionedHome)) + } + + // Load hash, the use of path.Join is intentional since in .zip file paths use slash ('/') as separator + hashFile, err := r.Open(path.Join(fileNamePrefix, agentCommitFile)) + if err != nil { + // we got a real error looking up the commit hash file + return UnpackResult{}, fmt.Errorf("looking up %q in package: %w", agentCommitFile, err) + } + defer hashFile.Close() + + hashBytes, err := io.ReadAll(hashFile) + if err != nil { + return UnpackResult{}, fmt.Errorf("reading elastic-agent hash file content: %w", err) + } + if len(hashBytes) < hashLen { + return UnpackResult{}, fmt.Errorf("elastic-agent hash %q is too short (minimum %d)", string(hashBytes), hashLen) + } + hash = string(hashBytes[:hashLen]) + if versionedHome == "" { + // if at this point we didn't load the manifest, set the versioned home to the fallback value + versionedHome = createVersionedHomeFromHash(hash) + } + unpackFile := func(f *zip.File) (err error) { rc, err := f.Open() if err != nil { @@ -60,44 +112,58 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { } defer func() { if cerr := rc.Close(); cerr != nil { - err = multierror.Append(err, cerr) + err = goerrors.Join(err, cerr) } }() - //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := 
io.ReadAll(rc) - if err != nil || len(hashBytes) < hashLen { - return err - } - - hash = string(hashBytes[:hashLen]) + // we already loaded the hash, skip this one return nil } + mappedPackagePath := pm.Map(fileName) + // skip everything outside data/ - if !strings.HasPrefix(fileName, "data/") { + if !strings.HasPrefix(mappedPackagePath, "data/") { return nil } - path := filepath.Join(dataDir, strings.TrimPrefix(fileName, "data/")) + dstPath := strings.TrimPrefix(mappedPackagePath, "data/") + dstPath = filepath.Join(dataDir, dstPath) if f.FileInfo().IsDir() { - log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) - // remove any world permissions from the directory - _ = os.MkdirAll(path, f.Mode()&0770) + log.Debugw("Unpacking directory", "archive", "zip", "file.path", dstPath) + // check if the directory already exists + _, err = os.Stat(dstPath) + if errors.Is(err, fs.ErrNotExist) { + // the directory does not exist, create it and any non-existing parent directory with the same permissions + if err := os.MkdirAll(dstPath, f.Mode().Perm()&0770); err != nil { + return fmt.Errorf("creating directory %q: %w", dstPath, err) + } + } else if err != nil { + return fmt.Errorf("stat() directory %q: %w", dstPath, err) + } else { + // directory already exists, set the appropriate permissions + err = os.Chmod(dstPath, f.Mode().Perm()&0770) + if err != nil { + return fmt.Errorf("setting permissions %O for directory %q: %w", f.Mode().Perm()&0770, dstPath, err) + } + } + + _ = os.MkdirAll(dstPath, f.Mode()&0770) } else { - log.Debugw("Unpacking file", "archive", "zip", "file.path", path) - // remove any world permissions from the directory/file - _ = os.MkdirAll(filepath.Dir(path), f.Mode()&0770) - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()&0770) + log.Debugw("Unpacking file", "archive", "zip", "file.path", dstPath) + // create non-existing containing folders with 0770 permissions right now, we'll fix the permission of each + // 
directory as we come across them while processing the other package entries + _ = os.MkdirAll(filepath.Dir(dstPath), 0770) + f, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()&0770) if err != nil { return err } defer func() { if cerr := f.Close(); cerr != nil { - err = multierror.Append(err, cerr) + err = goerrors.Join(err, cerr) } }() @@ -119,29 +185,74 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { } if err := unpackFile(f); err != nil { - return "", err + return UnpackResult{}, err } } - return hash, nil + return UnpackResult{ + Hash: hash, + VersionedHome: versionedHome, + }, nil } -func untar(log *logger.Logger, version string, archivePath, dataDir string) (string, error) { +func untar(log *logger.Logger, version string, archivePath, dataDir string) (UnpackResult, error) { + + var versionedHome string + var rootDir string + var hash string + + // Look up manifest in the archive and prepare path mappings, if any + pm := pathMapper{} + + // quickly open the archive and look up manifest.yaml file + fileContents, err := getFilesContentFromTar(archivePath, "manifest.yaml", agentCommitFile) + if err != nil { + return UnpackResult{}, fmt.Errorf("looking for package metadata files: %w", err) + } + + manifestReader := fileContents["manifest.yaml"] + if manifestReader != nil { + manifest, err := v1.ParseManifest(manifestReader) + if err != nil { + return UnpackResult{}, fmt.Errorf("parsing package manifest: %w", err) + } + + // set the path mappings + pm.mappings = manifest.Package.PathMappings + versionedHome = path.Clean(pm.Map(manifest.Package.VersionedHome)) + } + + if agentCommitReader, ok := fileContents[agentCommitFile]; ok { + commitBytes, err := io.ReadAll(agentCommitReader) + if err != nil { + return UnpackResult{}, fmt.Errorf("reading agent commit hash file: %w", err) + } + if len(commitBytes) < hashLen { + return UnpackResult{}, fmt.Errorf("hash %q is shorter than minimum length %d", 
string(commitBytes), hashLen) + } + + agentCommitHash := string(commitBytes) + hash = agentCommitHash[:hashLen] + if versionedHome == "" { + // set default value of versioned home if it wasn't set by reading the manifest + versionedHome = createVersionedHomeFromHash(agentCommitHash) + } + } + r, err := os.Open(archivePath) if err != nil { - return "", errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) + return UnpackResult{}, errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) } defer r.Close() zr, err := gzip.NewReader(r) if err != nil { - return "", errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) + return UnpackResult{}, errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) } tr := tar.NewReader(zr) - var rootDir string - var hash string - fileNamePrefix := strings.TrimSuffix(filepath.Base(archivePath), ".tar.gz") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename + + fileNamePrefix := getFileNamePrefix(archivePath) // go through all the content of a tar archive // if elastic-agent.active.commit file is found, get commit of the version unpacked @@ -153,26 +264,26 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str break } if err != nil { - return "", err + return UnpackResult{}, err } if !validFileName(f.Name) { - return "", errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) + return UnpackResult{}, errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) } - //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := 
io.ReadAll(tr) - if err != nil || len(hashBytes) < hashLen { - return "", err - } - - hash = string(hashBytes[:hashLen]) + // we already loaded the hash, skip this one continue } + // map the filename + fileName = pm.Map(fileName) + + // we should check that the path is a local one but since we discard anything that is not under "data/" we can + // skip the additional check + // skip everything outside data/ if !strings.HasPrefix(fileName, "data/") { continue @@ -191,16 +302,16 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str switch { case mode.IsRegular(): log.Debugw("Unpacking file", "archive", "tar", "file.path", abs) - // just to be sure, it should already be created by Dir type - // remove any world permissions from the directory - if err = os.MkdirAll(filepath.Dir(abs), 0o750); err != nil { - return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + // create non-existing containing folders with 0750 permissions right now, we'll fix the permission of each + // directory as we come across them while processing the other package entries + if err = os.MkdirAll(filepath.Dir(abs), 0750); err != nil { + return UnpackResult{}, errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } // remove any world permissions from the file wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()&0770) if err != nil { - return "", errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } //nolint:gosec // legacy @@ -209,31 +320,39 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str err = closeErr } if err != nil { - return "", fmt.Errorf("TarInstaller: error writing 
to %s: %w", abs, err) + return UnpackResult{}, fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) } case mode.IsDir(): log.Debugw("Unpacking directory", "archive", "tar", "file.path", abs) - // remove any world permissions from the directory + // check if the directory already exists _, err = os.Stat(abs) if errors.Is(err, fs.ErrNotExist) { + // the directory does not exist, create it and any non-existing parent directory with the same permissions if err := os.MkdirAll(abs, mode.Perm()&0770); err != nil { - return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } } else if err != nil { - return "", errors.New(err, "TarInstaller: stat() directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, "TarInstaller: stat() directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } else { - // set the appropriate permissions - err = os.Chmod(abs, mode.Perm()&0o770) + // directory already exists, set the appropriate permissions + err = os.Chmod(abs, mode.Perm()&0770) if err != nil { - return "", errors.New(err, fmt.Sprintf("TarInstaller: setting permissions %O for directory %q", mode.Perm()&0o770, abs), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, fmt.Sprintf("TarInstaller: setting permissions %O for directory %q", mode.Perm()&0770, abs), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } } default: - return "", errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", fileName, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fileName)) + return UnpackResult{}, errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", fileName, mode), 
errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fileName)) } } - return hash, nil + return UnpackResult{ + Hash: hash, + VersionedHome: versionedHome, + }, nil +} + +func getFileNamePrefix(archivePath string) string { + return strings.TrimSuffix(filepath.Base(archivePath), ".tar.gz") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename } func validFileName(p string) bool { @@ -242,3 +361,110 @@ func validFileName(p string) bool { } return true } + +type pathMapper struct { + mappings []map[string]string +} + +func (pm pathMapper) Map(path string) string { + for _, mapping := range pm.mappings { + for pkgPath, mappedPath := range mapping { + if strings.HasPrefix(path, pkgPath) { + return filepath.Join(mappedPath, path[len(pkgPath):]) + } + } + } + return path +} + +type tarCloser struct { + tarFile *os.File + gzipReader *gzip.Reader +} + +func (tc *tarCloser) Close() error { + var err error + if tc.gzipReader != nil { + err = goerrors.Join(err, tc.gzipReader.Close()) + } + // prevent double Close() call to gzip reader + tc.gzipReader = nil + if tc.tarFile != nil { + err = goerrors.Join(err, tc.tarFile.Close()) + } + // prevent double Close() call on the underlying file + tc.tarFile = nil + return err +} + +// openTar is a convenience function to open a tar.gz file. +// It returns a *tar.Reader, an io.Closer implementation to be called to release resources and an error +// In case of errors the *tar.Reader will be nil, but the io.Closer is always returned and must be called also in case +// of errors to close the underlying readers. 
+func openTar(archivePath string) (*tar.Reader, io.Closer, error) { + tc := new(tarCloser) + r, err := os.Open(archivePath) + if err != nil { + return nil, tc, fmt.Errorf("opening package %s: %w", archivePath, err) + } + tc.tarFile = r + + zr, err := gzip.NewReader(r) + if err != nil { + return nil, tc, fmt.Errorf("package %s does not seem to have a valid gzip compression: %w", archivePath, err) + } + tc.gzipReader = zr + + return tar.NewReader(zr), tc, nil +} + +// getFilesContentFromTar is a small utility function which will load in memory the contents of a list of files from the tar archive. +// It's meant to be used to load package information/metadata stored in small files within the .tar.gz archive +func getFilesContentFromTar(archivePath string, files ...string) (map[string]io.Reader, error) { + tr, tc, err := openTar(archivePath) + if err != nil { + return nil, fmt.Errorf("opening tar.gz package %s: %w", archivePath, err) + } + defer tc.Close() + + prefix := getFileNamePrefix(archivePath) + + result := make(map[string]io.Reader, len(files)) + fileset := make(map[string]struct{}, len(files)) + // load the fileset with the names we are looking for + for _, fName := range files { + fileset[fName] = struct{}{} + } + + // go through all the content of a tar archive + // if one of the listed files is found, read the contents and set a byte reader into the result map + for { + f, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + return nil, fmt.Errorf("reading archive: %w", err) + } + + fileName := strings.TrimPrefix(f.Name, prefix) + if _, ok := fileset[fileName]; ok { + // it's one of the files we are looking for, retrieve the content and set a reader into the result map + manifestBytes, err := io.ReadAll(tr) + if err != nil { + return nil, fmt.Errorf("reading manifest bytes: %w", err) + } + + reader := bytes.NewReader(manifestBytes) + result[fileName] = reader + } + + } + + return result, nil +} + +func 
createVersionedHomeFromHash(hash string) string { + return fmt.Sprintf("data/elastic-agent-%s", hash[:hashLen]) +} diff --git a/internal/pkg/agent/application/upgrade/step_unpack_test.go b/internal/pkg/agent/application/upgrade/step_unpack_test.go index c2d5bf0ece7..493190d3454 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack_test.go +++ b/internal/pkg/agent/application/upgrade/step_unpack_test.go @@ -6,6 +6,7 @@ package upgrade import ( "archive/tar" + "archive/zip" "compress/gzip" "fmt" "io" @@ -13,7 +14,6 @@ import ( "os" "path" "path/filepath" - "runtime" "strings" "testing" "time" @@ -24,6 +24,19 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const agentBinaryPlaceholderContent = "Placeholder for the elastic-agent binary" + +const ea_123_manifest = ` +version: co.elastic.agent/v1 +kind: PackageManifest +package: + version: 1.2.3 + snapshot: true + versioned-home: data/elastic-agent-abcdef + path-mappings: + - data/elastic-agent-abcdef: data/elastic-agent-1.2.3-SNAPSHOT-abcdef + manifest.yaml: data/elastic-agent-1.2.3-SNAPSHOT-abcdef/manifest.yaml +` const foo_component_spec = ` version: 2 inputs: @@ -87,21 +100,22 @@ func (f files) Sys() any { type createArchiveFunc func(t *testing.T, archiveFiles []files) (string, error) type checkExtractedPath func(t *testing.T, testDataDir string) -func TestUpgrader_unpack(t *testing.T) { +func TestUpgrader_unpackTarGz(t *testing.T) { type args struct { version string archiveGenerator createArchiveFunc archiveFiles []files } + tests := []struct { name string args args - want string + want UnpackResult wantErr assert.ErrorAssertionFunc checkFiles checkExtractedPath }{ { - name: "targz with file before containing folder", + name: "file before containing folder", args: args{ version: "1.2.3", archiveFiles: []files{ @@ -110,7 +124,7 @@ func TestUpgrader_unpack(t *testing.T) { {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/package.version", content: 
"1.2.3", mode: fs.ModePerm & 0o640}, {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o700)}, - {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/" + agentName, content: "Placeholder for the elastic-agent binary", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, @@ -120,10 +134,12 @@ func TestUpgrader_unpack(t *testing.T) { return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", i) }, }, - want: "abcdef", + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-abcdef"), + }, wantErr: assert.NoError, checkFiles: func(t *testing.T, testDataDir string) { - versionedHome := filepath.Join(testDataDir, "elastic-agent-abcdef") require.DirExists(t, versionedHome, "directory for package.version does not exists") stat, err := os.Stat(versionedHome) @@ -133,30 +149,189 @@ func TestUpgrader_unpack(t *testing.T) { assert.Equalf(t, expectedPermissions, actualPermissions, "Wrong permissions set on versioned home %q: expected %O, got %O", versionedHome, expectedPermissions, actualPermissions) }, }, + { + name: "package with manifest 
file", + args: args{ + version: "1.2.3", + archiveFiles: []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/manifest.yaml", content: ea_123_manifest, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, + {fType: SYMLINK, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/" + agentName, content: "data/elastic-agent-abcdef/" + agentName, mode: fs.ModeSymlink | (fs.ModePerm & 0o750)}, + }, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: 
"data/elastic-agent-1.2.3-SNAPSHOT-abcdef", + }, + wantErr: assert.NoError, + checkFiles: func(t *testing.T, testDataDir string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-1.2.3-SNAPSHOT-abcdef") + require.DirExists(t, versionedHome, "mapped versioned home directory does not exists") + mappedAgentExecutable := filepath.Join(versionedHome, agentName) + if assert.FileExistsf(t, mappedAgentExecutable, "agent executable %q is not found in mapped versioned home directory %q", mappedAgentExecutable, versionedHome) { + fileBytes, err := os.ReadFile(mappedAgentExecutable) + if assert.NoErrorf(t, err, "error reading elastic-agent executable %q", mappedAgentExecutable) { + assert.Equal(t, agentBinaryPlaceholderContent, string(fileBytes), "agent binary placeholder content does not match") + } + } + mappedPackageManifest := filepath.Join(versionedHome, "manifest.yaml") + if assert.FileExistsf(t, mappedPackageManifest, "package manifest %q is not found in mapped versioned home directory %q", mappedPackageManifest, versionedHome) { + fileBytes, err := os.ReadFile(mappedPackageManifest) + if assert.NoErrorf(t, err, "error reading package manifest %q", mappedPackageManifest) { + assert.Equal(t, ea_123_manifest, string(fileBytes), "package manifest content does not match") + } + } + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("tar.gz tests only run on Linux/MacOS") + testTop := t.TempDir() + testDataDir := filepath.Join(testTop, "data") + err := os.MkdirAll(testDataDir, 0o777) + assert.NoErrorf(t, err, "error creating initial structure %q", testDataDir) + log, _ := logger.NewTesting(tt.name) + + archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) + require.NoError(t, err, "creation of test archive file failed") + + got, err := untar(log, tt.args.version, archiveFile, testDataDir) + if !tt.wantErr(t, err, fmt.Sprintf("untar(%v, %v, %v)", tt.args.version, archiveFile, 
testDataDir)) { + return + } + assert.Equalf(t, tt.want, got, "untar(%v, %v, %v)", tt.args.version, archiveFile, testDataDir) + if tt.checkFiles != nil { + tt.checkFiles(t, testDataDir) } + }) + } +} + +func TestUpgrader_unpackZip(t *testing.T) { + type args struct { + archiveGenerator createArchiveFunc + archiveFiles []files + } + + tests := []struct { + name string + args args + want UnpackResult + wantErr assert.ErrorAssertionFunc + checkFiles checkExtractedPath + }{ + { + name: "file before containing folder", + args: args{ + archiveFiles: []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o700)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, + }, + archiveGenerator: func(t *testing.T, i 
[]files) (string, error) { + return createZipArchive(t, "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-abcdef"), + }, + wantErr: assert.NoError, + checkFiles: func(t *testing.T, testDataDir string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-abcdef") + require.DirExists(t, versionedHome, "directory for package.version does not exists") + stat, err := os.Stat(versionedHome) + require.NoErrorf(t, err, "error calling Stat() for versionedHome %q", versionedHome) + expectedPermissions := fs.ModePerm & 0o700 + actualPermissions := fs.ModePerm & stat.Mode() + assert.Equalf(t, expectedPermissions, actualPermissions, "Wrong permissions set on versioned home %q: expected %O, got %O", versionedHome, expectedPermissions, actualPermissions) + agentExecutable := filepath.Join(versionedHome, agentName) + if assert.FileExistsf(t, agentExecutable, "agent executable %q is not found in versioned home directory %q", agentExecutable, versionedHome) { + fileBytes, err := os.ReadFile(agentExecutable) + if assert.NoErrorf(t, err, "error reading elastic-agent executable %q", agentExecutable) { + assert.Equal(t, agentBinaryPlaceholderContent, string(fileBytes), "agent binary placeholder content does not match") + } + } + }, + }, + { + name: "package with manifest file", + args: args{ + archiveFiles: []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/manifest.yaml", content: ea_123_manifest, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: 
"elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, + }, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createZipArchive(t, "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + checkFiles: func(t *testing.T, testDataDir string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-1.2.3-SNAPSHOT-abcdef") + require.DirExists(t, versionedHome, "mapped versioned home directory does not exists") + mappedAgentExecutable := filepath.Join(versionedHome, agentName) + if assert.FileExistsf(t, mappedAgentExecutable, "agent executable %q is not found in mapped versioned home directory %q", mappedAgentExecutable, versionedHome) { + fileBytes, err := os.ReadFile(mappedAgentExecutable) + if assert.NoErrorf(t, err, "error reading elastic-agent executable %q", mappedAgentExecutable) { + assert.Equal(t, agentBinaryPlaceholderContent, string(fileBytes), "agent binary placeholder 
content does not match") + } + } + mappedPackageManifest := filepath.Join(versionedHome, "manifest.yaml") + if assert.FileExistsf(t, mappedPackageManifest, "package manifest %q is not found in mapped versioned home directory %q", mappedPackageManifest, versionedHome) { + fileBytes, err := os.ReadFile(mappedPackageManifest) + if assert.NoErrorf(t, err, "error reading package manifest %q", mappedPackageManifest) { + assert.Equal(t, ea_123_manifest, string(fileBytes), "package manifest content does not match") + } + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { testTop := t.TempDir() testDataDir := filepath.Join(testTop, "data") err := os.MkdirAll(testDataDir, 0o777) assert.NoErrorf(t, err, "error creating initial structure %q", testDataDir) log, _ := logger.NewTesting(tt.name) - u := &Upgrader{ - log: log, - } archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) require.NoError(t, err, "creation of test archive file failed") - got, err := u.unpack(tt.args.version, archiveFile, testDataDir) - if !tt.wantErr(t, err, fmt.Sprintf("unpack(%v, %v, %v)", tt.args.version, archiveFile, testDataDir)) { + got, err := unzip(log, archiveFile, testDataDir) + if !tt.wantErr(t, err, fmt.Sprintf("unzip(%v, %v)", archiveFile, testDataDir)) { return } - assert.Equalf(t, tt.want, got, "unpack(%v, %v, %v)", tt.args.version, archiveFile, testDataDir) + assert.Equalf(t, tt.want, got, "unzip(%v, %v)", archiveFile, testDataDir) if tt.checkFiles != nil { tt.checkFiles(t, testDataDir) } @@ -165,20 +340,23 @@ func TestUpgrader_unpack(t *testing.T) { } func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { - + t.Helper() outDir := t.TempDir() outFilePath := filepath.Join(outDir, archiveName) file, err := os.OpenFile(outFilePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) require.NoErrorf(t, err, "error creating output archive %q", outFilePath) - defer file.Close() + defer func(file *os.File) { + err := 
file.Close() + assert.NoError(t, err, "error closing tar.gz archive file") + }(file) zipWriter := gzip.NewWriter(file) writer := tar.NewWriter(zipWriter) defer func(writer *tar.Writer) { err := writer.Close() - require.NoError(t, err, "error closing tar writer") + assert.NoError(t, err, "error closing tar writer") err = zipWriter.Close() - require.NoError(t, err, "error closing gzip writer") + assert.NoError(t, err, "error closing gzip writer") }(writer) for _, af := range archiveFiles { @@ -210,3 +388,62 @@ func addEntryToTarArchive(af files, writer *tar.Writer) error { } return nil } + +func createZipArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { + t.Helper() + outDir := t.TempDir() + + outFilePath := filepath.Join(outDir, archiveName) + file, err := os.OpenFile(outFilePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) + require.NoErrorf(t, err, "error creating output archive %q", outFilePath) + defer func(file *os.File) { + err := file.Close() + assert.NoError(t, err, "error closing zip archive file") + }(file) + + w := zip.NewWriter(file) + defer func(writer *zip.Writer) { + err := writer.Close() + assert.NoError(t, err, "error closing tar writer") + }(w) + + for _, af := range archiveFiles { + if af.fType == SYMLINK { + return "", fmt.Errorf("entry %q is a symlink. 
Not supported in .zip files", af.path) + } + + err = addEntryToZipArchive(af, w) + require.NoErrorf(t, err, "error adding %q to tar archive", af.path) + } + return outFilePath, nil +} + +func addEntryToZipArchive(af files, writer *zip.Writer) error { + header, err := zip.FileInfoHeader(&af) + if err != nil { + return fmt.Errorf("creating header for %q: %w", af.path, err) + } + + header.SetMode(af.Mode() & os.ModePerm) + header.Name = af.path + if af.IsDir() { + header.Name += string(filepath.Separator) + } else { + header.Method = zip.Deflate + } + + w, err := writer.CreateHeader(header) + if err != nil { + return err + } + + if af.IsDir() { + return nil + } + + if _, err = io.Copy(w, strings.NewReader(af.content)); err != nil { + return err + } + + return nil +} diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e154251f552..d8f8662ce11 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -7,6 +7,7 @@ package upgrade import ( "context" "fmt" + "io/fs" "os" "path/filepath" "runtime" @@ -169,46 +170,65 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string } det.SetState(details.StateExtracting) - - newHash, err := u.unpack(version, archivePath, paths.Data()) + // TODO: add check that no archive files end up in the current versioned home + unpackRes, err := u.unpack(version, archivePath, paths.Data()) if err != nil { return nil, err } + newHash := unpackRes.Hash if newHash == "" { return nil, errors.New("unknown hash") } - if strings.HasPrefix(release.Commit(), newHash) { - u.log.Warn("Upgrade action skipped: upgrade did not occur because its the same version") - return nil, nil + if unpackRes.VersionedHome == "" { + // FIXME this should be treated as an error + return nil, fmt.Errorf("versionedhome is empty: %v", unpackRes) } - if err := copyActionStore(u.log, newHash); err != nil { + //if 
strings.HasPrefix(release.Commit(), newHash) { + // u.log.Warn("Upgrade action skipped: upgrade did not occur because its the same version") + // return nil, nil + //} + + newHome := filepath.Join(paths.Top(), unpackRes.VersionedHome) + + if err := copyActionStore(u.log, newHome); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := copyRunDirectory(u.log, newHash); err != nil { + newRunPath := filepath.Join(newHome, "run") + oldRunPath := filepath.Join(paths.Home(), "run") + + if err := copyRunDirectory(u.log, oldRunPath, newRunPath); err != nil { return nil, errors.New(err, "failed to copy run directory") } det.SetState(details.StateReplacing) - if err := ChangeSymlink(ctx, u.log, newHash); err != nil { + // create symlink to the /elastic-agent + hashedDir := unpackRes.VersionedHome + + symlinkPath := filepath.Join(paths.Top(), agentName) + + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. 
+ newPath := paths.BinaryPath(filepath.Join(paths.Top(), hashedDir), agentName) + + if err := changeSymlinkInternal(u.log, symlinkPath, newPath); err != nil { u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) + rollbackInstall(ctx, u.log, hashedDir) return nil, err } - if err := u.markUpgrade(ctx, u.log, newHash, action, det); err != nil { + if err := u.markUpgrade(ctx, u.log, version, unpackRes.Hash, unpackRes.VersionedHome, action, det); err != nil { u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) + rollbackInstall(ctx, u.log, hashedDir) return nil, err } if err := InvokeWatcher(u.log); err != nil { u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) + rollbackInstall(ctx, u.log, hashedDir) return nil, err } @@ -269,15 +289,19 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -func rollbackInstall(ctx context.Context, log *logger.Logger, hash string) { - os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) +func rollbackInstall(ctx context.Context, log *logger.Logger, versionedHome string) { + err := os.RemoveAll(filepath.Join(paths.Top(), versionedHome)) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + // TODO should this be a warning or an error ? 
+ log.Warnw("error rolling back install", "error.message", err) + } + // FIXME update _ = ChangeSymlink(ctx, log, release.ShortCommit()) } -func copyActionStore(log *logger.Logger, newHash string) error { +func copyActionStore(log *logger.Logger, newHome string) error { // copies legacy action_store.yml, state.yml and state.enc encrypted file if exists storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreYmlFile(), paths.AgentStateStoreFile()} - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) log.Infow("Copying action store", "new_home_path", newHome) for _, currentActionStorePath := range storePaths { @@ -300,9 +324,7 @@ func copyActionStore(log *logger.Logger, newHash string) error { return nil } -func copyRunDirectory(log *logger.Logger, newHash string) error { - newRunPath := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash), "run") - oldRunPath := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, release.ShortCommit()), "run") +func copyRunDirectory(log *logger.Logger, oldRunPath, newRunPath string) error { log.Infow("Copying run directory", "new_run_path", newRunPath, "old_run_path", oldRunPath) diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index 69f0b0b033f..91425bec5eb 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -97,7 +97,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { // if we're not within grace and marker is still there it might mean // that cleanup was not performed ok, cleanup everything except current version // hash is the same as hash of agent which initiated watcher. 
- if err := upgrade.Cleanup(log, release.ShortCommit(), true, false); err != nil { + if err := upgrade.Cleanup(log, marker.VersionedHome, release.ShortCommit(), true, false); err != nil { log.Error("clean up of prior watcher run failed", err) } // exit nicely @@ -114,7 +114,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { log.Error("Error detected, proceeding to rollback: %v", err) upgradeDetails.SetState(details.StateRollback) - err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash) + err = upgrade.Rollback(ctx, log, marker.PrevVersionedHome, marker.PrevHash, marker.Hash) if err != nil { log.Error("rollback failed", err) upgradeDetails.Fail(err) @@ -132,7 +132,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { // Why is this being skipped on Windows? The comment above is not clear. // issue: https://github.com/elastic/elastic-agent/issues/3027 removeMarker := !isWindows() - err = upgrade.Cleanup(log, marker.Hash, removeMarker, false) + err = upgrade.Cleanup(log, marker.VersionedHome, marker.Hash, removeMarker, false) if err != nil { log.Error("cleanup after successful watch failed", err) }