diff --git a/changelog/fragments/1706703918-Add-the-full-version-number-to-the-installation-directory-name.yaml b/changelog/fragments/1706703918-Add-the-full-version-number-to-the-installation-directory-name.yaml new file mode 100644 index 00000000000..9e439a7c284 --- /dev/null +++ b/changelog/fragments/1706703918-Add-the-full-version-number-to-the-installation-directory-name.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Add the full version number to the installation directory name + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. 
+#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/mage/settings.go b/dev-tools/mage/settings.go index a087fd51f00..772ba8b3110 100644 --- a/dev-tools/mage/settings.go +++ b/dev-tools/mage/settings.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" "github.com/magefile/mage/sh" - "golang.org/x/tools/go/vcs" + "golang.org/x/tools/go/vcs" //nolint:staticcheck // this deprecation will be handled in https://github.com/elastic/elastic-agent/issues/4138 "github.com/elastic/elastic-agent/dev-tools/mage/gotool" v1 "github.com/elastic/elastic-agent/pkg/api/v1" @@ -48,6 +48,7 @@ const ( // Mapped functions agentPackageVersionMappedFunc = "agent_package_version" agentManifestGeneratorMappedFunc = "manifest" + snapshotSuffix = "snapshot_suffix" ) // Common settings with defaults derived from files, CWD, and environment. 
@@ -108,6 +109,7 @@ var ( "contains": strings.Contains, agentPackageVersionMappedFunc: AgentPackageVersion, agentManifestGeneratorMappedFunc: PackageManifest, + snapshotSuffix: SnapshotSuffix, } ) @@ -253,6 +255,7 @@ repo.RootDir = {{ repo.RootDir }} repo.ImportPath = {{ repo.ImportPath }} repo.SubDir = {{ repo.SubDir }} agent_package_version = {{ agent_package_version}} +snapshot_suffix = {{ snapshot_suffix }} ` return Expand(dumpTemplate) @@ -309,6 +312,13 @@ func PackageManifest() (string, error) { return "", fmt.Errorf("retrieving agent package version: %w", err) } m.Package.Version = packageVersion + + hash, err := CommitHash() + if err != nil { + return "", fmt.Errorf("retrieving agent commit hash: %w", err) + } + m.Package.Hash = hash + commitHashShort, err := CommitHashShort() if err != nil { return "", fmt.Errorf("retrieving agent commit hash: %w", err) @@ -318,7 +328,7 @@ func PackageManifest() (string, error) { m.Package.VersionedHome = versionedHomePath m.Package.PathMappings = []map[string]string{{}} m.Package.PathMappings[0][versionedHomePath] = fmt.Sprintf("data/elastic-agent-%s%s-%s", m.Package.Version, SnapshotSuffix(), commitHashShort) - m.Package.PathMappings[0]["manifest.yaml"] = fmt.Sprintf("data/elastic-agent-%s%s-%s/manifest.yaml", m.Package.Version, SnapshotSuffix(), commitHashShort) + m.Package.PathMappings[0][v1.ManifestFileName] = fmt.Sprintf("data/elastic-agent-%s%s-%s/%s", m.Package.Version, SnapshotSuffix(), commitHashShort, v1.ManifestFileName) yamlBytes, err := yaml.Marshal(m) if err != nil { return "", fmt.Errorf("marshaling manifest: %w", err) diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index f648f69995f..2c928e5d858 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -201,9 +201,9 @@ func checkZip(t *testing.T, file string) { func checkManifestFileContents(t *testing.T, extractedPackageDir string) { t.Log("Checking file manifest.yaml") - 
manifestReadCloser, err := os.Open(filepath.Join(extractedPackageDir, "manifest.yaml")) + manifestReadCloser, err := os.Open(filepath.Join(extractedPackageDir, v1.ManifestFileName)) if err != nil { - t.Errorf("opening manifest %s : %v", "manifest.yaml", err) + t.Errorf("opening manifest %s : %v", v1.ManifestFileName, err) } defer func(closer io.ReadCloser) { err := closer.Close() @@ -219,6 +219,9 @@ func checkManifestFileContents(t *testing.T, extractedPackageDir string) { assert.Equal(t, v1.ManifestKind, m.Kind, "manifest specifies wrong kind") assert.Equal(t, v1.VERSION, m.Version, "manifest specifies wrong api version") + assert.NotEmpty(t, m.Package.Version, "manifest version must not be empty") + assert.NotEmpty(t, m.Package.Hash, "manifest hash must not be empty") + if assert.NotEmpty(t, m.Package.PathMappings, "path mappings in manifest are empty") { versionedHome := m.Package.VersionedHome assert.DirExistsf(t, filepath.Join(extractedPackageDir, versionedHome), "versionedHome directory %q not found in %q", versionedHome, extractedPackageDir) diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index c64a26bb0bc..b948dad3d16 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -62,19 +62,19 @@ shared: /etc/init.d/{{.BeatServiceName}}: template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{agent_package_version}}{{snapshot_suffix}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/package.version: + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{agent_package_version}}{{snapshot_suffix}}-{{ commit_short }}/package.version: content: > {{ 
agent_package_version }} mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{agent_package_version}}{{snapshot_suffix}}-{{ commit_short }}/components: source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' mode: 0755 config_mode: 0644 skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/manifest.yaml: + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{agent_package_version}}{{snapshot_suffix}}-{{ commit_short }}/manifest.yaml: mode: 0644 content: > {{ manifest }} diff --git a/dev-tools/packaging/templates/darwin/elastic-agent.tmpl b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl index 74c0f238c28..bfcc2d99a26 100644 --- a/dev-tools/packaging/templates/darwin/elastic-agent.tmpl +++ b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl @@ -6,6 +6,10 @@ set -e symlink="/Library/Elastic/Agent/elastic-agent" if test -L "$symlink"; then - ln -sfn "data/elastic-agent-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent" "$symlink" + symlinkTarget="data/elastic-agent-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent" + if test -f "data/elastic-agent-{{ agent_package_version }}{{ snapshot_suffix }}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent"; then + symlinkTarget="data/elastic-agent-{{ agent_package_version }}{{ snapshot_suffix }}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent" + fi + ln -sfn "$symlinkTarget" "$symlink" fi diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl index d96f21a8629..c1927d9d550 100644 --- a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl +++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl @@ -16,8 +16,9 @@ if test -L "$symlink"; then fi commit_hash="{{ commit_short }}" +version_dir="{{agent_package_version}}{{snapshot_suffix}}" 
-new_agent_dir="/var/lib/elastic-agent/data/elastic-agent-$commit_hash" +new_agent_dir="/var/lib/elastic-agent/data/elastic-agent-$version_dir-$commit_hash" # copy the state files if there was a previous agent install if ! [ -z "$old_agent_dir" ] && ! [ "$old_agent_dir" -ef "$new_agent_dir" ]; then diff --git a/docs/upgrades.md b/docs/upgrades.md index 36e6c6a2c23..708bce06616 100644 --- a/docs/upgrades.md +++ b/docs/upgrades.md @@ -61,3 +61,74 @@ sequenceDiagram end end ``` + +### Introducing package manifest + +Starting from version 8.13.0 an additional file `manifest.yaml` is present in elastic-agent packages. +The purpose of this file is to present some metadata and package information to be used during install/upgrade operations. + +The first enhancement that makes use of this package manifest is [#2579](https://github.com/elastic/elastic-agent/issues/2579) +as we use the manifest to map the package directory structure (based on agent commit hash) into one that takes also the +agent version into account. This allows releasing versions of the agent package where only the component versions change, +with the agent commit unchanged. + + +The [structure](../pkg/api/v1/manifest.go) of such manifest is defined in the [api/v1 package](../pkg/api/v1/). +The manifest data is generated during packaging and the file is added to the package files. This is an example of a +complete manifest: + +```yaml +version: co.elastic.agent/v1 +kind: PackageManifest +package: + version: 8.13.0 + snapshot: true + hash: 15658b38b48ba4487afadc5563b1576b85ce0264 + versioned-home: data/elastic-agent-15658b + path-mappings: + - data/elastic-agent-15658b: data/elastic-agent-8.13.0-SNAPSHOT-15658b + manifest.yaml: data/elastic-agent-8.13.0-SNAPSHOT-15658b/manifest.yaml +``` + +The package information describes the package version, whether it's a snapshot build, the elastic-agent commit hash it +has been built from and where to find the versioned home of the elastic agent within the package. 
+ +Another section lists the path mappings that must be applied by an elastic-agent that is aware of the package manifest +(version >= 8.13.0): these path mappings allow the incoming agent version to have some control over where the files in +package will be stored on disk. + +#### Upgrading without the manifest + +Legacy elastic-agent upgrade is a pretty straightforward affair: +- Download the agent package to use for upgrade +- Open the .zip or .tar.gz archive and iterate over the files + - Look for the elastic-agent commit file to retrieve the actual hash of the agent version we want to install + - Extract any package file under `/data` under the installed agent `/data` directory +- After extraction check if the hash we read from the package matches with the one from the current agent: + - if it's the same hash the upgrade fails because we are trying to upgrade to the same version + - if we extracted a package with a different hash, the upgrade keeps going +- Copy the elastic agent action store and components run directory into the new agent directories `elastic-agent-<hash>` +- Rotate the symlink in the top directory to point to the new agent executable `data/elastic-agent-<hash>/elastic-agent` +- Write the update marker containing the information about the new and old agent versions/hashes in `data` directory +- Invoke the watcher `elastic-agent watch` command to ensure that the new version of agent works correctly after restart +- Shutdown current agent and its command components, copy components state once again and restart + +#### Upgrading using the manifest + +Upgrading using the manifest allows for the new version to pass along some information about the package to the upgrading agent. 
+The new process looks like this: +- Download the elastic-agent package to use for upgrade +- Extract package metadata from the new elastic-agent package (`version`, `snapshot` and `hash`): + - if the package has a manifest we extract `version` and `snapshot` flag as declared by the package manifest + - if there is no manifest for the package we extract `version` and `snapshot` from the version string passed to the upgrader + - the `hash` is always retrieved from the agent commit file (this is always present in the package) +- compare the tuple of new `(version, snapshot, hash)` to the current `(version, snapshot, hash)`: if they are the same + the upgrade fails because we are trying to upgrade to the same version as current +- Extract any package file (after mapping it using file mappings in manifest if present) that should go under `/data`. + Return the new versionedHome (where the new version of agent has its files, returned as path relative to the top directory) +- Copy the elastic agent action store and components run directory into the new agent in `/run` +- Write the update marker containing the information about the new and old agent version, hash and home in `data` directory +- Invoke the watcher `elastic-agent watch` command to ensure that the new version of agent works correctly after restart: + - we invoke the current agent binary if the new version < 8.13.0 (needed to make sure it supports the paths written in the update marker) + - we invoke the new agent binary if the new version >= 8.13.0 +- Shutdown current agent and its command components, copy components state once again and restart diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 5a90712744a..871435e65a1 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -180,11 +180,16 @@ func ExternalInputs() string { // Data returns the data directory for Agent func Data() string {
+ return DataFrom(Top()) +} + +// DataFrom returns the data directory for Agent using the passed directory as top path +func DataFrom(topDirPath string) string { if unversionedHome { // unversioned means the topPath is the data path - return topPath + return topDirPath } - return filepath.Join(Top(), "data") + return filepath.Join(topDirPath, "data") } // Run returns the run directory for Agent diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go index bea5c8c4f23..37b2f414717 100644 --- a/internal/pkg/agent/application/upgrade/rollback.go +++ b/internal/pkg/agent/application/upgrade/rollback.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "os" + "os/exec" "path/filepath" "strings" "time" @@ -33,44 +34,64 @@ const ( ) // Rollback rollbacks to previous version which was functioning before upgrade. -func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentHash string) error { +func Rollback(ctx context.Context, log *logger.Logger, c client.Client, topDirPath, prevVersionedHome, prevHash string) error { + symlinkPath := filepath.Join(topDirPath, agentName) + + var symlinkTarget string + if prevVersionedHome != "" { + symlinkTarget = paths.BinaryPath(filepath.Join(topDirPath, prevVersionedHome), agentName) + } else { + // fallback for upgrades that didn't use the manifest and path remapping + hashedDir := fmt.Sprintf("%s-%s", agentName, prevHash) + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. 
+ symlinkTarget = paths.BinaryPath(filepath.Join(paths.DataFrom(topDirPath), hashedDir), agentName) + } // change symlink - if err := ChangeSymlink(ctx, log, prevHash); err != nil { + if err := changeSymlink(log, topDirPath, symlinkPath, symlinkTarget); err != nil { return err } // revert active commit - if err := UpdateActiveCommit(log, prevHash); err != nil { + if err := UpdateActiveCommit(log, topDirPath, prevHash); err != nil { return err } // Restart log.Info("Restarting the agent after rollback") - if err := restartAgent(ctx, log); err != nil { + if err := restartAgent(ctx, log, c); err != nil { return err } // cleanup everything except version we're rolling back into - return Cleanup(log, prevHash, true, true) + return Cleanup(log, topDirPath, prevVersionedHome, prevHash, true, true) } // Cleanup removes all artifacts and files related to a specified version. -func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs bool) error { +func Cleanup(log *logger.Logger, topDirPath, currentVersionedHome, currentHash string, removeMarker, keepLogs bool) error { log.Infow("Cleaning up upgrade", "hash", currentHash, "remove_marker", removeMarker) <-time.After(afterRestartDelay) + // data directory path + dataDirPath := paths.DataFrom(topDirPath) + // remove upgrade marker if removeMarker { - if err := CleanMarker(log); err != nil { + if err := CleanMarker(log, dataDirPath); err != nil { return err } } // remove data/elastic-agent-{hash} - dataDir, err := os.Open(paths.Data()) + dataDir, err := os.Open(dataDirPath) if err != nil { return err } + defer func(dataDir *os.File) { + err := dataDir.Close() + if err != nil { + log.Errorw("Error closing data directory", "file.directory", dataDirPath) + } + }(dataDir) subdirs, err := dataDir.Readdirnames(0) if err != nil { @@ -78,12 +99,21 @@ func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs } // remove symlink to avoid upgrade failures, ignore error - prevSymlink := 
prevSymlinkPath() - log.Infow("Removing previous symlink path", "file.path", prevSymlinkPath()) + prevSymlink := prevSymlinkPath(topDirPath) + log.Infow("Removing previous symlink path", "file.path", prevSymlinkPath(topDirPath)) _ = os.Remove(prevSymlink) dirPrefix := fmt.Sprintf("%s-", agentName) - currentDir := fmt.Sprintf("%s-%s", agentName, currentHash) + var currentDir string + if currentVersionedHome != "" { + currentDir, err = filepath.Rel("data", currentVersionedHome) + if err != nil { + return fmt.Errorf("extracting elastic-agent path relative to data directory from %s: %w", currentVersionedHome, err) + } + } else { + currentDir = fmt.Sprintf("%s-%s", agentName, currentHash) + } + for _, dir := range subdirs { if dir == currentDir { continue @@ -93,7 +123,7 @@ func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs continue } - hashedDir := filepath.Join(paths.Data(), dir) + hashedDir := filepath.Join(dataDirPath, dir) log.Infow("Removing hashed data directory", "file.path", hashedDir) var ignoredDirs []string if keepLogs { @@ -109,13 +139,13 @@ func Cleanup(log *logger.Logger, currentHash string, removeMarker bool, keepLogs // InvokeWatcher invokes an agent instance using watcher argument for watching behavior of // agent during upgrade period. 
-func InvokeWatcher(log *logger.Logger) error { +func InvokeWatcher(log *logger.Logger, agentExecutable string) (*exec.Cmd, error) { if !IsUpgradeable() { log.Info("agent is not upgradable, not starting watcher") - return nil + return nil, nil } - cmd := invokeCmd() + cmd := invokeCmd(agentExecutable) defer func() { if cmd.Process != nil { log.Infof("releasing watcher %v", cmd.Process.Pid) @@ -125,19 +155,18 @@ func InvokeWatcher(log *logger.Logger) error { log.Infow("Starting upgrade watcher", "path", cmd.Path, "args", cmd.Args, "env", cmd.Env, "dir", cmd.Dir) if err := cmd.Start(); err != nil { - return fmt.Errorf("failed to start Upgrade Watcher: %w", err) + return nil, fmt.Errorf("failed to start Upgrade Watcher: %w", err) } upgradeWatcherPID := cmd.Process.Pid agentPID := os.Getpid() log.Infow("Upgrade Watcher invoked", "agent.upgrade.watcher.process.pid", upgradeWatcherPID, "agent.process.pid", agentPID) - return nil + return cmd, nil } -func restartAgent(ctx context.Context, log *logger.Logger) error { +func restartAgent(ctx context.Context, log *logger.Logger, c client.Client) error { restartViaDaemonFn := func(ctx context.Context) error { - c := client.New() connectCtx, connectCancel := context.WithTimeout(ctx, 3*time.Second) defer connectCancel() err := c.Connect(connectCtx, grpc.WithBlock(), grpc.WithDisableRetry()) diff --git a/internal/pkg/agent/application/upgrade/rollback_darwin.go b/internal/pkg/agent/application/upgrade/rollback_darwin.go index 5ab2ab4cd01..3c9b497cbfa 100644 --- a/internal/pkg/agent/application/upgrade/rollback_darwin.go +++ b/internal/pkg/agent/application/upgrade/rollback_darwin.go @@ -21,9 +21,9 @@ const ( afterRestartDelay = 2 * time.Second ) -func invokeCmd() *exec.Cmd { +func invokeCmd(agentExecutable string) *exec.Cmd { // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + cmd := exec.Command(agentExecutable, watcherSubcommand, "--path.config", 
paths.Config(), "--path.home", paths.Top(), ) diff --git a/internal/pkg/agent/application/upgrade/rollback_linux.go b/internal/pkg/agent/application/upgrade/rollback_linux.go index 934e3953fa0..846e4978dfa 100644 --- a/internal/pkg/agent/application/upgrade/rollback_linux.go +++ b/internal/pkg/agent/application/upgrade/rollback_linux.go @@ -21,9 +21,9 @@ const ( afterRestartDelay = 2 * time.Second ) -func invokeCmd() *exec.Cmd { +func invokeCmd(agentExecutable string) *exec.Cmd { // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + cmd := exec.Command(agentExecutable, watcherSubcommand, "--path.config", paths.Config(), "--path.home", paths.Top(), ) diff --git a/internal/pkg/agent/application/upgrade/rollback_test.go b/internal/pkg/agent/application/upgrade/rollback_test.go new file mode 100644 index 00000000000..833844cdd1f --- /dev/null +++ b/internal/pkg/agent/application/upgrade/rollback_test.go @@ -0,0 +1,512 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package upgrade + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/control/v2/client/mocks" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +type hookFunc func(t *testing.T, topDir string) + +type testAgentVersion struct { + version string + hash string +} + +type testAgentInstall struct { + version testAgentVersion + useVersionInPath bool +} + +type setupAgentInstallations struct { + installedAgents []testAgentInstall + upgradeFrom testAgentVersion + upgradeTo testAgentVersion + currentAgent testAgentVersion +} + +var ( + version123Snapshot = testAgentVersion{ + version: "1.2.3-SNAPSHOT", + hash: "abcdef", + } + version456Snapshot = testAgentVersion{ + version: "4.5.6-SNAPSHOT", + hash: "ghijkl", + } +) + +func TestCleanup(t *testing.T) { + type args struct { + currentVersionedHome string + currentHash string + removeMarker bool + keepLogs bool + } + + tests := map[string]struct { + args args + agentInstallsSetup setupAgentInstallations + additionalSetup hookFunc + wantErr assert.ErrorAssertionFunc + checkAfterCleanup hookFunc + }{ + "cleanup without versionedHome (legacy upgrade process)": { + args: args{ + currentVersionedHome: "data/elastic-agent-ghijkl", + currentHash: "ghijkl", + removeMarker: true, + keepLogs: false, + }, + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: false, + }, + { + version: version456Snapshot, + useVersionInPath: false, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterCleanup: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", 
"elastic-agent-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-ghijkl") + checkFilesAfterCleanup(t, topDir, newAgentHome, oldAgentHome) + }, + }, + "cleanup with versionedHome (new upgrade process)": { + args: args{ + currentVersionedHome: "data/elastic-agent-4.5.6-SNAPSHOT-ghijkl", + currentHash: "ghijkl", + removeMarker: true, + keepLogs: false, + }, + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: true, + }, + { + version: version456Snapshot, + useVersionInPath: true, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterCleanup: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-4.5.6-SNAPSHOT-ghijkl") + checkFilesAfterCleanup(t, topDir, newAgentHome, oldAgentHome) + }, + }, + "cleanup with versionedHome only on the new agent (new upgrade process from an old agent upgraded with legacy process)": { + args: args{ + currentVersionedHome: "data/elastic-agent-4.5.6-SNAPSHOT-ghijkl", + currentHash: "ghijkl", + removeMarker: true, + keepLogs: false, + }, + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: false, + }, + { + version: version456Snapshot, + useVersionInPath: true, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterCleanup: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", "elastic-agent-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-4.5.6-SNAPSHOT-ghijkl") + checkFilesAfterCleanup(t, topDir, newAgentHome, oldAgentHome) + }, + }, + "cleanup with versionedHome only on the new agent + extra old 
agent installs": { + args: args{ + currentVersionedHome: "data/elastic-agent-4.5.6-SNAPSHOT-ghijkl", + currentHash: "ghijkl", + removeMarker: true, + keepLogs: false, + }, + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: testAgentVersion{ + version: "0.9.9", + hash: "aaaaaa", + }, + useVersionInPath: false, + }, + { + version: testAgentVersion{ + version: "1.1.1", + hash: "aaabbb", + }, + useVersionInPath: false, + }, + { + version: version123Snapshot, + useVersionInPath: false, + }, + { + version: version456Snapshot, + useVersionInPath: true, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterCleanup: func(t *testing.T, topDir string) { + newAgentHome := filepath.Join("data", "elastic-agent-4.5.6-SNAPSHOT-ghijkl") + oldAgentHomes := []string{ + filepath.Join("data", "elastic-agent-abcdef"), + filepath.Join("data", "elastic-agent-aaabbb"), + filepath.Join("data", "elastic-agent-aaaaaa"), + } + + checkFilesAfterCleanup(t, topDir, newAgentHome, oldAgentHomes...) 
+ }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + testLogger, _ := logger.NewTesting(t.Name()) + testTop := t.TempDir() + setupAgents(t, testLogger, testTop, tt.agentInstallsSetup) + if tt.additionalSetup != nil { + tt.additionalSetup(t, testTop) + } + marker, err := LoadMarker(paths.DataFrom(testTop)) + require.NoError(t, err, "error loading update marker") + require.NotNil(t, marker, "loaded marker must not be nil") + t.Logf("Loaded update marker %+v", marker) + tt.wantErr(t, Cleanup(testLogger, testTop, marker.VersionedHome, marker.Hash, tt.args.removeMarker, tt.args.keepLogs), fmt.Sprintf("Cleanup(%v, %v, %v, %v)", marker.VersionedHome, marker.Hash, tt.args.removeMarker, tt.args.keepLogs)) + tt.checkAfterCleanup(t, testTop) + }) + } +} + +func TestRollback(t *testing.T) { + tests := map[string]struct { + agentInstallsSetup setupAgentInstallations + additionalSetup hookFunc + wantErr assert.ErrorAssertionFunc + checkAfterRollback hookFunc + }{ + "rollback without versionedHome (legacy upgrade process)": { + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: false, + }, + { + version: version456Snapshot, + useVersionInPath: false, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterRollback: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", "elastic-agent-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-ghijkl") + checkFilesAfterRollback(t, topDir, oldAgentHome, newAgentHome) + }, + }, + "rollback with versionedHome (new upgrade process)": { + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: true, + }, + { + version: version456Snapshot, + useVersionInPath: true, + }, + }, + upgradeFrom: version123Snapshot, + 
upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterRollback: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-4.5.6-SNAPSHOT-ghijkl") + checkFilesAfterRollback(t, topDir, oldAgentHome, newAgentHome) + }, + }, + "rollback with versionedHome only on the new agent (new upgrade process from an old agent upgraded with legacy process)": { + agentInstallsSetup: setupAgentInstallations{ + installedAgents: []testAgentInstall{ + { + version: version123Snapshot, + useVersionInPath: false, + }, + { + version: version456Snapshot, + useVersionInPath: true, + }, + }, + upgradeFrom: version123Snapshot, + upgradeTo: version456Snapshot, + currentAgent: version456Snapshot, + }, + wantErr: assert.NoError, + checkAfterRollback: func(t *testing.T, topDir string) { + oldAgentHome := filepath.Join("data", "elastic-agent-abcdef") + newAgentHome := filepath.Join("data", "elastic-agent-4.5.6-SNAPSHOT-ghijkl") + checkFilesAfterRollback(t, topDir, oldAgentHome, newAgentHome) + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + testLogger, _ := logger.NewTesting(t.Name()) + testTop := t.TempDir() + setupAgents(t, testLogger, testTop, tt.agentInstallsSetup) + if tt.additionalSetup != nil { + tt.additionalSetup(t, testTop) + } + marker, err := LoadMarker(paths.DataFrom(testTop)) + require.NoError(t, err, "error loading update marker") + require.NotNil(t, marker, "loaded marker must not be nil") + t.Logf("Loaded update marker %+v", marker) + + // mock client + mockClient := mocks.NewClient(t) + mockClient.EXPECT().Connect( + mock.AnythingOfType("*context.timerCtx"), + mock.AnythingOfType("*grpc.funcDialOption"), + mock.AnythingOfType("*grpc.funcDialOption"), + ).Return(nil) + mockClient.EXPECT().Disconnect().Return() + mockClient.EXPECT().Restart(mock.Anything).Return(nil).Once() + + ctx := 
context.TODO() + tt.wantErr(t, Rollback(ctx, testLogger, mockClient, testTop, marker.PrevVersionedHome, marker.PrevHash), fmt.Sprintf("Rollback(%v, %v, %v, %v, %v, %v)", ctx, testLogger, mockClient, testTop, marker.PrevVersionedHome, marker.PrevHash)) + tt.checkAfterRollback(t, testTop) + }) + } +} + +// checkFilesAfterCleanup is a convenience function to check the file structure within topDir. +// *AgentHome paths must be the expected old and new agent paths relative to topDir (typically in the form of "data/elastic-agent-*") +func checkFilesAfterCleanup(t *testing.T, topDir, newAgentHome string, oldAgentHomes ...string) { + t.Helper() + // the old agent directories must not exist anymore + for _, oldAgentHome := range oldAgentHomes { + assert.NoDirExistsf(t, filepath.Join(topDir, oldAgentHome), "old agent directory %q should be deleted after cleanup", oldAgentHome) + } + + // check the new agent home + assert.DirExists(t, filepath.Join(topDir, newAgentHome), "new agent directory should exist after cleanup") + agentExecutable := agentName + if runtime.GOOS == "windows" { + agentExecutable += ".exe" + } + symlinkPath := filepath.Join(topDir, agentExecutable) + linkTarget, err := os.Readlink(symlinkPath) + if assert.NoError(t, err, "unable to read symbolic link") { + assert.Equal(t, paths.BinaryPath(newAgentHome, agentExecutable), linkTarget, "symbolic link should point to the new agent executable after cleanup") + } + + // read the elastic agent placeholder via the symlink + elasticAgentBytes, err := os.ReadFile(symlinkPath) + if assert.NoError(t, err, "error reading elastic-agent content through the symlink") { + assert.Equal(t, []byte("Placeholder for agent 4.5.6-SNAPSHOT"), elasticAgentBytes, "reading elastic-agent content through symbolic link should point to the new version") + } + + assert.NoFileExists(t, filepath.Join(topDir, "data", markerFilename), "update marker should have been cleaned up") +} + +// checkFilesAfterRollback is a convenience function to 
check the file structure within topDir. +// *AgentHome paths must be the expected old and new agent paths relative to topDir (typically in the form of "data/elastic-agent-*") +func checkFilesAfterRollback(t *testing.T, topDir, oldAgentHome, newAgentHome string) { + t.Helper() + // the new agent directory must still exist (for the logs) + assert.DirExists(t, filepath.Join(topDir, newAgentHome), "new agent directory should still exist after rollback") + assert.DirExists(t, filepath.Join(topDir, newAgentHome, "logs"), "new agent logs directory should still exist after rollback") + // some things should have been removed from the new agent directory + assert.NoDirExists(t, filepath.Join(topDir, newAgentHome, "components"), "new agent components directory should have been cleaned up in the rollback") + assert.NoDirExists(t, filepath.Join(topDir, newAgentHome, "run"), "new agent run directory should have been cleaned up in the rollback") + assert.NoFileExists(t, filepath.Join(topDir, newAgentHome, agentName), "new agent binary should have been cleaned up in the rollback") + + // check the old agent home + assert.DirExists(t, filepath.Join(topDir, oldAgentHome), "old agent directory should exist after rollback") + agentExecutable := agentName + if runtime.GOOS == "windows" { + agentExecutable += ".exe" + } + symlinkPath := filepath.Join(topDir, agentExecutable) + linkTarget, err := os.Readlink(symlinkPath) + if assert.NoError(t, err, "unable to read symbolic link") { + // Due to the unique way the rollback process works we end up with an absolute path in the link + expectedLinkTargetAfterRollback := paths.BinaryPath(filepath.Join(topDir, oldAgentHome), agentExecutable) + assert.Equal(t, expectedLinkTargetAfterRollback, linkTarget, "symbolic link should point to the old agent executable after rollback") + } + + // read the elastic agent placeholder via the symlink + elasticAgentBytes, err := os.ReadFile(symlinkPath) + if assert.NoError(t, err, "error reading elastic-agent 
content through the symlink") { + assert.Equal(t, []byte("Placeholder for agent 1.2.3-SNAPSHOT"), elasticAgentBytes, "reading elastic-agent content through symbolic link should point to the old version") + } + + assert.NoFileExists(t, filepath.Join(topDir, "data", markerFilename), "update marker should have been cleaned up") +} + +// setupAgents create fake agent installs, update marker file and symlink pointing to one of the installations' elastic-agent placeholder +func setupAgents(t *testing.T, log *logger.Logger, topDir string, installations setupAgentInstallations) { + + var ( + oldAgentVersion testAgentVersion + oldAgentVersionedHome string + newAgentVersion testAgentVersion + newAgentVersionedHome string + useNewMarker bool + ) + for _, ia := range installations.installedAgents { + versionedHome := createFakeAgentInstall(t, topDir, ia.version.version, ia.version.hash, ia.useVersionInPath) + t.Logf("Created fake agent install for %+v located at %q", ia, versionedHome) + if installations.upgradeFrom == ia.version { + t.Logf("Setting version %+v as FROM version for the update marker", ia.version) + oldAgentVersion = ia.version + oldAgentVersionedHome = versionedHome + } + + if installations.upgradeTo == ia.version { + t.Logf("Setting version %+v as TO version for the update marker", ia.version) + newAgentVersion = ia.version + newAgentVersionedHome = versionedHome + useNewMarker = ia.useVersionInPath + } + + if installations.currentAgent == ia.version { + t.Logf("Creating symlink pointing to %q", versionedHome) + createLink(t, topDir, versionedHome) + } + } + + t.Logf("Creating upgrade marker from %+v located at %q to %+v located at %q", oldAgentVersion, oldAgentVersionedHome, newAgentVersion, newAgentVersionedHome) + createUpdateMarker(t, log, topDir, newAgentVersion.version, newAgentVersion.hash, newAgentVersionedHome, oldAgentVersion.version, oldAgentVersion.hash, oldAgentVersionedHome, useNewMarker) +} + +// createFakeAgentInstall will create a mock agent 
install within topDir, possibly using the version in the directory name, depending on useVersionInPath +// it MUST return the path to the created versionedHome relative to topDir, to mirror what step_unpack returns +func createFakeAgentInstall(t *testing.T, topDir, version, hash string, useVersionInPath bool) string { + + // create versioned home + versionedHome := fmt.Sprintf("elastic-agent-%s", hash[:hashLen]) + if useVersionInPath { + // use the version passed as parameter + versionedHome = fmt.Sprintf("elastic-agent-%s-%s", version, hash[:hashLen]) + } + relVersionedHomePath := filepath.Join("data", versionedHome) + absVersionedHomePath := filepath.Join(topDir, relVersionedHomePath) + + // recalculate the binary path and launch a mkDirAll to account for MacOS weirdness + // (the extra nesting of elastic agent binary within versionedHome) + absVersionedHomeBinaryPath := paths.BinaryPath(absVersionedHomePath, "") + err := os.MkdirAll(absVersionedHomeBinaryPath, 0o750) + require.NoError(t, err, "error creating fake install versioned home directory (including binary path) %q", absVersionedHomeBinaryPath) + + // place a few directories in the fake install + absComponentsDirPath := filepath.Join(absVersionedHomePath, "components") + err = os.MkdirAll(absComponentsDirPath, 0o750) + require.NoError(t, err, "error creating fake install components directory %q", absComponentsDirPath) + + absLogsDirPath := filepath.Join(absVersionedHomePath, "logs") + err = os.MkdirAll(absLogsDirPath, 0o750) + require.NoError(t, err, "error creating fake install logs directory %q", absLogsDirPath) + + absRunDirPath := filepath.Join(absVersionedHomePath, "run") + err = os.MkdirAll(absRunDirPath, 0o750) + require.NoError(t, err, "error creating fake install run directory %q", absRunDirPath) + + // put some placeholder for files + agentExecutableName := agentName + if runtime.GOOS == "windows" { + agentExecutableName += ".exe" + } + err = os.WriteFile(paths.BinaryPath(absVersionedHomePath,
agentExecutableName), []byte(fmt.Sprintf("Placeholder for agent %s", version)), 0o750) + require.NoErrorf(t, err, "error writing elastic agent binary placeholder %q", agentExecutableName) + err = os.WriteFile(filepath.Join(absLogsDirPath, "fakelog.ndjson"), []byte(fmt.Sprintf("Sample logs for agent %s", version)), 0o750) + require.NoErrorf(t, err, "error writing fake log placeholder %q", "fakelog.ndjson") + + // return the path relative to top exactly like the step_unpack does + return relVersionedHomePath +} + +func createLink(t *testing.T, topDir string, currentAgentVersionedHome string) { + linkTarget := paths.BinaryPath(currentAgentVersionedHome, agentName) + linkName := agentName + if runtime.GOOS == "windows" { + linkTarget += ".exe" + linkName += ".exe" + } + err := os.Symlink(linkTarget, filepath.Join(topDir, linkName)) + require.NoError(t, err, "error creating test symlink to fake agent install") +} + +func createUpdateMarker(t *testing.T, log *logger.Logger, topDir, newAgentVersion, newAgentHash, newAgentVersionedHome, oldAgentVersion, oldAgentHash, oldAgentVersionedHome string, useNewMarker bool) { + + if !useNewMarker { + newAgentVersion = "" + newAgentVersionedHome = "" + oldAgentVersionedHome = "" + } + + newAgentInstall := agentInstall{ + version: newAgentVersion, + hash: newAgentHash, + versionedHome: newAgentVersionedHome, + } + oldAgentInstall := agentInstall{ + version: oldAgentVersion, + hash: oldAgentHash, + versionedHome: oldAgentVersionedHome, + } + err := markUpgrade(log, + paths.DataFrom(topDir), + newAgentInstall, + oldAgentInstall, + nil, nil) + require.NoError(t, err, "error writing fake update marker") +} diff --git a/internal/pkg/agent/application/upgrade/rollback_windows.go b/internal/pkg/agent/application/upgrade/rollback_windows.go index 2315202e770..ca715354e3b 100644 --- a/internal/pkg/agent/application/upgrade/rollback_windows.go +++ b/internal/pkg/agent/application/upgrade/rollback_windows.go @@ -19,9 +19,9 @@ const ( afterRestartDelay = 15 *
time.Second ) -func invokeCmd() *exec.Cmd { +func invokeCmd(agentExecutable string) *exec.Cmd { // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + cmd := exec.Command(agentExecutable, watcherSubcommand, "--path.config", paths.Config(), "--path.home", paths.Top(), ) diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 579ec656f55..943eaf1ed08 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -40,19 +40,14 @@ type downloaderFactory func(*agtversion.ParsedSemVer, *logger.Logger, *artifact. type downloader func(context.Context, downloaderFactory, *agtversion.ParsedSemVer, *artifact.Config, *details.Details) (string, error) -func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI string, upgradeDetails *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ string, err error) { +func (u *Upgrader) downloadArtifact(ctx context.Context, parsedVersion *agtversion.ParsedSemVer, sourceURI string, upgradeDetails *details.Details, skipVerifyOverride, skipDefaultPgp bool, pgpBytes ...string) (_ string, err error) { span, ctx := apm.StartSpan(ctx, "downloadArtifact", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() span.End() }() - pgpBytes = u.appendFallbackPGP(version, pgpBytes) - - parsedVersion, err := agtversion.ParseVersion(version) - if err != nil { - return "", fmt.Errorf("error parsing version %q: %w", version, err) - } + pgpBytes = u.appendFallbackPGP(parsedVersion, pgpBytes) // do not update source config settings := *u.settings @@ -82,7 +77,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } // log that a local upgrade artifact is being used - u.log.Infow("Using local upgrade artifact", "version", version, + 
u.log.Infow("Using local upgrade artifact", "version", parsedVersion, "drop_path", settings.DropPath, "target_path", settings.TargetDirectory, "install_path", settings.InstallPath) } else { @@ -93,7 +88,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri if factory == nil { // set the factory to the newDownloader factory factory = newDownloader - u.log.Infow("Downloading upgrade artifact", "version", version, + u.log.Infow("Downloading upgrade artifact", "version", parsedVersion, "source_uri", settings.SourceURI, "drop_path", settings.DropPath, "target_path", settings.TargetDirectory, "install_path", settings.InstallPath) } @@ -127,7 +122,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri return path, nil } -func (u *Upgrader) appendFallbackPGP(targetVersion string, pgpBytes []string) []string { +func (u *Upgrader) appendFallbackPGP(targetVersion *agtversion.ParsedSemVer, pgpBytes []string) []string { if pgpBytes == nil { pgpBytes = make([]string, 0, 1) } @@ -138,21 +133,15 @@ func (u *Upgrader) appendFallbackPGP(targetVersion string, pgpBytes []string) [] // add a secondary fallback if fleet server is configured u.log.Debugf("Considering fleet server uri for pgp check fallback %q", u.fleetServerURI) if u.fleetServerURI != "" { - tpv, err := agtversion.ParseVersion(targetVersion) + secondaryPath, err := url.JoinPath( + u.fleetServerURI, + fmt.Sprintf(fleetUpgradeFallbackPGPFormat, targetVersion.Major(), targetVersion.Minor(), targetVersion.Patch()), + ) if err != nil { - // best effort, log failure - u.log.Warnf("failed to parse agent version (%q) for secondary GPG fallback: %v", targetVersion, err) + u.log.Warnf("failed to compose Fleet Server URI: %v", err) } else { - secondaryPath, err := url.JoinPath( - u.fleetServerURI, - fmt.Sprintf(fleetUpgradeFallbackPGPFormat, tpv.Major(), tpv.Minor(), tpv.Patch()), - ) - if err != nil { - u.log.Warnf("failed to compose Fleet Server URI: %v", err) - } 
else { - secondaryFallback := download.PgpSourceURIPrefix + secondaryPath - pgpBytes = append(pgpBytes, secondaryFallback) - } + secondaryFallback := download.PgpSourceURIPrefix + secondaryPath + pgpBytes = append(pgpBytes, secondaryFallback) } } diff --git a/internal/pkg/agent/application/upgrade/step_download_test.go b/internal/pkg/agent/application/upgrade/step_download_test.go index af485aaca77..5718b082a38 100644 --- a/internal/pkg/agent/application/upgrade/step_download_test.go +++ b/internal/pkg/agent/application/upgrade/step_download_test.go @@ -33,6 +33,7 @@ func (md *mockDownloader) Download(ctx context.Context, a artifact.Artifact, ver } func TestFallbackIsAppended(t *testing.T) { + testAgentVersion123 := agtversion.NewParsedSemVer(1, 2, 3, "", "") testCases := []struct { name string passedBytes []string @@ -40,14 +41,13 @@ func TestFallbackIsAppended(t *testing.T) { expectedDefaultIdx int expectedSecondaryIdx int fleetServerURI string - targetVersion string + targetVersion *agtversion.ParsedSemVer }{ - {"nil input", nil, 1, 0, -1, "", ""}, - {"empty input", []string{}, 1, 0, -1, "", ""}, - {"valid input with pgp", []string{"pgp-bytes"}, 2, 1, -1, "", ""}, - {"valid input with pgp and version, no fleet uri", []string{"pgp-bytes"}, 2, 1, -1, "", "1.2.3"}, - {"valid input with pgp and version and fleet uri", []string{"pgp-bytes"}, 3, 1, 2, "some-uri", "1.2.3"}, - {"valid input with pgp and fleet uri no version", []string{"pgp-bytes"}, 2, 1, -1, "some-uri", ""}, + {"nil input", nil, 1, 0, -1, "", testAgentVersion123}, + {"empty input", []string{}, 1, 0, -1, "", testAgentVersion123}, + {"valid input with pgp", []string{"pgp-bytes"}, 2, 1, -1, "", nil}, + {"valid input with pgp and version, no fleet uri", []string{"pgp-bytes"}, 2, 1, -1, "", testAgentVersion123}, + {"valid input with pgp and version and fleet uri", []string{"pgp-bytes"}, 3, 1, 2, "some-uri", testAgentVersion123}, } for _, tc := range testCases { @@ -65,7 +65,7 @@ func TestFallbackIsAppended(t 
*testing.T) { if tc.expectedSecondaryIdx >= 0 { // last element is fleet uri - expectedPgpURI := download.PgpSourceURIPrefix + tc.fleetServerURI + strings.Replace(fleetUpgradeFallbackPGPFormat, "%d.%d.%d", tc.targetVersion, 1) + expectedPgpURI := download.PgpSourceURIPrefix + tc.fleetServerURI + strings.Replace(fleetUpgradeFallbackPGPFormat, "%d.%d.%d", tc.targetVersion.CoreVersion(), 1) require.Equal(t, expectedPgpURI, res[len(res)-1]) } }) diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 90bc11dfda6..44a869ee2dd 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -5,7 +5,6 @@ package upgrade import ( - "context" "os" "path/filepath" "time" @@ -16,7 +15,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -24,8 +22,13 @@ const markerFilename = ".update-marker" // UpdateMarker is a marker holding necessary information about ongoing upgrade. 
type UpdateMarker struct { + // Version represents the version the agent is upgraded to + Version string `json:"version" yaml:"version"` // Hash agent is updated to Hash string `json:"hash" yaml:"hash"` + // VersionedHome represents the path where the new agent is located relative to top path + VersionedHome string `json:"versioned_home" yaml:"versioned_home"` + //UpdatedOn marks a date when update happened UpdatedOn time.Time `json:"updated_on" yaml:"updated_on"` @@ -33,6 +36,8 @@ type UpdateMarker struct { PrevVersion string `json:"prev_version" yaml:"prev_version"` // PrevHash is a hash agent is updated from PrevHash string `json:"prev_hash" yaml:"prev_hash"` + // PrevVersionedHome represents the path where the old agent is located relative to top path + PrevVersionedHome string `json:"prev_versioned_home" yaml:"prev_versioned_home"` // Acked is a flag marking whether or not action was acked Acked bool `json:"acked" yaml:"acked"` @@ -83,42 +88,56 @@ func convertToActionUpgrade(a *MarkerActionUpgrade) *fleetapi.ActionUpgrade { } type updateMarkerSerializer struct { - Hash string `yaml:"hash"` - UpdatedOn time.Time `yaml:"updated_on"` - PrevVersion string `yaml:"prev_version"` - PrevHash string `yaml:"prev_hash"` - Acked bool `yaml:"acked"` - Action *MarkerActionUpgrade `yaml:"action"` - Details *details.Details `yaml:"details"` + Version string `yaml:"version"` + Hash string `yaml:"hash"` + VersionedHome string `yaml:"versioned_home"` + UpdatedOn time.Time `yaml:"updated_on"` + PrevVersion string `yaml:"prev_version"` + PrevHash string `yaml:"prev_hash"` + PrevVersionedHome string `yaml:"prev_versioned_home"` + Acked bool `yaml:"acked"` + Action *MarkerActionUpgrade `yaml:"action"` + Details *details.Details `yaml:"details"` } func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { return &updateMarkerSerializer{ - Hash: m.Hash, - UpdatedOn: m.UpdatedOn, - PrevVersion: m.PrevVersion, - PrevHash: m.PrevHash, - Acked: m.Acked, - Action: 
convertToMarkerAction(m.Action), - Details: m.Details, + Version: m.Version, + Hash: m.Hash, + VersionedHome: m.VersionedHome, + UpdatedOn: m.UpdatedOn, + PrevVersion: m.PrevVersion, + PrevHash: m.PrevHash, + PrevVersionedHome: m.PrevVersionedHome, + Acked: m.Acked, + Action: convertToMarkerAction(m.Action), + Details: m.Details, } } +type agentInstall struct { + version string + hash string + versionedHome string +} + // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action *fleetapi.ActionUpgrade, upgradeDetails *details.Details) error { - prevVersion := release.Version() - prevHash := release.Commit() - if len(prevHash) > hashLen { - prevHash = prevHash[:hashLen] +func markUpgrade(log *logger.Logger, dataDirPath string, agent, previousAgent agentInstall, action *fleetapi.ActionUpgrade, upgradeDetails *details.Details) error { + + if len(previousAgent.hash) > hashLen { + previousAgent.hash = previousAgent.hash[:hashLen] } marker := &UpdateMarker{ - Hash: hash, - UpdatedOn: time.Now(), - PrevVersion: prevVersion, - PrevHash: prevHash, - Action: action, - Details: upgradeDetails, + Version: agent.version, + Hash: agent.hash, + VersionedHome: agent.versionedHome, + UpdatedOn: time.Now(), + PrevVersion: previousAgent.version, + PrevHash: previousAgent.hash, + PrevVersionedHome: previousAgent.versionedHome, + Action: action, + Details: upgradeDetails, } markerBytes, err := yaml.Marshal(newMarkerSerializer(marker)) @@ -126,13 +145,13 @@ func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash strin return errors.New(err, errors.TypeConfig, "failed to parse marker file") } - markerPath := markerFilePath() - log.Infow("Writing upgrade marker file", "file.path", markerPath, "hash", marker.Hash, "prev_hash", prevHash) + markerPath := markerFilePath(dataDirPath) + log.Infow("Writing upgrade marker file", "file.path", markerPath, "hash", marker.Hash, 
"prev_hash", marker.PrevHash) if err := os.WriteFile(markerPath, markerBytes, 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to create update marker file", errors.M(errors.MetaKeyPath, markerPath)) } - if err := UpdateActiveCommit(log, hash); err != nil { + if err := UpdateActiveCommit(log, paths.Top(), agent.hash); err != nil { return err } @@ -140,8 +159,8 @@ func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash strin } // UpdateActiveCommit updates active.commit file to point to active version. -func UpdateActiveCommit(log *logger.Logger, hash string) error { - activeCommitPath := filepath.Join(paths.Top(), agentCommitFile) +func UpdateActiveCommit(log *logger.Logger, topDirPath, hash string) error { + activeCommitPath := filepath.Join(topDirPath, agentCommitFile) log.Infow("Updating active commit", "file.path", activeCommitPath, "hash", hash) if err := os.WriteFile(activeCommitPath, []byte(hash), 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update active commit", errors.M(errors.MetaKeyPath, activeCommitPath)) @@ -151,8 +170,8 @@ func UpdateActiveCommit(log *logger.Logger, hash string) error { } // CleanMarker removes a marker from disk. -func CleanMarker(log *logger.Logger) error { - markerFile := markerFilePath() +func CleanMarker(log *logger.Logger, dataDirPath string) error { + markerFile := markerFilePath(dataDirPath) log.Infow("Removing marker file", "file.path", markerFile) if err := os.Remove(markerFile); !os.IsNotExist(err) { return err @@ -163,8 +182,8 @@ func CleanMarker(log *logger.Logger) error { // LoadMarker loads the update marker. If the file does not exist it returns nil // and no error. 
-func LoadMarker() (*UpdateMarker, error) { - return loadMarker(markerFilePath()) +func LoadMarker(dataDirPath string) (*UpdateMarker, error) { + return loadMarker(markerFilePath(dataDirPath)) } func loadMarker(markerFile string) (*UpdateMarker, error) { @@ -183,13 +202,16 @@ func loadMarker(markerFile string) (*UpdateMarker, error) { } return &UpdateMarker{ - Hash: marker.Hash, - UpdatedOn: marker.UpdatedOn, - PrevVersion: marker.PrevVersion, - PrevHash: marker.PrevHash, - Acked: marker.Acked, - Action: convertToActionUpgrade(marker.Action), - Details: marker.Details, + Version: marker.Version, + Hash: marker.Hash, + VersionedHome: marker.VersionedHome, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + PrevVersionedHome: marker.PrevVersionedHome, + Acked: marker.Acked, + Action: convertToActionUpgrade(marker.Action), + Details: marker.Details, }, nil } @@ -198,22 +220,25 @@ func loadMarker(markerFile string) (*UpdateMarker, error) { // file is immediately flushed to persistent storage. 
func SaveMarker(marker *UpdateMarker, shouldFsync bool) error { makerSerializer := &updateMarkerSerializer{ - Hash: marker.Hash, - UpdatedOn: marker.UpdatedOn, - PrevVersion: marker.PrevVersion, - PrevHash: marker.PrevHash, - Acked: marker.Acked, - Action: convertToMarkerAction(marker.Action), - Details: marker.Details, + Version: marker.Version, + Hash: marker.Hash, + VersionedHome: marker.VersionedHome, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + PrevVersionedHome: marker.PrevVersionedHome, + Acked: marker.Acked, + Action: convertToMarkerAction(marker.Action), + Details: marker.Details, } markerBytes, err := yaml.Marshal(makerSerializer) if err != nil { return err } - return writeMarkerFile(markerFilePath(), markerBytes, shouldFsync) + return writeMarkerFile(markerFilePath(paths.Data()), markerBytes, shouldFsync) } -func markerFilePath() string { - return filepath.Join(paths.Data(), markerFilename) +func markerFilePath(dataDirPath string) string { + return filepath.Join(dataDirPath, markerFilename) } diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index 13c49693062..82bc6beb138 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -5,14 +5,11 @@ package upgrade import ( - "context" - "fmt" "os" "path/filepath" "runtime" "github.com/elastic/elastic-agent-libs/file" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -22,31 +19,23 @@ const ( exe = ".exe" ) -// ChangeSymlink updates symlink paths to match current version. 
-func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) error { - // create symlink to elastic-agent-{hash} - hashedDir := fmt.Sprintf("%s-%s", agentName, targetHash) - - symlinkPath := filepath.Join(paths.Top(), agentName) - - // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. - newPath := paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) +func changeSymlink(log *logger.Logger, topDirPath, symlinkPath, newTarget string) error { // handle windows suffixes if runtime.GOOS == windows { symlinkPath += exe - newPath += exe + newTarget += exe } - prevNewPath := prevSymlinkPath() - log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newPath, "prev_path", prevNewPath) + prevNewPath := prevSymlinkPath(topDirPath) + log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newTarget, "prev_path", prevNewPath) // remove symlink to avoid upgrade failures if err := os.Remove(prevNewPath); !os.IsNotExist(err) { return err } - if err := os.Symlink(newPath, prevNewPath); err != nil { + if err := os.Symlink(newTarget, prevNewPath); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update agent symlink") } @@ -54,7 +43,7 @@ func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) e return file.SafeFileRotate(symlinkPath, prevNewPath) } -func prevSymlinkPath() string { +func prevSymlinkPath(topDirPath string) string { agentPrevName := agentName + ".prev" // handle windows suffixes @@ -62,5 +51,5 @@ func prevSymlinkPath() string { agentPrevName = agentName + ".exe.prev" } - return filepath.Join(paths.Top(), agentPrevName) + return filepath.Join(topDirPath, agentPrevName) } diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 4f5b0bd7440..aaa929b5ecc 100644 --- 
a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -7,52 +7,103 @@ package upgrade import ( "archive/tar" "archive/zip" + "bytes" "compress/gzip" + goerrors "errors" "fmt" "io" "io/fs" "os" + "path" "path/filepath" "runtime" "strings" - "github.com/hashicorp/go-multierror" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" "github.com/elastic/elastic-agent/pkg/core/logger" ) +// UnpackResult contains the location and hash of the unpacked agent files +type UnpackResult struct { + // Hash contains the unpacked agent commit hash, limited to a length of 6 for backward compatibility + Hash string `json:"hash" yaml:"hash"` + // VersionedHome indicates the path (relative to topPath, formatted in os-dependent fashion) where to find the unpacked agent files + // The value depends on the mappings specified in manifest.yaml, if no manifest is found it assumes the legacy data/elastic-agent- format + VersionedHome string `json:"versioned-home" yaml:"versioned-home"` +} + // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* -func (u *Upgrader) unpack(version, archivePath, dataDir string) (string, error) { +func (u *Upgrader) unpack(version, archivePath, dataDir string) (UnpackResult, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested - var hash string + var unpackRes UnpackResult var err error if runtime.GOOS == windows { - hash, err = unzip(u.log, archivePath, dataDir) + unpackRes, err = unzip(u.log, archivePath, dataDir) } else { - hash, err = untar(u.log, version, archivePath, dataDir) + unpackRes, err = untar(u.log, archivePath, dataDir) } if err != nil { - u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "hash", hash) - return "", err + u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "unpack_result", unpackRes) + return UnpackResult{}, err } - u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "hash", hash) - return hash, nil + u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "unpack_result", unpackRes) + return unpackRes, nil } -func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { +type packageMetadata struct { + manifest *v1.PackageManifest + hash string +} + +func (u *Upgrader) getPackageMetadata(archivePath string) (packageMetadata, error) { + ext := filepath.Ext(archivePath) + if ext == ".gz" { + // if we got gzip extension we need another extension before last + ext = filepath.Ext(strings.TrimSuffix(archivePath, ext)) + ext + } + + switch ext { + case ".zip": + return getPackageMetadataFromZip(archivePath) + case ".tar.gz": + return getPackageMetadataFromTar(archivePath) + default: + return packageMetadata{}, fmt.Errorf("unknown package format %q", ext) + } +} + +func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error) { var 
hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { - return "", err + return UnpackResult{}, err } defer r.Close() fileNamePrefix := strings.TrimSuffix(filepath.Base(archivePath), ".zip") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename + pm := pathMapper{} + var versionedHome string + + metadata, err := getPackageMetadataFromZipReader(r, fileNamePrefix) + if err != nil { + return UnpackResult{}, fmt.Errorf("retrieving package metadata from %q: %w", archivePath, err) + } + + hash = metadata.hash[:hashLen] + + if metadata.manifest != nil { + pm.mappings = metadata.manifest.Package.PathMappings + versionedHome = filepath.FromSlash(pm.Map(metadata.manifest.Package.VersionedHome)) + } else { + // if at this point we didn't load the manifest, set the versioned to the backup value + versionedHome = createVersionedHomeFromHash(hash) + } + unpackFile := func(f *zip.File) (err error) { rc, err := f.Open() if err != nil { @@ -60,44 +111,58 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { } defer func() { if cerr := rc.Close(); cerr != nil { - err = multierror.Append(err, cerr) + err = goerrors.Join(err, cerr) } }() - //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := io.ReadAll(rc) - if err != nil || len(hashBytes) < hashLen { - return err - } - - hash = string(hashBytes[:hashLen]) + // we already loaded the hash, skip this one return nil } + mappedPackagePath := pm.Map(fileName) + // skip everything outside data/ - if !strings.HasPrefix(fileName, "data/") { + if !strings.HasPrefix(mappedPackagePath, "data/") { return nil } - path := filepath.Join(dataDir, strings.TrimPrefix(fileName, "data/")) + dstPath := strings.TrimPrefix(mappedPackagePath, "data/") + dstPath = filepath.Join(dataDir, dstPath) if f.FileInfo().IsDir() { - log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) - // remove any world permissions from 
the directory - _ = os.MkdirAll(path, f.Mode()&0770) + log.Debugw("Unpacking directory", "archive", "zip", "file.path", dstPath) + // check if the directory already exists + _, err = os.Stat(dstPath) + if errors.Is(err, fs.ErrNotExist) { + // the directory does not exist, create it and any non-existing parent directory with the same permissions + if err := os.MkdirAll(dstPath, f.Mode().Perm()&0770); err != nil { + return fmt.Errorf("creating directory %q: %w", dstPath, err) + } + } else if err != nil { + return fmt.Errorf("stat() directory %q: %w", dstPath, err) + } else { + // directory already exists, set the appropriate permissions + err = os.Chmod(dstPath, f.Mode().Perm()&0770) + if err != nil { + return fmt.Errorf("setting permissions %O for directory %q: %w", f.Mode().Perm()&0770, dstPath, err) + } + } + + _ = os.MkdirAll(dstPath, f.Mode()&0770) } else { - log.Debugw("Unpacking file", "archive", "zip", "file.path", path) - // remove any world permissions from the directory/file - _ = os.MkdirAll(filepath.Dir(path), f.Mode()&0770) - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()&0770) + log.Debugw("Unpacking file", "archive", "zip", "file.path", dstPath) + // create non-existing containing folders with 0770 permissions right now, we'll fix the permission of each + // directory as we come across them while processing the other package entries + _ = os.MkdirAll(filepath.Dir(dstPath), 0770) + f, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()&0770) if err != nil { return err } defer func() { if cerr := f.Close(); cerr != nil { - err = multierror.Append(err, cerr) + err = goerrors.Join(err, cerr) } }() @@ -119,29 +184,102 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (string, error) { } if err := unpackFile(f); err != nil { - return "", err + return UnpackResult{}, err } } - return hash, nil + return UnpackResult{ + Hash: hash, + VersionedHome: versionedHome, + }, nil +} + +func 
getPackageMetadataFromZip(archivePath string) (packageMetadata, error) { + r, err := zip.OpenReader(archivePath) + if err != nil { + return packageMetadata{}, fmt.Errorf("opening zip archive %q: %w", archivePath, err) + } + defer r.Close() + fileNamePrefix := strings.TrimSuffix(filepath.Base(archivePath), ".zip") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename + return getPackageMetadataFromZipReader(r, fileNamePrefix) } -func untar(log *logger.Logger, version string, archivePath, dataDir string) (string, error) { +func getPackageMetadataFromZipReader(r *zip.ReadCloser, fileNamePrefix string) (packageMetadata, error) { + ret := packageMetadata{} + + // Load manifest, the use of path.Join is intentional since in .zip file paths use slash ('/') as separator + manifestFile, err := r.Open(path.Join(fileNamePrefix, v1.ManifestFileName)) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + // we got a real error looking up for the manifest + return packageMetadata{}, fmt.Errorf("looking up manifest in package: %w", err) + } + + if err == nil { + // load manifest + defer manifestFile.Close() + ret.manifest, err = v1.ParseManifest(manifestFile) + if err != nil { + return packageMetadata{}, fmt.Errorf("parsing package manifest: %w", err) + } + } + + // Load hash, the use of path.Join is intentional since in .zip file paths use slash ('/') as separator + hashFile, err := r.Open(path.Join(fileNamePrefix, agentCommitFile)) + if err != nil { + // we got a real error looking up for the agent commit file + return packageMetadata{}, fmt.Errorf("looking up %q in package: %w", agentCommitFile, err) + } + defer hashFile.Close() + + hash, err := readCommitHash(hashFile) + if err != nil { + return packageMetadata{}, err + } + + ret.hash = hash + + return ret, nil +} + +func untar(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error) { + + var versionedHome string + var rootDir string + var hash string + + // Look up manifest in the archive and 
prepare path mappings, if any + pm := pathMapper{} + + metadata, err := getPackageMetadataFromTar(archivePath) + if err != nil { + return UnpackResult{}, fmt.Errorf("retrieving package metadata from %q: %w", archivePath, err) + } + + hash = metadata.hash[:hashLen] + + if metadata.manifest != nil { + // set the path mappings + pm.mappings = metadata.manifest.Package.PathMappings + versionedHome = filepath.FromSlash(pm.Map(metadata.manifest.Package.VersionedHome)) + } else { + // set default value of versioned home if it wasn't set by reading the manifest + versionedHome = createVersionedHomeFromHash(metadata.hash) + } + r, err := os.Open(archivePath) if err != nil { - return "", errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) + return UnpackResult{}, errors.New(fmt.Sprintf("artifact for 'elastic-agent' could not be found at '%s'", archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) } defer r.Close() zr, err := gzip.NewReader(r) if err != nil { - return "", errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) + return UnpackResult{}, errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) } tr := tar.NewReader(zr) - var rootDir string - var hash string - fileNamePrefix := strings.TrimSuffix(filepath.Base(archivePath), ".tar.gz") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename + + fileNamePrefix := getFileNamePrefix(archivePath) // go through all the content of a tar archive // if elastic-agent.active.commit file is found, get commit of the version unpacked @@ -153,26 +291,26 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str break } if err != nil { - return "", err + return UnpackResult{}, err } if !validFileName(f.Name) { - return "", errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, 
errors.M(errors.MetaKeyPath, f.Name)) + return UnpackResult{}, errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) } - //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := io.ReadAll(tr) - if err != nil || len(hashBytes) < hashLen { - return "", err - } - - hash = string(hashBytes[:hashLen]) + // we already loaded the hash, skip this one continue } + // map the filename + fileName = pm.Map(fileName) + + // we should check that the path is a local one but since we discard anything that is not under "data/" we can + // skip the additional check + // skip everything outside data/ if !strings.HasPrefix(fileName, "data/") { continue @@ -191,16 +329,16 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str switch { case mode.IsRegular(): log.Debugw("Unpacking file", "archive", "tar", "file.path", abs) - // just to be sure, it should already be created by Dir type - // remove any world permissions from the directory - if err = os.MkdirAll(filepath.Dir(abs), 0o750); err != nil { - return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + // create non-existing containing folders with 0750 permissions right now, we'll fix the permission of each + // directory as we come across them while processing the other package entries + if err = os.MkdirAll(filepath.Dir(abs), 0750); err != nil { + return UnpackResult{}, errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } // remove any world permissions from the file wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()&0770) if err != nil { - return "", errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, 
"TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } //nolint:gosec // legacy @@ -209,36 +347,193 @@ func untar(log *logger.Logger, version string, archivePath, dataDir string) (str err = closeErr } if err != nil { - return "", fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) + return UnpackResult{}, fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) } case mode.IsDir(): log.Debugw("Unpacking directory", "archive", "tar", "file.path", abs) - // remove any world permissions from the directory + // check if the directory already exists _, err = os.Stat(abs) if errors.Is(err, fs.ErrNotExist) { + // the directory does not exist, create it and any non-existing parent directory with the same permissions if err := os.MkdirAll(abs, mode.Perm()&0770); err != nil { - return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } } else if err != nil { - return "", errors.New(err, "TarInstaller: stat() directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, "TarInstaller: stat() directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } else { - // set the appropriate permissions - err = os.Chmod(abs, mode.Perm()&0o770) + // directory already exists, set the appropriate permissions + err = os.Chmod(abs, mode.Perm()&0770) if err != nil { - return "", errors.New(err, fmt.Sprintf("TarInstaller: setting permissions %O for directory %q", mode.Perm()&0o770, abs), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) + return UnpackResult{}, errors.New(err, fmt.Sprintf("TarInstaller: setting permissions %O for directory %q", mode.Perm()&0770, abs), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, 
abs)) } } default: - return "", errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", fileName, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fileName)) + return UnpackResult{}, errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", fileName, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fileName)) } } + return UnpackResult{ + Hash: hash, + VersionedHome: versionedHome, + }, nil +} + +func getPackageMetadataFromTar(archivePath string) (packageMetadata, error) { + // quickly open the archive and look up manifest.yaml file + fileContents, err := getFilesContentFromTar(archivePath, v1.ManifestFileName, agentCommitFile) + if err != nil { + return packageMetadata{}, fmt.Errorf("looking for package metadata files: %w", err) + } + + ret := packageMetadata{} + + manifestReader, ok := fileContents[v1.ManifestFileName] + if ok && manifestReader != nil { + ret.manifest, err = v1.ParseManifest(manifestReader) + if err != nil { + return packageMetadata{}, fmt.Errorf("parsing package manifest: %w", err) + } + } + + if agentCommitReader, ok := fileContents[agentCommitFile]; ok { + hash, err := readCommitHash(agentCommitReader) + if err != nil { + return packageMetadata{}, err + } + ret.hash = hash + } + + return ret, nil +} + +func readCommitHash(reader io.Reader) (string, error) { + commitBytes, err := io.ReadAll(reader) + if err != nil { + return "", fmt.Errorf("reading agent commit hash file: %w", err) + } + hash := strings.TrimSpace(string(commitBytes)) + if len(hash) < hashLen { + return "", fmt.Errorf("hash %q is shorter than minimum length %d", string(commitBytes), hashLen) + } return hash, nil } +func getFileNamePrefix(archivePath string) string { + return strings.TrimSuffix(filepath.Base(archivePath), ".tar.gz") + "/" // omitting `elastic-agent-{version}-{os}-{arch}/` in filename +} + func validFileName(p string) bool { if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || 
strings.Contains(p, "../") { return false } return true } + +type pathMapper struct { + mappings []map[string]string +} + +func (pm pathMapper) Map(packagePath string) string { + for _, mapping := range pm.mappings { + for pkgPath, mappedPath := range mapping { + if strings.HasPrefix(packagePath, pkgPath) { + return path.Join(mappedPath, packagePath[len(pkgPath):]) + } + } + } + return packagePath +} + +type tarCloser struct { + tarFile *os.File + gzipReader *gzip.Reader +} + +func (tc *tarCloser) Close() error { + var err error + if tc.gzipReader != nil { + err = goerrors.Join(err, tc.gzipReader.Close()) + } + // prevent double Close() call to fzip reader + tc.gzipReader = nil + if tc.tarFile != nil { + err = goerrors.Join(err, tc.tarFile.Close()) + } + // prevent double Close() call the underlying file + tc.tarFile = nil + return err +} + +// openTar is a convenience function to open a tar.gz file. +// It returns a *tar.Reader, an io.Closer implementation to be called to release resources and an error +// In case of errors the *tar.Reader will be nil, but the io.Closer is always returned and must be called also in case +// of errors to close the underlying readers. +func openTar(archivePath string) (*tar.Reader, io.Closer, error) { + tc := new(tarCloser) + r, err := os.Open(archivePath) + if err != nil { + return nil, tc, fmt.Errorf("opening package %s: %w", archivePath, err) + } + tc.tarFile = r + + zr, err := gzip.NewReader(r) + if err != nil { + return nil, tc, fmt.Errorf("package %s does not seem to have a valid gzip compression: %w", archivePath, err) + } + tc.gzipReader = zr + + return tar.NewReader(zr), tc, nil +} + +// getFilesContentFromTar is a small utility function which will load in memory the contents of a list of files from the tar archive. 
+// It's meant to be used to load package information/metadata stored in small files within the .tar.gz archive +func getFilesContentFromTar(archivePath string, files ...string) (map[string]io.Reader, error) { + tr, tc, err := openTar(archivePath) + if err != nil { + return nil, fmt.Errorf("opening tar.gz package %s: %w", archivePath, err) + } + defer tc.Close() + + prefix := getFileNamePrefix(archivePath) + + result := make(map[string]io.Reader, len(files)) + fileset := make(map[string]struct{}, len(files)) + // load the fileset with the names we are looking for + for _, fName := range files { + fileset[fName] = struct{}{} + } + + // go through all the content of a tar archive + // if one of the listed files is found, read the contents and set a byte reader into the result map + for { + f, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + return nil, fmt.Errorf("reading archive: %w", err) + } + + fileName := strings.TrimPrefix(f.Name, prefix) + if _, ok := fileset[fileName]; ok { + // it's one of the files we are looking for, retrieve the content and set a reader into the result map + manifestBytes, err := io.ReadAll(tr) + if err != nil { + return nil, fmt.Errorf("reading manifest bytes: %w", err) + } + + reader := bytes.NewReader(manifestBytes) + result[fileName] = reader + } + + } + + return result, nil +} + +// createVersionedHomeFromHash returns a versioned home path relative to topPath in the legacy format `elastic-agent-` +// formatted using OS-dependent path separators +func createVersionedHomeFromHash(hash string) string { + return filepath.Join("data", fmt.Sprintf("elastic-agent-%s", hash[:hashLen])) +} diff --git a/internal/pkg/agent/application/upgrade/step_unpack_test.go b/internal/pkg/agent/application/upgrade/step_unpack_test.go index c2d5bf0ece7..5151058e0e7 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack_test.go +++ b/internal/pkg/agent/application/upgrade/step_unpack_test.go @@ -6,6 +6,7 @@ package 
upgrade import ( "archive/tar" + "archive/zip" "compress/gzip" "fmt" "io" @@ -21,9 +22,23 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" "github.com/elastic/elastic-agent/pkg/core/logger" ) +const agentBinaryPlaceholderContent = "Placeholder for the elastic-agent binary" + +const ea_123_manifest = ` +version: co.elastic.agent/v1 +kind: PackageManifest +package: + version: 1.2.3 + snapshot: true + versioned-home: data/elastic-agent-abcdef + path-mappings: + - data/elastic-agent-abcdef: data/elastic-agent-1.2.3-SNAPSHOT-abcdef + manifest.yaml: data/elastic-agent-1.2.3-SNAPSHOT-abcdef/manifest.yaml +` const foo_component_spec = ` version: 2 inputs: @@ -45,6 +60,33 @@ inputs: - baz ` +var archiveFilesWithManifestNoSymlink = []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + v1.ManifestFileName, content: ea_123_manifest, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 
0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, +} + +var outOfOrderArchiveFilesNoManifestNoSymlink = []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o700)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, +} + +var agentArchiveSymLink = files{fType: SYMLINK, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + agentName, content: "data/elastic-agent-abcdef/" + agentName, mode: 
fs.ModeSymlink | (fs.ModePerm & 0o750)} + type fileType uint const ( @@ -87,76 +129,143 @@ func (f files) Sys() any { type createArchiveFunc func(t *testing.T, archiveFiles []files) (string, error) type checkExtractedPath func(t *testing.T, testDataDir string) -func TestUpgrader_unpack(t *testing.T) { +func TestUpgrader_unpackTarGz(t *testing.T) { type args struct { version string archiveGenerator createArchiveFunc archiveFiles []files } + tests := []struct { name string args args - want string + want UnpackResult wantErr assert.ErrorAssertionFunc checkFiles checkExtractedPath }{ { - name: "targz with file before containing folder", + name: "file before containing folder", args: args{ - version: "1.2.3", - archiveFiles: []files{ - {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, - {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, - {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, - {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, - {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o700)}, - {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/" + agentName, content: "Placeholder for the elastic-agent binary", mode: fs.ModePerm & 0o750}, - {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, - {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, - {fType: REGULAR, path: 
"elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, - {fType: SYMLINK, path: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64/" + agentName, content: "data/elastic-agent-abcdef/" + agentName, mode: fs.ModeSymlink | (fs.ModePerm & 0o750)}, - }, + version: "1.2.3", + archiveFiles: append(outOfOrderArchiveFilesNoManifestNoSymlink, agentArchiveSymLink), archiveGenerator: func(t *testing.T, i []files) (string, error) { - return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", i) + return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.tar.gz", i) }, }, - want: "abcdef", + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-abcdef"), + }, wantErr: assert.NoError, checkFiles: func(t *testing.T, testDataDir string) { - versionedHome := filepath.Join(testDataDir, "elastic-agent-abcdef") - require.DirExists(t, versionedHome, "directory for package.version does not exists") - stat, err := os.Stat(versionedHome) - require.NoErrorf(t, err, "error calling Stat() for versionedHome %q", versionedHome) - expectedPermissions := fs.ModePerm & 0o700 - actualPermissions := fs.ModePerm & stat.Mode() - assert.Equalf(t, expectedPermissions, actualPermissions, "Wrong permissions set on versioned home %q: expected %O, got %O", versionedHome, expectedPermissions, actualPermissions) + checkExtractedFilesOutOfOrder(t, versionedHome) + }, + }, + { + name: "package with manifest file", + args: args{ + version: "1.2.3", + archiveFiles: append(archiveFilesWithManifestNoSymlink, agentArchiveSymLink), + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.tar.gz", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), }, + wantErr: assert.NoError, + checkFiles: 
checkExtractedFilesWithManifest, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("tar.gz tests only run on Linux/MacOS") + testTop := t.TempDir() + testDataDir := filepath.Join(testTop, "data") + err := os.MkdirAll(testDataDir, 0o777) + assert.NoErrorf(t, err, "error creating initial structure %q", testDataDir) + log, _ := logger.NewTesting(tt.name) + + archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) + require.NoError(t, err, "creation of test archive file failed") + + got, err := untar(log, archiveFile, testDataDir) + if !tt.wantErr(t, err, fmt.Sprintf("untar(%v, %v, %v)", tt.args.version, archiveFile, testDataDir)) { + return } + assert.Equalf(t, tt.want, got, "untar(%v, %v, %v)", tt.args.version, archiveFile, testDataDir) + if tt.checkFiles != nil { + tt.checkFiles(t, testDataDir) + } + }) + } +} + +func TestUpgrader_unpackZip(t *testing.T) { + type args struct { + archiveGenerator createArchiveFunc + archiveFiles []files + } + + tests := []struct { + name string + args args + want UnpackResult + wantErr assert.ErrorAssertionFunc + checkFiles checkExtractedPath + }{ + { + name: "file before containing folder", + args: args{ + archiveFiles: outOfOrderArchiveFilesNoManifestNoSymlink, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createZipArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-abcdef"), + }, + wantErr: assert.NoError, + checkFiles: func(t *testing.T, testDataDir string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-abcdef") + checkExtractedFilesOutOfOrder(t, versionedHome) + }, + }, + { + name: "package with manifest file", + args: args{ + archiveFiles: archiveFilesWithManifestNoSymlink, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createZipArchive(t, 
"elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + checkFiles: checkExtractedFilesWithManifest, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { testTop := t.TempDir() testDataDir := filepath.Join(testTop, "data") err := os.MkdirAll(testDataDir, 0o777) assert.NoErrorf(t, err, "error creating initial structure %q", testDataDir) log, _ := logger.NewTesting(tt.name) - u := &Upgrader{ - log: log, - } archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) require.NoError(t, err, "creation of test archive file failed") - got, err := u.unpack(tt.args.version, archiveFile, testDataDir) - if !tt.wantErr(t, err, fmt.Sprintf("unpack(%v, %v, %v)", tt.args.version, archiveFile, testDataDir)) { + got, err := unzip(log, archiveFile, testDataDir) + if !tt.wantErr(t, err, fmt.Sprintf("unzip(%v, %v)", archiveFile, testDataDir)) { return } - assert.Equalf(t, tt.want, got, "unpack(%v, %v, %v)", tt.args.version, archiveFile, testDataDir) + assert.Equalf(t, tt.want, got, "unzip(%v, %v)", archiveFile, testDataDir) if tt.checkFiles != nil { tt.checkFiles(t, testDataDir) } @@ -164,21 +273,62 @@ func TestUpgrader_unpack(t *testing.T) { } } -func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { +func checkExtractedFilesOutOfOrder(t *testing.T, versionedHome string) { + require.DirExists(t, versionedHome, "directory for package.version does not exists") + stat, err := os.Stat(versionedHome) + require.NoErrorf(t, err, "error calling Stat() for versionedHome %q", versionedHome) + expectedPermissions := fs.ModePerm & 0o700 + if runtime.GOOS == "windows" { + // windows permissions are not very fine grained :/ + expectedPermissions = fs.ModePerm & 0o777 + } + actualPermissions := fs.ModePerm & stat.Mode() + assert.Equalf(t, expectedPermissions, 
actualPermissions, "Wrong permissions set on versioned home %q: expected %O, got %O", versionedHome, expectedPermissions, actualPermissions) + agentExecutable := filepath.Join(versionedHome, agentName) + if assert.FileExistsf(t, agentExecutable, "agent executable %q is not found in versioned home directory %q", agentExecutable, versionedHome) { + fileBytes, err := os.ReadFile(agentExecutable) + if assert.NoErrorf(t, err, "error reading elastic-agent executable %q", agentExecutable) { + assert.Equal(t, agentBinaryPlaceholderContent, string(fileBytes), "agent binary placeholder content does not match") + } + } +} +func checkExtractedFilesWithManifest(t *testing.T, testDataDir string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-1.2.3-SNAPSHOT-abcdef") + require.DirExists(t, versionedHome, "mapped versioned home directory does not exists") + mappedAgentExecutable := filepath.Join(versionedHome, agentName) + if assert.FileExistsf(t, mappedAgentExecutable, "agent executable %q is not found in mapped versioned home directory %q", mappedAgentExecutable, versionedHome) { + fileBytes, err := os.ReadFile(mappedAgentExecutable) + if assert.NoErrorf(t, err, "error reading elastic-agent executable %q", mappedAgentExecutable) { + assert.Equal(t, agentBinaryPlaceholderContent, string(fileBytes), "agent binary placeholder content does not match") + } + } + mappedPackageManifest := filepath.Join(versionedHome, v1.ManifestFileName) + if assert.FileExistsf(t, mappedPackageManifest, "package manifest %q is not found in mapped versioned home directory %q", mappedPackageManifest, versionedHome) { + fileBytes, err := os.ReadFile(mappedPackageManifest) + if assert.NoErrorf(t, err, "error reading package manifest %q", mappedPackageManifest) { + assert.Equal(t, ea_123_manifest, string(fileBytes), "package manifest content does not match") + } + } +} + +func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { outDir := t.TempDir() 
outFilePath := filepath.Join(outDir, archiveName) file, err := os.OpenFile(outFilePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) require.NoErrorf(t, err, "error creating output archive %q", outFilePath) - defer file.Close() + defer func(file *os.File) { + err := file.Close() + assert.NoError(t, err, "error closing tar.gz archive file") + }(file) zipWriter := gzip.NewWriter(file) writer := tar.NewWriter(zipWriter) defer func(writer *tar.Writer) { err := writer.Close() - require.NoError(t, err, "error closing tar writer") + assert.NoError(t, err, "error closing tar writer") err = zipWriter.Close() - require.NoError(t, err, "error closing gzip writer") + assert.NoError(t, err, "error closing gzip writer") }(writer) for _, af := range archiveFiles { @@ -192,13 +342,13 @@ func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (s func addEntryToTarArchive(af files, writer *tar.Writer) error { header, err := tar.FileInfoHeader(&af, af.content) if err != nil { - return err + return fmt.Errorf("creating header for %q: %w", af.path, err) } - header.Name = af.path + header.Name = filepath.ToSlash(af.path) if err := writer.WriteHeader(header); err != nil { - return err + return fmt.Errorf("writing header for %q: %w", af.path, err) } if af.IsDir() || af.fType == SYMLINK { @@ -210,3 +360,62 @@ func addEntryToTarArchive(af files, writer *tar.Writer) error { } return nil } + +func createZipArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { + t.Helper() + outDir := t.TempDir() + + outFilePath := filepath.Join(outDir, archiveName) + file, err := os.OpenFile(outFilePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) + require.NoErrorf(t, err, "error creating output archive %q", outFilePath) + defer func(file *os.File) { + err := file.Close() + assert.NoError(t, err, "error closing zip archive file") + }(file) + + w := zip.NewWriter(file) + defer func(writer *zip.Writer) { + err := writer.Close() + assert.NoError(t, err, "error closing 
tar writer") + }(w) + + for _, af := range archiveFiles { + if af.fType == SYMLINK { + return "", fmt.Errorf("entry %q is a symlink. Not supported in .zip files", af.path) + } + + err = addEntryToZipArchive(af, w) + require.NoErrorf(t, err, "error adding %q to tar archive", af.path) + } + return outFilePath, nil +} + +func addEntryToZipArchive(af files, writer *zip.Writer) error { + header, err := zip.FileInfoHeader(&af) + if err != nil { + return fmt.Errorf("creating header for %q: %w", af.path, err) + } + + header.SetMode(af.Mode() & os.ModePerm) + header.Name = filepath.ToSlash(af.path) + if af.IsDir() { + header.Name += "/" + } else { + header.Method = zip.Deflate + } + + w, err := writer.CreateHeader(header) + if err != nil { + return err + } + + if af.IsDir() { + return nil + } + + if _, err = io.Copy(w, strings.NewReader(af.content)); err != nil { + return err + } + + return nil +} diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e154251f552..54a5c9f528b 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -6,11 +6,15 @@ package upgrade import ( "context" + goerrors "errors" "fmt" + "io/fs" "os" + "os/exec" "path/filepath" "runtime" "strings" + "time" "github.com/otiai10/copy" "go.elastic.co/apm" @@ -31,13 +35,16 @@ import ( "github.com/elastic/elastic-agent/pkg/control/v2/client" "github.com/elastic/elastic-agent/pkg/control/v2/cproto" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) const ( - agentName = "elastic-agent" - hashLen = 6 - agentCommitFile = ".elastic-agent.active.commit" - runDirMod = 0770 + agentName = "elastic-agent" + hashLen = 6 + agentCommitFile = ".elastic-agent.active.commit" + runDirMod = 0770 + snapshotSuffix = "-SNAPSHOT" + watcherMaxWaitTime = 30 * time.Second ) var agentArtifact = artifact.Artifact{ @@ -46,8 +53,7 @@ var 
agentArtifact = artifact.Artifact{ Artifact: "beats/" + agentName, } -// ErrSameVersion error is returned when the upgrade results in the same installed version. -var ErrSameVersion = errors.New("upgrade did not occur because its the same version") +var ErrWatcherNotStarted = errors.New("watcher did not start in time") // Upgrader performs an upgrade type Upgrader struct { @@ -73,7 +79,7 @@ func NewUpgrader(log *logger.Logger, settings *artifact.Config, agentInfo info.A settings: settings, agentInfo: agentInfo, upgradeable: IsUpgradeable(), - markerWatcher: newMarkerFileWatcher(markerFilePath(), log), + markerWatcher: newMarkerFileWatcher(markerFilePath(paths.Data()), log), }, nil } @@ -133,6 +139,24 @@ func (u *Upgrader) Upgradeable() bool { return u.upgradeable } +type agentVersion struct { + version string + snapshot bool + hash string +} + +func (av agentVersion) String() string { + buf := strings.Builder{} + buf.WriteString(av.version) + if av.snapshot { + buf.WriteString(snapshotSuffix) + } + buf.WriteString(" (hash: ") + buf.WriteString(av.hash) + buf.WriteString(")") + return buf.String() +} + // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, det *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI) @@ -157,7 +181,13 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string det.SetState(details.StateDownloading) sourceURI = u.sourceURI(sourceURI) - archivePath, err := u.downloadArtifact(ctx, version, sourceURI, det, skipVerifyOverride, skipDefaultPgp, pgpBytes...) 
+ + parsedVersion, err := agtversion.ParseVersion(version) + if err != nil { + return nil, fmt.Errorf("error parsing version %q: %w", version, err) + } + + archivePath, err := u.downloadArtifact(ctx, parsedVersion, sourceURI, det, skipVerifyOverride, skipDefaultPgp, pgpBytes...) if err != nil { // Run the same pre-upgrade cleanup task to get rid of any newly downloaded files // This may have an issue if users are upgrading to the same version number. @@ -170,49 +200,123 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string det.SetState(details.StateExtracting) - newHash, err := u.unpack(version, archivePath, paths.Data()) + metadata, err := u.getPackageMetadata(archivePath) + if err != nil { + return nil, fmt.Errorf("reading metadata for elastic agent version %s package %q: %w", version, archivePath, err) + } + + currentVersion := agentVersion{ + version: release.Version(), + snapshot: release.Snapshot(), + hash: release.Commit(), + } + + same, newVersion := isSameVersion(u.log, currentVersion, metadata, version) + if same { + return nil, fmt.Errorf("agent version is already %s", currentVersion) + } + + u.log.Infow("Unpacking agent package", "version", newVersion) + + // Nice to have: add check that no archive files end up in the current versioned home + unpackRes, err := u.unpack(version, archivePath, paths.Data()) if err != nil { return nil, err } + newHash := unpackRes.Hash if newHash == "" { return nil, errors.New("unknown hash") } - if strings.HasPrefix(release.Commit(), newHash) { - u.log.Warn("Upgrade action skipped: upgrade did not occur because its the same version") - return nil, nil + if unpackRes.VersionedHome == "" { + return nil, fmt.Errorf("versionedhome is empty: %v", unpackRes) } - if err := copyActionStore(u.log, newHash); err != nil { + newHome := filepath.Join(paths.Top(), unpackRes.VersionedHome) + + if err := copyActionStore(u.log, newHome); err != nil { return nil, errors.New(err, "failed to copy action store") } 
- if err := copyRunDirectory(u.log, newHash); err != nil { + newRunPath := filepath.Join(newHome, "run") + oldRunPath := filepath.Join(paths.Home(), "run") + + if err := copyRunDirectory(u.log, oldRunPath, newRunPath); err != nil { return nil, errors.New(err, "failed to copy run directory") } det.SetState(details.StateReplacing) - if err := ChangeSymlink(ctx, u.log, newHash); err != nil { + // create symlink to the /elastic-agent + hashedDir := unpackRes.VersionedHome + + symlinkPath := filepath.Join(paths.Top(), agentName) + + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. + newPath := paths.BinaryPath(filepath.Join(paths.Top(), hashedDir), agentName) + + currentVersionedHome, err := filepath.Rel(paths.Top(), paths.Home()) + if err != nil { + return nil, fmt.Errorf("calculating home path relative to top, home: %q top: %q : %w", paths.Home(), paths.Top(), err) + } + + if err := changeSymlink(u.log, paths.Top(), symlinkPath, newPath); err != nil { u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) - return nil, err + rollbackErr := rollbackInstall(ctx, u.log, paths.Top(), hashedDir, currentVersionedHome) + return nil, goerrors.Join(err, rollbackErr) + } + + // We rotated the symlink successfully: prepare the current and previous agent installation details for the update marker + // In update marker the `current` agent install is the one where the symlink is pointing (the new one we didn't start yet) + // while the `previous` install is the currently executing elastic-agent that is no longer reachable via the symlink. + // After the restart at the end of the function, everything lines up correctly. 
+ current := agentInstall{ + version: version, + hash: unpackRes.Hash, + versionedHome: unpackRes.VersionedHome, + } + + previous := agentInstall{ + version: release.VersionWithSnapshot(), + hash: release.Commit(), + versionedHome: currentVersionedHome, } - if err := u.markUpgrade(ctx, u.log, newHash, action, det); err != nil { + if err := markUpgrade(u.log, + paths.Data(), // data dir to place the marker in + current, // new agent version data + previous, // old agent version data + action, det); err != nil { u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) - return nil, err + rollbackErr := rollbackInstall(ctx, u.log, paths.Top(), hashedDir, currentVersionedHome) + return nil, goerrors.Join(err, rollbackErr) } - if err := InvokeWatcher(u.log); err != nil { + minParsedVersionForNewUpdateMarker := agtversion.NewParsedSemVer(8, 13, 0, "", "") + var watcherExecutable string + if parsedVersion.Less(*minParsedVersionForNewUpdateMarker) { + // use the current agent executable for watch, if downgrading the old agent doesn't understand the current agent's path structure. 
+ watcherExecutable = paths.BinaryPath(paths.VersionedHome(paths.Top()), agentName) + } else { + // use the new agent executable as it should be able to parse the new update marker + watcherExecutable = paths.BinaryPath(filepath.Join(paths.Top(), unpackRes.VersionedHome), agentName) + } + var watcherCmd *exec.Cmd + if watcherCmd, err = InvokeWatcher(u.log, watcherExecutable); err != nil { u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) - rollbackInstall(ctx, u.log, newHash) - return nil, err + rollbackErr := rollbackInstall(ctx, u.log, paths.Top(), hashedDir, currentVersionedHome) + return nil, goerrors.Join(err, rollbackErr) } - cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) + watcherWaitErr := waitForWatcher(ctx, u.log, markerFilePath(paths.Data()), watcherMaxWaitTime) + if watcherWaitErr != nil { + killWatcherErr := watcherCmd.Process.Kill() + rollbackErr := rollbackInstall(ctx, u.log, paths.Top(), hashedDir, currentVersionedHome) + return nil, goerrors.Join(watcherWaitErr, killWatcherErr, rollbackErr) + } + + cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, filepath.Join(paths.Top(), unpackRes.VersionedHome)) // Clean everything from the downloads dir u.log.Infow("Removing downloads directory", "file.path", paths.Downloads()) @@ -224,10 +328,39 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return cb, nil } +func waitForWatcher(ctx context.Context, log *logger.Logger, markerFilePath string, waitTime time.Duration) error { + // Wait for the watcher to be up and running + watcherContext, cancel := context.WithTimeout(ctx, waitTime) + defer cancel() + + markerWatcher := newMarkerFileWatcher(markerFilePath, log) + err := markerWatcher.Run(watcherContext) + if err != nil { + return fmt.Errorf("error starting update marker watcher: %w", err) + } + + log.Info("waiting up to %s for upgrade watcher to set %s state in upgrade 
marker", waitTime, details.StateWatching) + + for { + select { + case updMarker := <-markerWatcher.Watch(): + if updMarker.Details != nil && updMarker.Details.State == details.StateWatching { + // watcher started and it is watching, all good + log.Info("upgrade watcher set %s state in upgrade marker: exiting wait loop", details.StateWatching) + return nil + } + + case <-watcherContext.Done(): + log.Error("upgrade watcher did not start watching within %s or context has expired", waitTime) + return goerrors.Join(ErrWatcherNotStarted, watcherContext.Err()) + } + } +} + // Ack acks last upgrade action func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { // get upgrade action - marker, err := LoadMarker() + marker, err := LoadMarker(paths.Data()) if err != nil { return err } @@ -269,15 +402,43 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -func rollbackInstall(ctx context.Context, log *logger.Logger, hash string) { - os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) - _ = ChangeSymlink(ctx, log, release.ShortCommit()) +func isSameVersion(log *logger.Logger, current agentVersion, metadata packageMetadata, upgradeVersion string) (bool, agentVersion) { + newVersion := agentVersion{} + if metadata.manifest != nil { + packageDesc := metadata.manifest.Package + newVersion.version = packageDesc.Version + newVersion.snapshot = packageDesc.Snapshot + } else { + // extract version info from the version string (we can ignore parsing errors as it would have never passed the download step) + parsedVersion, _ := agtversion.ParseVersion(upgradeVersion) + newVersion.version = strings.TrimSuffix(parsedVersion.VersionWithPrerelease(), snapshotSuffix) + newVersion.snapshot = parsedVersion.IsSnapshot() + } + newVersion.hash = metadata.hash + + log.Debugw("Comparing current and new agent version", "current_version", current, "new_version", newVersion) + + return current == newVersion, newVersion 
+} + +func rollbackInstall(ctx context.Context, log *logger.Logger, topDirPath, versionedHome, oldVersionedHome string) error { + oldAgentPath := paths.BinaryPath(filepath.Join(topDirPath, oldVersionedHome), agentName) + err := changeSymlink(log, topDirPath, filepath.Join(topDirPath, agentName), oldAgentPath) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("rolling back install: restoring symlink to %q failed: %w", oldAgentPath, err) + } + + newAgentInstallPath := filepath.Join(topDirPath, versionedHome) + err = os.RemoveAll(newAgentInstallPath) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("rolling back install: removing new agent install at %q failed: %w", newAgentInstallPath, err) + } + return nil } -func copyActionStore(log *logger.Logger, newHash string) error { +func copyActionStore(log *logger.Logger, newHome string) error { // copies legacy action_store.yml, state.yml and state.enc encrypted file if exists storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreYmlFile(), paths.AgentStateStoreFile()} - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) log.Infow("Copying action store", "new_home_path", newHome) for _, currentActionStorePath := range storePaths { @@ -300,9 +461,7 @@ func copyActionStore(log *logger.Logger, newHash string) error { return nil } -func copyRunDirectory(log *logger.Logger, newHash string) error { - newRunPath := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash), "run") - oldRunPath := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, release.ShortCommit()), "run") +func copyRunDirectory(log *logger.Logger, oldRunPath, newRunPath string) error { log.Infow("Copying run directory", "new_run_path", newRunPath, "old_run_path", oldRunPath) @@ -326,24 +485,28 @@ func copyRunDirectory(log *logger.Logger, newHash string) error { // shutdownCallback returns a callback 
function to be executing during shutdown once all processes are closed. // this goes through runtime directory of agent and copies all the state files created by processes to new versioned // home directory with updated process name to match new version. -func shutdownCallback(l *logger.Logger, homePath, prevVersion, newVersion, newHash string) reexec.ShutdownCallbackFn { +func shutdownCallback(l *logger.Logger, homePath, prevVersion, newVersion, newHome string) reexec.ShutdownCallbackFn { if release.Snapshot() { // SNAPSHOT is part of newVersion - prevVersion += "-SNAPSHOT" + prevVersion += snapshotSuffix } return func() error { runtimeDir := filepath.Join(homePath, "run") + l.Debugf("starting copy of run directories from %q to %q", homePath, newHome) processDirs, err := readProcessDirs(runtimeDir) if err != nil { return err } oldHome := homePath - newHome := filepath.Join(filepath.Dir(homePath), fmt.Sprintf("%s-%s", agentName, newHash)) for _, processDir := range processDirs { - newDir := strings.ReplaceAll(processDir, prevVersion, newVersion) - newDir = strings.ReplaceAll(newDir, oldHome, newHome) + relPath, _ := filepath.Rel(oldHome, processDir) + + newRelPath := strings.ReplaceAll(relPath, prevVersion, newVersion) + newRelPath = strings.ReplaceAll(newRelPath, oldHome, newHome) + newDir := filepath.Join(newHome, newRelPath) + l.Debugf("copying %q -> %q", processDir, newDir) if err := copyDir(l, processDir, newDir, true); err != nil { return err } diff --git a/internal/pkg/agent/application/upgrade/upgrade_test.go b/internal/pkg/agent/application/upgrade/upgrade_test.go index 57230fdedbe..d82cb15c6df 100644 --- a/internal/pkg/agent/application/upgrade/upgrade_test.go +++ b/internal/pkg/agent/application/upgrade/upgrade_test.go @@ -11,20 +11,23 @@ import ( "os" "path/filepath" "runtime" - "strings" + "sync" "testing" "time" "github.com/gofrs/flock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" 
"github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent-libs/transport/tlscommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/release" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" "github.com/elastic/elastic-agent/pkg/control/v2/client" "github.com/elastic/elastic-agent/pkg/control/v2/client/mocks" "github.com/elastic/elastic-agent/pkg/control/v2/cproto" @@ -110,40 +113,80 @@ func Test_CopyFile(t *testing.T) { } func TestShutdownCallback(t *testing.T) { - l, _ := logger.New("test", false) - tmpDir, err := os.MkdirTemp("", "shutdown-test-") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - // make homepath agent consistent (in a form of elastic-agent-hash) - homePath := filepath.Join(tmpDir, fmt.Sprintf("%s-%s", agentName, release.ShortCommit())) + type testcase struct { + name string + agentHomeDirectory string + newAgentHomeDirectory string + agentVersion string + newAgentVersion string + oldRunFile string + newRunFile string + } + + testcases := []testcase{ + { + name: "legacy run directories", + agentHomeDirectory: fmt.Sprintf("%s-%s", agentName, release.ShortCommit()), + newAgentHomeDirectory: fmt.Sprintf("%s-%s", agentName, "abc123"), + agentVersion: "7.14.0", + newAgentVersion: "7.15.0", + oldRunFile: filepath.Join("run", "default", "process-7.14.0", "file.test"), + newRunFile: filepath.Join("run", "default", "process-7.15.0", "file.test"), + }, + { + name: "new run directories", + agentHomeDirectory: "elastic-agent-abcdef", + newAgentHomeDirectory: "elastic-agent-ghijkl", + agentVersion: "1.2.3", + newAgentVersion: "4.5.6", + oldRunFile: filepath.Join("run", "component", "unit", "file.test"), + newRunFile: filepath.Join("run", 
"component", "unit", "file.test"), + }, + { + name: "new run directories, agents with version in path", + agentHomeDirectory: "elastic-agent-1.2.3-abcdef", + newAgentHomeDirectory: "elastic-agent-4.5.6-ghijkl", + agentVersion: "1.2.3", + newAgentVersion: "4.5.6", + oldRunFile: filepath.Join("run", "component", "unit", "file.test"), + newRunFile: filepath.Join("run", "component", "unit", "file.test"), + }, + } + + for _, tt := range testcases { - filename := "file.test" - newCommit := "abc123" - sourceVersion := "7.14.0" - targetVersion := "7.15.0" + t.Run(tt.name, func(t *testing.T) { + l, _ := logger.New(tt.name, false) + tmpDir := t.TempDir() - content := []byte("content") - newHome := strings.ReplaceAll(homePath, release.ShortCommit(), newCommit) - sourceDir := filepath.Join(homePath, "run", "default", "process-"+sourceVersion) - targetDir := filepath.Join(newHome, "run", "default", "process-"+targetVersion) + // make homepath agent consistent + homePath := filepath.Join(tmpDir, tt.agentHomeDirectory) + newHome := filepath.Join(tmpDir, tt.newAgentHomeDirectory) - require.NoError(t, os.MkdirAll(sourceDir, 0755)) - require.NoError(t, os.MkdirAll(targetDir, 0755)) + content := []byte("content") + sourceDir := filepath.Join(homePath, filepath.Dir(tt.oldRunFile)) + targetDir := filepath.Join(newHome, filepath.Dir(tt.newRunFile)) - cb := shutdownCallback(l, homePath, sourceVersion, targetVersion, newCommit) + require.NoError(t, os.MkdirAll(sourceDir, 0755)) + require.NoError(t, os.MkdirAll(targetDir, 0755)) - oldFilename := filepath.Join(sourceDir, filename) - err = os.WriteFile(oldFilename, content, 0640) - require.NoError(t, err, "preparing file failed") + cb := shutdownCallback(l, homePath, tt.agentVersion, tt.newAgentVersion, newHome) - err = cb() - require.NoError(t, err, "callback failed") + oldFilename := filepath.Join(homePath, tt.oldRunFile) + err := os.WriteFile(oldFilename, content, 0640) + require.NoError(t, err, "preparing file failed") + + err = cb() + 
require.NoError(t, err, "callback failed") + + newFilename := filepath.Join(newHome, tt.newRunFile) + newContent, err := os.ReadFile(newFilename) + require.NoError(t, err, "reading file failed") + require.Equal(t, content, newContent, "contents are not equal") + }) + } - newFilename := filepath.Join(targetDir, filename) - newContent, err := os.ReadFile(newFilename) - require.NoError(t, err, "reading file failed") - require.Equal(t, content, newContent, "contents are not equal") } func TestIsInProgress(t *testing.T) { @@ -498,3 +541,351 @@ agent.download: }) } } + +var agentVersion123SNAPSHOTabcdef = agentVersion{ + version: "1.2.3", + snapshot: true, + hash: "abcdef", +} + +var agentVersion123SNAPSHOTabcdefRepackaged = agentVersion{ + version: "1.2.3-repackaged", + snapshot: true, + hash: "abcdef", +} + +var agentVersion123abcdef = agentVersion{ + version: "1.2.3", + snapshot: false, + hash: "abcdef", +} + +var agentVersion123SNAPSHOTghijkl = agentVersion{ + version: "1.2.3", + snapshot: true, + hash: "ghijkl", +} + +func TestIsSameVersion(t *testing.T) { + type args struct { + current agentVersion + metadata packageMetadata + version string + } + type want struct { + same bool + newVersion agentVersion + } + + tests := []struct { + name string + args args + want want + }{ + { + name: "same version, snapshot flag and hash", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: &v1.PackageManifest{ + Package: v1.PackageDesc{ + Version: "1.2.3", + Snapshot: true, + VersionedHome: "", + PathMappings: nil, + }, + }, + hash: "abcdef", + }, + version: "unused", + }, + want: want{ + same: true, + newVersion: agentVersion123SNAPSHOTabcdef, + }, + }, + { + name: "same hash, snapshot flag, different version", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: &v1.PackageManifest{ + Package: v1.PackageDesc{ + Version: "1.2.3-repackaged", + Snapshot: true, + VersionedHome: "", + 
PathMappings: nil, + }, + }, + hash: "abcdef", + }, + version: "unused", + }, + want: want{ + same: false, + newVersion: agentVersion123SNAPSHOTabcdefRepackaged, + }, + }, + { + name: "same version and hash, different snapshot flag (SNAPSHOT promotion to release)", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: &v1.PackageManifest{ + Package: v1.PackageDesc{ + Version: "1.2.3", + Snapshot: false, + VersionedHome: "", + PathMappings: nil, + }, + }, + hash: "abcdef", + }, + version: "unused", + }, + want: want{ + same: false, + newVersion: agentVersion123abcdef, + }, + }, + { + name: "same version and snapshot, different hash (SNAPSHOT upgrade)", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: &v1.PackageManifest{ + Package: v1.PackageDesc{ + Version: "1.2.3", + Snapshot: true, + VersionedHome: "", + PathMappings: nil, + }, + }, + hash: "ghijkl", + }, + version: "unused", + }, + want: want{ + same: false, + newVersion: agentVersion123SNAPSHOTghijkl, + }, + }, + { + name: "same version, snapshot flag and hash, no manifest", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: nil, + hash: "abcdef", + }, + version: "1.2.3-SNAPSHOT", + }, + want: want{ + same: true, + newVersion: agentVersion123SNAPSHOTabcdef, + }, + }, + { + name: "same hash, snapshot flag, different version, no manifest", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: nil, + hash: "abcdef", + }, + version: "1.2.3-repackaged-SNAPSHOT", + }, + want: want{ + same: false, + newVersion: agentVersion123SNAPSHOTabcdefRepackaged, + }, + }, + { + name: "same version and hash, different snapshot flag, no manifest (SNAPSHOT promotion to release)", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: nil, + hash: "abcdef", + }, + version: "1.2.3", + }, + want: want{ + same: false, 
+ newVersion: agentVersion123abcdef, + }, + }, + { + name: "same version and snapshot, different hash (SNAPSHOT upgrade)", + args: args{ + current: agentVersion123SNAPSHOTabcdef, + metadata: packageMetadata{ + manifest: nil, + hash: "ghijkl", + }, + version: "1.2.3-SNAPSHOT", + }, + want: want{ + same: false, + newVersion: agentVersion123SNAPSHOTghijkl, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + log, _ := logger.NewTesting(test.name) + actualSame, actualNewVersion := isSameVersion(log, test.args.current, test.args.metadata, test.args.version) + + assert.Equal(t, test.want.same, actualSame, "Unexpected boolean comparison result: isSameVersion(%v, %v, %v, %v) should be %v", + log, test.args.current, test.args.metadata, test.args.version, test.want.same) + assert.Equal(t, test.want.newVersion, actualNewVersion, "Unexpected new version result: isSameVersion(%v, %v, %v, %v) should be %v", + log, test.args.current, test.args.metadata, test.args.version, test.want.newVersion) + }) + } +} + +func TestWaitForWatcher(t *testing.T) { + wantErrWatcherNotStarted := func(t assert.TestingT, err error, i ...interface{}) bool { + return assert.ErrorIs(t, err, ErrWatcherNotStarted, i) + } + tests := []struct { + name string + states []details.State + stateChangeInterval time.Duration + timeout time.Duration + wantErr assert.ErrorAssertionFunc + }{ + { + name: "Happy path: watcher is watching already", + states: []details.State{details.StateWatching}, + stateChangeInterval: 1 * time.Millisecond, + timeout: 50 * time.Millisecond, + wantErr: assert.NoError, + }, + { + name: "Sad path: watcher is never starting", + states: []details.State{details.StateReplacing}, + stateChangeInterval: 1 * time.Millisecond, + timeout: 50 * time.Millisecond, + wantErr: wantErrWatcherNotStarted, + }, + { + name: "Runaround path: marker is jumping around and landing on watching", + states: []details.State{ + details.StateRequested, + details.StateScheduled, + 
details.StateDownloading, + details.StateExtracting, + details.StateReplacing, + details.StateRestarting, + details.StateWatching, + }, + stateChangeInterval: 1 * time.Millisecond, + timeout: 500 * time.Millisecond, + wantErr: assert.NoError, + }, + { + name: "Timeout: marker is never created", + states: nil, + stateChangeInterval: 1 * time.Millisecond, + timeout: 50 * time.Millisecond, + wantErr: wantErrWatcherNotStarted, + }, + { + name: "Timeout2: state doesn't get there in time", + states: []details.State{ + details.StateRequested, + details.StateScheduled, + details.StateDownloading, + details.StateExtracting, + details.StateReplacing, + details.StateRestarting, + details.StateWatching, + }, + + stateChangeInterval: 5 * time.Millisecond, + timeout: 20 * time.Millisecond, + wantErr: wantErrWatcherNotStarted, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + deadline, ok := t.Deadline() + if !ok { + deadline = time.Now().Add(5 * time.Second) + } + ctx, cancel := context.WithDeadline(context.TODO(), deadline) + defer cancel() + + tmpDir := t.TempDir() + updMarkerFilePath := filepath.Join(tmpDir, markerFilename) + + if len(tt.states) > 0 { + initialState := tt.states[0] + writeState(t, updMarkerFilePath, initialState) + } + + wg := new(sync.WaitGroup) + + var furtherStates []details.State + if len(tt.states) > 1 { + // we have more states to produce + furtherStates = tt.states[1:] + + wg.Add(1) + + go func() { + defer wg.Done() + tick := time.NewTicker(tt.stateChangeInterval) + defer tick.Stop() + for _, state := range furtherStates { + select { + case <-ctx.Done(): + return + case <-tick.C: + writeState(t, updMarkerFilePath, state) + } + } + + }() + } + + log, _ := logger.NewTesting(tt.name) + + tt.wantErr(t, waitForWatcher(ctx, log, updMarkerFilePath, tt.timeout), fmt.Sprintf("waitForWatcher %s, %v, %s, %s)", updMarkerFilePath, tt.states, tt.stateChangeInterval, tt.timeout)) + + // cancel context + cancel() + + // wait for 
goroutines to finish + wg.Wait() + }) + } +} + +func writeState(t *testing.T, path string, state details.State) { + ms := newMarkerSerializer(&UpdateMarker{ + Version: "version", + Hash: "hash", + VersionedHome: "versionedHome", + UpdatedOn: time.Now(), + PrevVersion: "prev_version", + PrevHash: "prev_hash", + PrevVersionedHome: "prev_versionedhome", + Acked: false, + Action: nil, + Details: &details.Details{ + TargetVersion: "version", + State: state, + ActionID: "", + Metadata: details.Metadata{}, + }, + }) + + bytes, err := yaml.Marshal(ms) + if assert.NoError(t, err, "error marshaling the test upgrade marker") { + err = os.WriteFile(path, bytes, 0770) + assert.NoError(t, err, "error writing out the test upgrade marker") + } +} diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index 1c44dfc79b3..ae99ba108a9 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -226,7 +226,7 @@ func runElasticAgent(ctx context.Context, cancel context.CancelFunc, override cf } // initiate agent watcher - if err := upgrade.InvokeWatcher(l); err != nil { + if _, err := upgrade.InvokeWatcher(l, paths.TopBinaryPath()); err != nil { // we should not fail because watcher is not working l.Error(errors.New(err, "failed to invoke rollback watcher")) } @@ -629,7 +629,7 @@ func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringConfig) bool { // ongoing upgrade operation, i.e. being re-exec'd and performs // any upgrade-specific work, if needed. 
func handleUpgrade() error { - upgradeMarker, err := upgrade.LoadMarker() + upgradeMarker, err := upgrade.LoadMarker(paths.Data()) if err != nil { return fmt.Errorf("unable to load upgrade marker to check if Agent is being upgraded: %w", err) } diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index 69f0b0b033f..01d86a92dd4 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/logp/configure" + "github.com/elastic/elastic-agent/pkg/control/v2/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" @@ -65,7 +66,7 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { log.Infow("Upgrade Watcher started", "process.pid", os.Getpid(), "agent.version", version.GetAgentPackageVersion()) - marker, err := upgrade.LoadMarker() + marker, err := upgrade.LoadMarker(paths.Data()) if err != nil { log.Error("failed to load marker", err) return err @@ -76,6 +77,8 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { return nil } + log.Infof("Loaded update marker %+v", marker) + locker := filelock.NewAppLocker(paths.Top(), watcherLockFile) if err := locker.TryLock(); err != nil { if errors.Is(err, filelock.ErrAppAlreadyRunning) { @@ -97,7 +100,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { // if we're not within grace and marker is still there it might mean // that cleanup was not performed ok, cleanup everything except current version // hash is the same as hash of agent which initiated watcher. 
- if err := upgrade.Cleanup(log, release.ShortCommit(), true, false); err != nil { + if err := upgrade.Cleanup(log, paths.Top(), paths.VersionedHome(paths.Top()), release.ShortCommit(), true, false); err != nil { log.Error("clean up of prior watcher run failed", err) } // exit nicely @@ -114,7 +117,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { log.Error("Error detected, proceeding to rollback: %v", err) upgradeDetails.SetState(details.StateRollback) - err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash) + err = upgrade.Rollback(ctx, log, client.New(), paths.Top(), marker.PrevVersionedHome, marker.PrevHash) if err != nil { log.Error("rollback failed", err) upgradeDetails.Fail(err) @@ -132,7 +135,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { // Why is this being skipped on Windows? The comment above is not clear. // issue: https://github.com/elastic/elastic-agent/issues/3027 removeMarker := !isWindows() - err = upgrade.Cleanup(log, marker.Hash, removeMarker, false) + err = upgrade.Cleanup(log, paths.Top(), marker.VersionedHome, marker.Hash, removeMarker, false) if err != nil { log.Error("cleanup after successful watch failed", err) } diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 2ff0ef7dacc..f196942a552 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -5,6 +5,7 @@ package install import ( + goerrors "errors" "fmt" "os" "path/filepath" @@ -20,6 +21,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/cli" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" "github.com/elastic/elastic-agent/pkg/utils" ) @@ -109,35 +111,20 @@ func Install(cfgFile, topPath string, unprivileged bool, log *logp.Logger, pt *p errors.M("directory", filepath.Dir(topPath))) } - // 
copy source into install path - // - // Try to detect if we are running with SSDs. If we are increase the copy concurrency, - // otherwise fall back to the default. - copyConcurrency := 1 - hasSSDs, detectHWErr := HasAllSSDs() - if detectHWErr != nil { - fmt.Fprintf(streams.Out, "Could not determine block hardware type, disabling copy concurrency: %s\n", detectHWErr) - } - if hasSSDs { - copyConcurrency = runtime.NumCPU() * 4 + manifest, err := readPackageManifest(dir) + if err != nil { + return utils.FileOwner{}, fmt.Errorf("reading package manifest: %w", err) } + pathMappings := manifest.Package.PathMappings + pt.Describe("Copying install files") - err = copy.Copy(dir, topPath, copy.Options{ - OnSymlink: func(_ string) copy.SymlinkAction { - return copy.Shallow - }, - Sync: true, - NumOfWorkers: int64(copyConcurrency), - }) + err = copyFiles(streams, pathMappings, dir, topPath) if err != nil { pt.Describe("Error copying files") - return utils.FileOwner{}, errors.New( - err, - fmt.Sprintf("failed to copy source directory (%s) to destination (%s)", dir, topPath), - errors.M("source", dir), errors.M("destination", topPath), - ) + return utils.FileOwner{}, err } + pt.Describe("Successfully copied files") // place shell wrapper, if present on platform @@ -229,6 +216,136 @@ func Install(cfgFile, topPath string, unprivileged bool, log *logp.Logger, pt *p return ownership, nil } +func readPackageManifest(extractedPackageDir string) (*v1.PackageManifest, error) { + manifestFilePath := filepath.Join(extractedPackageDir, v1.ManifestFileName) + manifestFile, err := os.Open(manifestFilePath) + if err != nil { + return nil, fmt.Errorf("failed to open package manifest file (%s): %w", manifestFilePath, err) + } + defer manifestFile.Close() + manifest, err := v1.ParseManifest(manifestFile) + if err != nil { + return nil, fmt.Errorf("failed to parse package manifest file %q contents: %w", manifestFilePath, err) + } + + return manifest, nil +} + +func copyFiles(streams 
*cli.IOStreams, pathMappings []map[string]string, srcDir string, topPath string) error { + // copy source into install path + // + // Try to detect if we are running with SSDs. If we are increase the copy concurrency, + // otherwise fall back to the default. + copyConcurrency := 1 + hasSSDs, detectHWErr := HasAllSSDs() + if detectHWErr != nil { + fmt.Fprintf(streams.Out, "Could not determine block hardware type, disabling copy concurrency: %s\n", detectHWErr) + } + if hasSSDs { + copyConcurrency = runtime.NumCPU() * 4 + } + + // these are needed to keep track of what we already copied + copiedFiles := map[string]struct{}{} + // collect any symlink we found that need remapping + symlinks := map[string]string{} + + var copyErrors []error + + // Start copying the remapped paths first + for _, pathMapping := range pathMappings { + for packagePath, installedPath := range pathMapping { + // flag the original path as handled + copiedFiles[packagePath] = struct{}{} + srcPath := filepath.Join(srcDir, packagePath) + dstPath := filepath.Join(topPath, installedPath) + err := copy.Copy(srcPath, dstPath, copy.Options{ + OnSymlink: func(_ string) copy.SymlinkAction { + return copy.Shallow + }, + Sync: true, + NumOfWorkers: int64(copyConcurrency), + }) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to copy source directory (%s) to destination (%s)", packagePath, installedPath), + errors.M("source", packagePath), errors.M("destination", installedPath), + ) + } + } + } + + // copy the remaining files excluding overlaps with the mapped paths + err := copy.Copy(srcDir, topPath, copy.Options{ + OnSymlink: func(source string) copy.SymlinkAction { + target, err := os.Readlink(source) + if err != nil { + // error reading the link, not much choice to leave it unchanged and collect the error + copyErrors = append(copyErrors, fmt.Errorf("unable to read link %q for remapping", source)) + return copy.Skip + } + + // if we find a link, check if its target need to be 
remapped, in which case skip it for now and save it for + // later creation with the remapped target + for _, pathMapping := range pathMappings { + for srcPath, dstPath := range pathMapping { + srcPathLocal := filepath.FromSlash(srcPath) + dstPathLocal := filepath.FromSlash(dstPath) + if strings.HasPrefix(target, srcPathLocal) { + newTarget := strings.Replace(target, srcPathLocal, dstPathLocal, 1) + rel, err := filepath.Rel(srcDir, source) + if err != nil { + copyErrors = append(copyErrors, fmt.Errorf("extracting relative path for %q using %q as base: %w", source, srcDir, err)) + return copy.Skip + } + symlinks[rel] = newTarget + return copy.Skip + } + } + } + + return copy.Shallow + }, + Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { + relPath, err := filepath.Rel(srcDir, src) + if err != nil { + return false, fmt.Errorf("calculating relative path for %s: %w", src, err) + } + // check if we already handled this path as part of the mappings: if we did, skip it + relPath = filepath.ToSlash(relPath) + _, ok := copiedFiles[relPath] + return ok, nil + }, + Sync: true, + NumOfWorkers: int64(copyConcurrency), + }) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to copy source directory (%s) to destination (%s)", srcDir, topPath), + errors.M("source", srcDir), errors.M("destination", topPath), + ) + } + + if len(copyErrors) > 0 { + return fmt.Errorf("errors encountered during copy from %q to %q: %w", srcDir, topPath, goerrors.Join(copyErrors...)) + } + + // Create the remapped symlinks + for src, target := range symlinks { + absSrcPath := filepath.Join(topPath, src) + err := os.Symlink(target, absSrcPath) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to link source %q to destination %q", absSrcPath, target), + ) + } + } + return nil +} + // StartService starts the installed service. // // This should only be called after Install is successful. 
diff --git a/internal/pkg/agent/install/install_test.go b/internal/pkg/agent/install/install_test.go index dd73cac17a8..0fc70105666 100644 --- a/internal/pkg/agent/install/install_test.go +++ b/internal/pkg/agent/install/install_test.go @@ -5,11 +5,17 @@ package install import ( + "os" + "path/filepath" "testing" "github.com/jaypipes/ghw" "github.com/jaypipes/ghw/pkg/block" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/cli" + v1 "github.com/elastic/elastic-agent/pkg/api/v1" ) func TestHasAllSSDs(t *testing.T) { @@ -57,3 +63,125 @@ func TestHasAllSSDs(t *testing.T) { }) } } + +var sampleManifestContent = ` +version: co.elastic.agent/v1 +kind: PackageManifest +package: + version: 8.13.0 + snapshot: true + versioned-home: elastic-agent-fb7370 + path-mappings: + - data/elastic-agent-fb7370: data/elastic-agent-8.13.0-SNAPSHOT-fb7370 + manifest.yaml: data/elastic-agent-8.13.0-SNAPSHOT-fb7370/manifest.yaml +` + +type testLogWriter struct { + t *testing.T +} + +func (tlw testLogWriter) Write(b []byte) (n int, err error) { + tlw.t.Log(b) + return len(b), nil +} + +func TestCopyFiles(t *testing.T) { + type fileType uint + + const ( + REGULAR fileType = iota + DIRECTORY + SYMLINK + ) + + type files struct { + fType fileType + path string + content []byte + } + + type testcase struct { + name string + setupFiles []files + expectedFiles []files + mappings []map[string]string + } + + testcases := []testcase{ + { + name: "simple install package mockup", + setupFiles: []files{ + {fType: REGULAR, path: v1.ManifestFileName, content: []byte(sampleManifestContent)}, + {fType: DIRECTORY, path: filepath.Join("data", "elastic-agent-fb7370"), content: nil}, + {fType: REGULAR, path: filepath.Join("data", "elastic-agent-fb7370", "elastic-agent"), content: []byte("this is an elastic-agent wannabe")}, + {fType: SYMLINK, path: "elastic-agent", content: []byte(filepath.Join("data", "elastic-agent-fb7370", 
"elastic-agent"))}, + }, + expectedFiles: []files{ + {fType: DIRECTORY, path: filepath.Join("data", "elastic-agent-8.13.0-SNAPSHOT-fb7370"), content: nil}, + {fType: REGULAR, path: filepath.Join("data", "elastic-agent-8.13.0-SNAPSHOT-fb7370", v1.ManifestFileName), content: []byte(sampleManifestContent)}, + {fType: REGULAR, path: filepath.Join("data", "elastic-agent-8.13.0-SNAPSHOT-fb7370", "elastic-agent"), content: []byte("this is an elastic-agent wannabe")}, + {fType: SYMLINK, path: "elastic-agent", content: []byte(filepath.Join("data", "elastic-agent-8.13.0-SNAPSHOT-fb7370", "elastic-agent"))}, + }, + mappings: []map[string]string{ + { + "data/elastic-agent-fb7370": "data/elastic-agent-8.13.0-SNAPSHOT-fb7370", + v1.ManifestFileName: "data/elastic-agent-8.13.0-SNAPSHOT-fb7370/" + v1.ManifestFileName, + }, + }}, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tmpSrc := t.TempDir() + tmpDst := t.TempDir() + + for _, sf := range tc.setupFiles { + switch sf.fType { + case REGULAR: + err := os.WriteFile(filepath.Join(tmpSrc, sf.path), sf.content, 0o644) + require.NoErrorf(t, err, "error writing setup file %s in tempDir %s", sf.path, tmpSrc) + + case DIRECTORY: + err := os.MkdirAll(filepath.Join(tmpSrc, sf.path), 0o755) + require.NoErrorf(t, err, "error creating setup directory %s in tempDir %s", sf.path, tmpSrc) + + case SYMLINK: + err := os.Symlink(string(sf.content), filepath.Join(tmpSrc, sf.path)) + require.NoErrorf(t, err, "error creating symlink %s in tempDir %s", sf.path, tmpSrc) + } + } + outWriter := &testLogWriter{t: t} + ioStreams := &cli.IOStreams{ + In: nil, + Out: outWriter, + Err: outWriter, + } + err := copyFiles(ioStreams, tc.mappings, tmpSrc, tmpDst) + assert.NoError(t, err) + + for _, ef := range tc.expectedFiles { + switch ef.fType { + case REGULAR: + if assert.FileExistsf(t, filepath.Join(tmpDst, ef.path), "file %s does not exist in output directory %s", ef.path, tmpDst) { + // check contents + actualContent, err := 
os.ReadFile(filepath.Join(tmpDst, ef.path)) + if assert.NoErrorf(t, err, "error reading expected file %s content", ef.path) { + assert.Equal(t, ef.content, actualContent, "content of expected file %s does not match", ef.path) + } + } + + case DIRECTORY: + assert.DirExistsf(t, filepath.Join(tmpDst, ef.path), "directory %s does not exist in output directory %s", ef.path, tmpDst) + + case SYMLINK: + actualTarget, err := os.Readlink(filepath.Join(tmpDst, ef.path)) + if assert.NoErrorf(t, err, "error reading expected symlink %s in output dir %s", ef.path, tmpDst) { + assert.Equal(t, string(ef.content), actualTarget, "unexpected target for symlink %s", ef.path) + } + } + + } + }) + } + +} diff --git a/internal/pkg/agent/install/install_windows.go b/internal/pkg/agent/install/install_windows.go index 9822b79d2fd..c589ccaf384 100644 --- a/internal/pkg/agent/install/install_windows.go +++ b/internal/pkg/agent/install/install_windows.go @@ -7,12 +7,10 @@ package install import ( - "fmt" "os" "path/filepath" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/utils" "github.com/elastic/elastic-agent/version" ) @@ -37,7 +35,7 @@ func postInstall(topPath string) error { } // create top-level symlink to nested binary - realBinary := filepath.Join(topPath, "data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()), paths.BinaryName) + realBinary := paths.BinaryPath(paths.VersionedHome(topPath), paths.BinaryName) err = os.Symlink(realBinary, binary) if err != nil { return err diff --git a/pkg/api/v1/manifest.go b/pkg/api/v1/manifest.go index bc103b46e32..f4c65def87b 100644 --- a/pkg/api/v1/manifest.go +++ b/pkg/api/v1/manifest.go @@ -12,10 +12,12 @@ import ( ) const ManifestKind = "PackageManifest" +const ManifestFileName = "manifest.yaml" type PackageDesc struct { Version string `yaml:"version,omitempty" json:"version,omitempty"` Snapshot bool 
`yaml:"snapshot,omitempty" json:"snapshot,omitempty"` + Hash string `yaml:"hash,omitempty" json:"hash,omitempty"` VersionedHome string `yaml:"versioned-home,omitempty" json:"versionedHome,omitempty"` PathMappings []map[string]string `yaml:"path-mappings,omitempty" json:"pathMappings,omitempty"` }