diff --git a/debian/rules b/debian/rules index 821aabf9a..5d321df5c 100644 --- a/debian/rules +++ b/debian/rules @@ -62,6 +62,7 @@ override_dh_virtualenv: dh_virtualenv --python python3 \ --use-system-packages \ --extra-pip-arg "--ignore-installed" \ + --extra-pip-arg "--no-cache-dir" \ --extra-pip-arg "--no-index" \ --extra-pip-arg "--find-links=${LOCAL_DEB_PYINDEX}" \ --skip-install \ diff --git a/grr/client/grr_response_client/client_actions/linux/linux.py b/grr/client/grr_response_client/client_actions/linux/linux.py index 9fb7c8a9c..5059440a0 100644 --- a/grr/client/grr_response_client/client_actions/linux/linux.py +++ b/grr/client/grr_response_client/client_actions/linux/linux.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# Lint as: python3 """Linux specific actions.""" from __future__ import absolute_import from __future__ import division @@ -17,7 +18,6 @@ from future.utils import iteritems from grr_response_client import actions -from grr_response_client import client_utils_common from grr_response_client.client_actions import standard from grr_response_core.lib import rdfvalue from grr_response_core.lib import utils @@ -352,21 +352,32 @@ def ProcessFile(self, path, args): raise ValueError("Unknown suffix for file %s." % path) def _InstallDeb(self, path, args): - cmd = "/usr/bin/dpkg" - cmd_args = ["-i", path] - time_limit = args.time_limit - - client_utils_common.Execute( - cmd, - cmd_args, - time_limit=time_limit, - bypass_whitelist=True, - daemon=True) - - # The installer will run in the background and kill the main process - # so we just wait. If something goes wrong, the nanny will restart the - # service after a short while and the client will come back to life. - time.sleep(1000) + pid = os.fork() + if pid == 0: + # This is the child that will become the installer process. + + # We call os.setsid here to become the session leader of this new session + # and the process group leader of the new process group so we don't get + # killed when the main process exits. + try: + os.setsid() + except OSError: + # This only works if the process is running as root. + pass + + env = os.environ.copy() + env.pop("LD_LIBRARY_PATH", None) + env.pop("PYTHON_PATH", None) + + cmd = "/usr/bin/dpkg" + cmd_args = [cmd, "-i", path] + + os.execve(cmd, cmd_args, env) + else: + # The installer will run in the background and kill the main process + # so we just wait. If something goes wrong, the nanny will restart the + # service after a short while and the client will come back to life. + time.sleep(1000) def _InstallRpm(self, path): """Client update for rpm based distros. diff --git a/grr/client/grr_response_client/client_actions/osquery.py b/grr/client/grr_response_client/client_actions/osquery.py index 9db69b8ef..9841a7bbb 100644 --- a/grr/client/grr_response_client/client_actions/osquery.py +++ b/grr/client/grr_response_client/client_actions/osquery.py @@ -237,7 +237,6 @@ def Query(args): TimeoutError: If a call to the osquery executable times out. Error: If anything else goes wrong with the subprocess call. """ - query = args.query.encode("utf-8") timeout = args.timeout_millis / 1000 # `subprocess.run` uses seconds. # TODO: pytype is not aware of the backport. # pytype: disable=module-attr @@ -254,7 +253,7 @@ def Query(args): "--logger_min_status=3", # Disable status logs. "--logger_min_stderr=2", # Only ERROR-level logs to stderr. "--json", # Set output format to JSON. 
-      query,
+      args.query,
   ]

   proc = subprocess.run(
       command,
diff --git a/grr/client/grr_response_client/client_actions/osquery_test.py b/grr/client/grr_response_client/client_actions/osquery_test.py
index 0e721a77b..d929a1bcc 100644
--- a/grr/client/grr_response_client/client_actions/osquery_test.py
+++ b/grr/client/grr_response_client/client_actions/osquery_test.py
@@ -10,6 +10,7 @@
 import hashlib
 import io
 import os
+import platform
 import socket
 import time
@@ -104,6 +105,9 @@ def testFile(self):
     ])
     self.assertEqual(list(table.Column("size")), ["3", "6", "4"])

+  # TODO(hanuszczak): https://github.com/osquery/osquery/issues/4150
+  @skip.If(platform.system() == "Windows",
+           "osquery ignores files with unicode characters.")
   def testFileUnicode(self):
     with temp.AutoTempFilePath(prefix="zółć", suffix="💰") as filepath:
       with io.open(filepath, "wb") as filedesc:
@@ -156,12 +160,14 @@ def testSystemInfo(self):
     results = _Query("SELECT hostname FROM system_info;")
     self.assertLen(results, 1)

-    # Apparently osquery returns FQDN in "hostname" column.
-    hostname = socket.getfqdn()
-
     table = results[0].table
     self.assertLen(table.rows, 1)
-    self.assertEqual(list(table.Column("hostname")), [hostname])
+
+    # osquery sometimes returns the FQDN and sometimes the real hostname, and
+    # it is unclear what determines this. This is why, instead of testing for
+    # exact equality, we accept either of the two.
+    hostname = list(table.Column("hostname"))[0]
+    self.assertIn(hostname, [socket.gethostname(), socket.getfqdn()])

   def testMultipleResults(self):
     with temp.AutoTempDirPath(remove_non_empty=True) as dirpath:
diff --git a/grr/client/grr_response_client/client_actions/timeline.py b/grr/client/grr_response_client/client_actions/timeline.py
new file mode 100644
index 000000000..0eae4565f
--- /dev/null
+++ b/grr/client/grr_response_client/client_actions/timeline.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+"""A module with a client action for timeline collection."""
+from __future__ import absolute_import
+from __future__ import division
+
+from __future__ import unicode_literals
+
+import hashlib
+import os
+import stat as stat_mode
+
+from typing import Iterator
+
+from grr_response_client import actions
+from grr_response_core.lib import rdfvalue
+from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
+from grr_response_core.lib.rdfvalues import timeline as rdf_timeline
+
+
+class Timeline(actions.ActionPlugin):
+  """A client action for timeline collection."""
+
+  in_rdfvalue = rdf_timeline.TimelineArgs
+  out_rdfvalues = [rdf_timeline.TimelineResult]
+
+  _TRANSFER_STORE_ID = rdfvalue.SessionID(flow_name="TransferStore")
+
+  def Run(self, args):
+    """Executes the client action."""
+    result = rdf_timeline.TimelineResult()
+
+    entries = Walk(args.root)
+    for entry_batch in rdf_timeline.TimelineEntry.SerializeStream(entries):
+      entry_batch_blob = rdf_protodict.DataBlob(data=entry_batch)
+      self.SendReply(entry_batch_blob, session_id=self._TRANSFER_STORE_ID)
+
+      entry_batch_blob_id = hashlib.sha256(entry_batch).digest()
+      result.entry_batch_blob_ids.append(entry_batch_blob_id)
+
+      self.Progress()
+
+    self.SendReply(result)
+
+
+def Walk(root):
+  """Walks the filesystem collecting stat information.
+
+  This method recursively descends to all sub-folders and sub-sub-folders and
+  so on. It stops the recursion at device boundaries and does not follow any
+  symlinks (to avoid cycles and potentially infinite virtual filesystems).
+ + Args: + root: A path to the root folder at which the recursion should start. + + Returns: + An iterator over timeline entries with stat information about each file. + """ + try: + dev = os.lstat(root).st_dev + except OSError: + return iter([]) + + def Recurse(path): + """Performs the recursive walk over the file hierarchy.""" + try: + stat = os.lstat(path) + except OSError: + return + + yield rdf_timeline.TimelineEntry.FromStat(path, stat) + + # We want to recurse only to folders on the same device. + if not stat_mode.S_ISDIR(stat.st_mode) or stat.st_dev != dev: + return + + try: + childnames = os.listdir(path) + except OSError: + childnames = [] + + # TODO(hanuszczak): Implement more efficient auto-batcher instead of having + # multi-level iterators. + for childname in childnames: + for entry in Recurse(os.path.join(path, childname)): + yield entry + + return Recurse(root) diff --git a/grr/client/grr_response_client/client_actions/timeline_test.py b/grr/client/grr_response_client/client_actions/timeline_test.py new file mode 100644 index 000000000..d9dfd9aa6 --- /dev/null +++ b/grr/client/grr_response_client/client_actions/timeline_test.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +import hashlib +import io +import os +import platform +import random +import stat as stat_mode + +from absl.testing import absltest +from typing import Text + +from grr_response_client.client_actions import timeline +from grr_response_core.lib.rdfvalues import timeline as rdf_timeline +from grr_response_core.lib.util import temp +from grr.test_lib import client_test_lib +from grr.test_lib import skip +from grr.test_lib import testing_startup + + +# TODO(hanuszczak): `GRRBaseTest` is terrible, try to avoid it in any new code. 
+class TimelineTest(client_test_lib.EmptyActionTest): + + @classmethod + def setUpClass(cls): + super(TimelineTest, cls).setUpClass() + testing_startup.TestInit() + + def testRun(self): + with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath: + for idx in range(64): + temp_filepath = os.path.join(temp_dirpath, "foo{}".format(idx)) + _Touch(temp_filepath, content=os.urandom(random.randint(0, 1024))) + + args = rdf_timeline.TimelineArgs() + args.root = temp_dirpath.encode("utf-8") + + results = self.RunAction(timeline.Timeline, args) + + self.assertNotEmpty(results) + self.assertNotEmpty(results[-1].entry_batch_blob_ids) + + blob_ids = results[-1].entry_batch_blob_ids + for blob in results[:-1]: + self.assertIn(hashlib.sha256(blob.data).digest(), blob_ids) + + +class WalkTest(absltest.TestCase): + + def testSingleFile(self): + with temp.AutoTempDirPath(remove_non_empty=True) as dirpath: + filepath = os.path.join(dirpath, "foo") + _Touch(filepath, content=b"foobar") + + entries = list(timeline.Walk(dirpath.encode("utf-8"))) + self.assertLen(entries, 2) + + self.assertTrue(stat_mode.S_ISDIR(entries[0].mode)) + self.assertEqual(entries[0].path, dirpath.encode("utf-8")) + + self.assertTrue(stat_mode.S_ISREG(entries[1].mode)) + self.assertEqual(entries[1].path, filepath.encode("utf-8")) + self.assertEqual(entries[1].size, 6) + + def testMultipleFiles(self): + with temp.AutoTempDirPath(remove_non_empty=True) as dirpath: + foo_filepath = os.path.join(dirpath, "foo") + bar_filepath = os.path.join(dirpath, "bar") + baz_filepath = os.path.join(dirpath, "baz") + + _Touch(foo_filepath) + _Touch(bar_filepath) + _Touch(baz_filepath) + + entries = list(timeline.Walk(dirpath.encode("utf-8"))) + self.assertLen(entries, 4) + + paths = [_.path for _ in entries[1:]] + self.assertIn(foo_filepath.encode("utf-8"), paths) + self.assertIn(bar_filepath.encode("utf-8"), paths) + self.assertIn(baz_filepath.encode("utf-8"), paths) + + def testNestedDirectories(self): + with temp.AutoTempDirPath(remove_non_empty=True) as root_dirpath: + foobar_dirpath = os.path.join(root_dirpath, "foo", "bar") + os.makedirs(foobar_dirpath) + + foobaz_dirpath = os.path.join(root_dirpath, "foo", "baz") + os.makedirs(foobaz_dirpath) + + quuxnorfthud_dirpath = os.path.join(root_dirpath, "quux", "norf", "thud") + os.makedirs(quuxnorfthud_dirpath) + + entries = list(timeline.Walk(root_dirpath.encode("utf-8"))) + self.assertLen(entries, 7) + + paths = [_.path.decode("utf-8") for _ in entries] + self.assertCountEqual(paths, [ + os.path.join(root_dirpath), + os.path.join(root_dirpath, "foo"), + os.path.join(root_dirpath, "foo", "bar"), + os.path.join(root_dirpath, "foo", "baz"), + os.path.join(root_dirpath, "quux"), + os.path.join(root_dirpath, "quux", "norf"), + os.path.join(root_dirpath, "quux", "norf", "thud"), + ]) + + for entry in entries: + self.assertTrue(stat_mode.S_ISDIR(entry.mode)) + + @skip.If( + platform.system() == "Windows", + reason="Symlinks are not supported on Windows.") + def testSymlinks(self): + with temp.AutoTempDirPath(remove_non_empty=True) as root_dirpath: + sub_dirpath = os.path.join(root_dirpath, "foo", "bar", "baz") + link_path = os.path.join(sub_dirpath, "quux") + + # This creates a cycle, walker should be able to cope with that. 
+      os.makedirs(sub_dirpath)
+      os.symlink(root_dirpath, link_path)
+
+      entries = list(timeline.Walk(root_dirpath.encode("utf-8")))
+      self.assertLen(entries, 5)
+
+      paths = [_.path.decode("utf-8") for _ in entries]
+      self.assertEqual(paths, [
+          os.path.join(root_dirpath),
+          os.path.join(root_dirpath, "foo"),
+          os.path.join(root_dirpath, "foo", "bar"),
+          os.path.join(root_dirpath, "foo", "bar", "baz"),
+          os.path.join(root_dirpath, "foo", "bar", "baz", "quux")
+      ])
+
+      for entry in entries[:-1]:
+        self.assertTrue(stat_mode.S_ISDIR(entry.mode))
+      self.assertTrue(stat_mode.S_ISLNK(entries[-1].mode))
+
+  def testIncorrectPath(self):
+    not_existing_path = os.path.join("some", "not", "existing", "path")
+
+    entries = list(timeline.Walk(not_existing_path.encode("utf-8")))
+    self.assertEmpty(entries)
+
+
+def _Touch(filepath, content=b""):
+  with io.open(filepath, mode="wb") as filedesc:
+    filedesc.write(content)
+
+
+if __name__ == "__main__":
+  absltest.main()
diff --git a/grr/client/setup.py b/grr/client/setup.py
index 321ecd62c..e3121da2e 100644
--- a/grr/client/setup.py
+++ b/grr/client/setup.py
@@ -91,9 +91,7 @@ def make_release_tree(self, base_dir, files):
 )

 if platform.system() == "Linux":
-  # TODO(user): change 'extras_require' to 'install_requires' if/when
-  # chipsec driver-less PIP package shows up on PyPI.
-  setup_args["extras_require"]["chipsec"] = ["chipsec==1.4.3"]
+  setup_args["install_requires"].append("chipsec==1.4.4")

 if platform.system() != "Windows":
   setup_args["install_requires"].append("xattr==0.9.6")
diff --git a/grr/client_builder/grr_response_client_builder/build_helpers.py b/grr/client_builder/grr_response_client_builder/build_helpers.py
index 113660e05..18f094e14 100644
--- a/grr/client_builder/grr_response_client_builder/build_helpers.py
+++ b/grr/client_builder/grr_response_client_builder/build_helpers.py
@@ -174,7 +174,8 @@ def BuildWithPyInstaller(context=None):
     except IOError:
       logging.error("Unable to create file: %s", file_path)

-  version_ini = version.VersionPath()
+  version_ini = config.CONFIG.Get(
+      "ClientBuilder.version_ini_path", default=version.VersionPath())
   shutil.copy(version_ini, os.path.join(output_dir, "version.ini"))

   with io.open(os.path.join(output_dir, "build.yaml"), "wb") as fd:
diff --git a/grr/core/grr_response_core/config/build.py b/grr/core/grr_response_core/config/build.py
index abe7ae61b..4fb0afa5a 100644
--- a/grr/core/grr_response_core/config/build.py
+++ b/grr/core/grr_response_core/config/build.py
@@ -64,6 +64,12 @@
     "grr_service_config.txt.in@grr-response-core|resource)",
     "Path to GRR's Fleetspeak service configuration.")

+config_lib.DEFINE_string(
+    "ClientBuilder.version_ini_path", None,
+    "Path to the version.ini file to be used when building a client template. 
" + "If not specified, version.ini packaged with grr-response-core package " + "will be used.") + class PathTypeInfo(type_info.String): """A path to a file or a directory.""" diff --git a/grr/core/grr_response_core/lib/rdfvalues/stats.py b/grr/core/grr_response_core/lib/rdfvalues/stats.py index ca1219389..781b5ff85 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/stats.py +++ b/grr/core/grr_response_core/lib/rdfvalues/stats.py @@ -109,16 +109,21 @@ def RegisterValue(self, value): class RunningStats(rdf_structs.RDFProtoStruct): - """Class for collecting running stats: mean, stdev and histogram data.""" + """Class for collecting running stats: mean, stddev and histogram data.""" protobuf = jobs_pb2.RunningStats rdf_deps = [ StatsHistogram, ] + def __init__(self, *args, **kwargs): + super(RunningStats, self).__init__(*args, **kwargs) + self._sum_sq = 0 + def RegisterValue(self, value): self.num += 1 self.sum += value - self.sum_sq += value**2 + self._sum_sq += value**2 + self.stddev = math.sqrt(self._sum_sq / self.num - self.mean**2) self.histogram.RegisterValue(value) @@ -129,13 +134,6 @@ def mean(self): else: return self.sum / self.num - @property - def std(self): - if self.num == 0: - return 0 - else: - return math.sqrt(self.sum_sq / self.num - self.mean**2) - class ClientResourcesStats(rdf_structs.RDFProtoStruct): """RDF value representing clients' resources usage statistics for hunts.""" diff --git a/grr/core/grr_response_core/lib/rdfvalues/stats_test.py b/grr/core/grr_response_core/lib/rdfvalues/stats_test.py index 1dd59c6ce..1528b5b86 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/stats_test.py +++ b/grr/core/grr_response_core/lib/rdfvalues/stats_test.py @@ -42,7 +42,7 @@ def testStdDevIsCalculatedCorrectly(self): stats.RegisterValue(v) # Compare calculated standard deviation with a precalculated value. 
- self.assertAlmostEqual(stats.std, 28.86607004) + self.assertAlmostEqual(stats.stddev, 28.86607004) def testHistogramIsCalculatedCorrectly(self): stats = rdf_stats.RunningStats() diff --git a/grr/core/grr_response_core/lib/rdfvalues/timeline.py b/grr/core/grr_response_core/lib/rdfvalues/timeline.py new file mode 100644 index 000000000..302ec4e11 --- /dev/null +++ b/grr/core/grr_response_core/lib/rdfvalues/timeline.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +"""A module with RDF value wrappers for timeline protobufs.""" +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +import os + +from future.builtins import map +from typing import Iterator + +from grr_response_core.lib.rdfvalues import structs as rdf_structs +from grr_response_core.lib.util import compatibility +from grr_response_core.lib.util import gzchunked +from grr_response_proto import timeline_pb2 + + +class TimelineArgs(rdf_structs.RDFProtoStruct): + """An RDF wrapper class for the timeline arguments message.""" + + protobuf = timeline_pb2.TimelineArgs + rdf_deps = [] + + +class TimelineResult(rdf_structs.RDFProtoStruct): + """An RDF wrapper class for the timeline result message.""" + + protobuf = timeline_pb2.TimelineResult + rdf_deps = [] + + +class TimelineEntry(rdf_structs.RDFProtoStruct): + """An RDF wrapper class for the timeline entry message.""" + + protobuf = timeline_pb2.TimelineEntry + rdf_deps = [] + + @classmethod + def FromStat(cls, path, stat): + entry = cls() + entry.path = path + + entry.mode = stat.st_mode + entry.size = stat.st_size + + entry.dev = stat.st_dev + entry.ino = stat.st_ino + + entry.uid = stat.st_uid + entry.gid = stat.st_gid + + if compatibility.PY2: + entry.atime_ns = round(stat.st_atime * 1e9) + entry.mtime_ns = round(stat.st_mtime * 1e9) + entry.ctime_ns = round(stat.st_ctime * 1e9) + else: + # pytype: disable=attribute-error + entry.atime_ns = stat.st_atime_ns + entry.mtime_ns = stat.st_mtime_ns + entry.ctime_ns = stat.st_ctime_ns + # pytype: enable=attribute-error + + return entry + + @classmethod + def SerializeStream( + cls, + entries, + ): + return gzchunked.Serialize(_.SerializeToBytes() for _ in entries) + + @classmethod + def DeserializeStream( + cls, + entries, + ): + return map(cls.FromSerializedBytes, gzchunked.Deserialize(entries)) diff --git a/grr/core/grr_response_core/lib/rdfvalues/timeline_test.py b/grr/core/grr_response_core/lib/rdfvalues/timeline_test.py new file mode 100644 index 000000000..3839325f7 --- /dev/null +++ b/grr/core/grr_response_core/lib/rdfvalues/timeline_test.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +import io +import os +import random +import stat as stat_mode +import time + +from absl.testing import absltest + +from grr_response_core.lib.rdfvalues import timeline as rdf_timeline +from grr_response_core.lib.util import temp + + +class TimelineEntryTest(absltest.TestCase): + + def testFromStat(self): + with temp.AutoTempFilePath() as filepath: + time_before = round(time.time()) + + with io.open(filepath, mode="wb") as filedesc: + filedesc.write(b"1234567") + + time_after = round(time.time()) + + # TODO(hanuszczak): `AutoTempFilePath` should return a `Path` object. 
+    filepath_bytes = filepath.encode("utf-8")
+    filepath_stat = os.lstat(filepath)
+
+    entry = rdf_timeline.TimelineEntry.FromStat(filepath_bytes, filepath_stat)
+
+    self.assertEqual(entry.size, 7)
+    self.assertTrue(stat_mode.S_ISREG(entry.mode))
+
+    # TODO(hanuszczak): Switch this test to use nanosecond precision once we
+    # are Python 3.7-only.
+    self.assertBetween(round(entry.atime_ns / 1e9), time_before, time_after)
+    self.assertBetween(round(entry.mtime_ns / 1e9), time_before, time_after)
+    self.assertBetween(round(entry.ctime_ns / 1e9), time_before, time_after)
+
+    self.assertEqual(entry.dev, filepath_stat.st_dev)
+    self.assertEqual(entry.ino, filepath_stat.st_ino)
+    self.assertEqual(entry.uid, filepath_stat.st_uid)
+    self.assertEqual(entry.gid, filepath_stat.st_gid)
+
+  def testSerializeAndDeserializeStream(self):
+    serialize = rdf_timeline.TimelineEntry.SerializeStream
+    deserialize = rdf_timeline.TimelineEntry.DeserializeStream
+
+    def RandomEntry():
+      entry = rdf_timeline.TimelineEntry()
+      entry.path = os.urandom(4096)
+      entry.mode = random.randint(0x0000, 0xFFFF - 1)
+      entry.size = random.randint(0, 10**9)
+      return entry
+
+    entries = [RandomEntry() for _ in range(3000)]
+
+    self.assertEqual(list(deserialize(serialize(iter(entries)))), entries)
+
+
+if __name__ == "__main__":
+  absltest.main()
diff --git a/grr/core/grr_response_core/lib/util/gzchunked.py b/grr/core/grr_response_core/lib/util/gzchunked.py
new file mode 100644
index 000000000..1fb2fe79e
--- /dev/null
+++ b/grr/core/grr_response_core/lib/util/gzchunked.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""A module with utilities for a very simple serialization format."""
+from __future__ import absolute_import
+from __future__ import division
+
+from __future__ import unicode_literals
+
+import gzip
+import io
+import struct
+
+from typing import Iterator
+
+DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024  # 4 MiB.
+
+
+def Serialize(
+    stream,
+    chunk_size=DEFAULT_CHUNK_SIZE,
+):
+  """Serializes a stream of data into a stream of chunks.
+
+  Args:
+    stream: A stream of data to serialize.
+    chunk_size: An (optional) approximate size of a chunk in bytes. Every
+      non-final chunk will be slightly bigger than the specified number, but
+      this should be negligible.
+
+  Yields:
+    Serialized chunks (in the gzchunked format).
+  """
+  while True:
+    buf = io.BytesIO()
+    buf_entry_count = 0
+
+    with gzip.GzipFile(fileobj=buf, mode="wb") as filedesc:
+      for data in stream:
+        filedesc.write(_UINT64.pack(len(data)))
+        filedesc.write(data)
+        buf_entry_count += 1
+
+        if len(buf.getvalue()) >= chunk_size:
+          break
+
+    if buf_entry_count == 0:
+      break
+
+    yield buf.getvalue()
+
+
+def Deserialize(stream):
+  """Deserializes a stream of chunks into a stream of data.
+
+  Args:
+    stream: A stream of serialized chunks (in the gzchunked format).
+
+  Yields:
+    A stream of deserialized data.
+  """
+  for chunk in stream:
+    buf = io.BytesIO(chunk)
+
+    with gzip.GzipFile(fileobj=buf, mode="rb") as filedesc:
+      while True:
+        count = filedesc.read(_UINT64.size)
+        if not count:
+          break
+        elif len(count) != _UINT64.size:
+          raise ValueError("Incorrect gzchunked data size")
+
+        (count,) = _UINT64.unpack(count)
+
+        data = filedesc.read(count)
+        if len(data) != count:
+          raise ValueError("Content too short")
+
+        yield data
+
+
+_UINT64 = struct.Struct("!Q")  # Network-endian 64-bit unsigned integer format.
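For reference, each chunk yielded by `Serialize` above is a self-contained gzip stream whose decompressed payload is a sequence of records, each prefixed with a big-endian ("network-endian") uint64 length. A minimal round-trip sketch (Python, assuming only the `gzchunked` module added in this diff):

    from grr_response_core.lib.util import gzchunked

    entries = [b"foo", b"bar", b"baz"]

    # Each yielded chunk is an independent gzip stream, so chunks can be stored
    # and transferred separately (e.g. as individual blobs).
    chunks = list(gzchunked.Serialize(iter(entries), chunk_size=1024))

    # Feeding the chunks back in order recovers the original records.
    assert list(gzchunked.Deserialize(iter(chunks))) == entries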
diff --git a/grr/core/grr_response_core/lib/util/gzchunked_test.py b/grr/core/grr_response_core/lib/util/gzchunked_test.py new file mode 100644 index 000000000..07f34fbb1 --- /dev/null +++ b/grr/core/grr_response_core/lib/util/gzchunked_test.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +import gzip +import io +import os +import struct + +from absl.testing import absltest + +from grr_response_core.lib.util import gzchunked + + +class SerializeTest(absltest.TestCase): + + def testEmpty(self): + serialized = list(gzchunked.Serialize(iter([]))) + self.assertEmpty(serialized) + + def testSingleEntry(self): + data = [b"foo"] + + serialized = list(gzchunked.Serialize(iter(data), chunk_size=1024)) + self.assertLen(serialized, 1) + + def testMultipleSmallEntries(self): + data = [b"foo", b"bar", b"baz", b"quux"] + + serialized = list(gzchunked.Serialize(iter(data), chunk_size=1024)) + self.assertLen(serialized, 1) + + def testMultipleBigEntries(self): + data = [os.urandom(1024 * 1024) for _ in range(8)] + + serialized = list(gzchunked.Serialize(iter(data), chunk_size=(1024 * 1024))) + self.assertGreater(len(serialized), 1) + self.assertLessEqual(len(serialized), len(data)) + + +class DeserializeTest(absltest.TestCase): + + def testIncorrectSize(self): + buf = io.BytesIO() + with gzip.GzipFile(fileobj=buf, mode="wb") as filedesc: + filedesc.write(struct.pack("!I", 42)) + + with self.assertRaises(ValueError): + list(gzchunked.Deserialize(iter([buf.getvalue()]))) + + def testIncorrectData(self): + buf = io.BytesIO() + with gzip.GzipFile(fileobj=buf, mode="wb") as filedesc: + filedesc.write(struct.pack("!Q", 8)) + filedesc.write(b"quux") + + with self.assertRaises(ValueError): + list(gzchunked.Deserialize(iter([buf.getvalue()]))) + + def testEmpty(self): + serialized = list(gzchunked.Serialize(iter([]))) + deserialized = list(gzchunked.Deserialize(iter(serialized))) + + self.assertEqual(deserialized, []) + + def testSingleEntry(self): + data = [b"foo"] + + serialized = list(gzchunked.Serialize(iter(data))) + deserialized = list(gzchunked.Deserialize(iter(serialized))) + + self.assertEqual(deserialized, data) + + def testMultipleEntries(self): + data = [b"foo", b"bar", b"baz", b"quux", b"norf", b"thud"] + + serialized = list(gzchunked.Serialize(iter(data))) + deserialized = list(gzchunked.Deserialize(iter(serialized))) + + self.assertEqual(deserialized, data) + + def testEmptyData(self): + data = [b"", b"", b""] + + serialized = list(gzchunked.Serialize(iter(data))) + deserialized = list(gzchunked.Deserialize(iter(serialized))) + + self.assertEqual(deserialized, data) + + def testNoChunks(self): + deserialized = list(gzchunked.Deserialize(iter([]))) + self.assertEmpty(deserialized) + + def testMultipleChunks(self): + data = [os.urandom(1024 * 1024) for _ in range(8)] + + serialized = list(gzchunked.Serialize(iter(data), chunk_size=(1024 * 1024))) + self.assertGreater(len(serialized), 1) + + deserialized = list(gzchunked.Deserialize(iter(serialized))) + self.assertEqual(deserialized, data) + + +if __name__ == "__main__": + absltest.main() diff --git a/grr/core/install_data/systemd/client/grr-client.service b/grr/core/install_data/systemd/client/grr-client.service index deddb6675..e4279a10f 100644 --- a/grr/core/install_data/systemd/client/grr-client.service +++ b/grr/core/install_data/systemd/client/grr-client.service @@ -6,6 +6,7 @@ After=syslog.target network.target Type=simple 
 Restart=always
 RestartSec=120
+KillMode=process
 LimitNOFILE=20000
 Environment=LANG=en_US.UTF-8
 ExecStart=%(ClientBuilder.daemon_link) --config=%(ClientBuilder.target_dir)/%(ClientBuilder.config_filename)
diff --git a/grr/proto/grr_response_proto/flows.proto b/grr/proto/grr_response_proto/flows.proto
index ec84c72a1..6cbfa4699 100644
--- a/grr/proto/grr_response_proto/flows.proto
+++ b/grr/proto/grr_response_proto/flows.proto
@@ -600,11 +600,12 @@ message OnlineNotificationArgs {
   }];
 }

-// Next field ID: 2
+// Next field ID: 3
 message UpdateClientArgs {
-  optional string blob_path = 1 [(sem_type) = {
-    type: "RDFURN",
-    description: "An aff4 path to a GRRSignedBlob of a new client version.",
+  reserved 1;
+  optional string binary_path = 2 [(sem_type) = {
+    description: "Identifies the binary uploaded to the GRR server that has "
+                 "to be run on the client to perform the update.",
   }];
 }
diff --git a/grr/proto/grr_response_proto/jobs.proto b/grr/proto/grr_response_proto/jobs.proto
index 5682ea4ad..36508196d 100644
--- a/grr/proto/grr_response_proto/jobs.proto
+++ b/grr/proto/grr_response_proto/jobs.proto
@@ -1641,7 +1641,8 @@ message RunningStats {
   optional uint64 num = 2;
   optional double sum = 3;
-  optional double sum_sq = 4;
+  reserved 4;
+  optional double stddev = 5;
 }

 message ClientResourcesStats {
diff --git a/grr/proto/grr_response_proto/timeline.proto b/grr/proto/grr_response_proto/timeline.proto
new file mode 100644
index 000000000..038801ea4
--- /dev/null
+++ b/grr/proto/grr_response_proto/timeline.proto
@@ -0,0 +1,59 @@
+syntax = "proto2";
+
+
+// A message describing input arguments for the timeline action.
+message TimelineArgs {
+  // A path to the root folder in which the recursive timeline extraction
+  // should start. It is specified as `bytes` rather than `string` to support
+  // paths with non-unicode characters (which most filesystems allow).
+  optional bytes root = 1;
+
+  // TODO(hanuszczak): Add support for limits (e.g. max depth).
+}
+
+// A message describing the result of the timeline action.
+message TimelineResult {
+  // A list of blob ids that refer to batches of serialized and gzipped
+  // timeline entries. Because the entire timeline can have millions of
+  // entries, it could easily exceed the maximum allowed size for a message.
+  // This is why entries are batched, gzipped and then sent as blobs to the
+  // blobstore, and the relevant ids are returned here for reference.
+  repeated bytes entry_batch_blob_ids = 1;
+}
+
+// A message describing a single entry of the timeline for a particular file.
+// It corresponds to the result of stat-ing the file and is based on the POSIX
+// definition of stat [1].
+//
+// [1]: https://pubs.opengroup.org/onlinepubs/007908799/xsh/sysstat.h.html
+message TimelineEntry {
+  // An absolute path to the file this entry corresponds to.
+  optional bytes path = 1;
+
+  // A mode of the file defined as a standard POSIX bitmask.
+  optional uint32 mode = 2;
+
+  // A size of the file in bytes.
+  optional uint64 size = 3;
+
+  // An identifier of the device containing the file.
+  optional uint64 dev = 4;
+
+  // A serial number of the file.
+  optional uint64 ino = 5;
+
+  // A user identifier of the file.
+  optional int64 uid = 6;
+
+  // A group identifier of the file.
+  optional int64 gid = 7;
+
+  // A time of the last access of the file in nanoseconds since epoch.
+  optional uint64 atime_ns = 8;
+
+  // A time of the last data change of the file in nanoseconds since epoch.
+ optional uint64 mtime_ns = 9; + + // A time of the last status change of the file in nanoseconds since epoch. + optional uint64 ctime_ns = 10; +} diff --git a/grr/proto/setup.py b/grr/proto/setup.py index dab859618..6e7f0ce59 100644 --- a/grr/proto/setup.py +++ b/grr/proto/setup.py @@ -21,7 +21,7 @@ os.chdir(THIS_DIRECTORY) GRPCIO_TOOLS = "grpcio-tools==1.24.1" -PROTOBUF = "protobuf==3.10.0" +PROTOBUF = "protobuf==3.11.1" def get_config(): diff --git a/grr/server/grr_response_server/action_registry.py b/grr/server/grr_response_server/action_registry.py index fbee88e45..cf45d6b40 100644 --- a/grr/server/grr_response_server/action_registry.py +++ b/grr/server/grr_response_server/action_registry.py @@ -56,6 +56,7 @@ "StatFS": server_stubs.StatFS, "StatFile": server_stubs.StatFile, "TransferBuffer": server_stubs.TransferBuffer, + "Timeline": server_stubs.Timeline, "Uninstall": server_stubs.Uninstall, "UpdateAgent": server_stubs.UpdateAgent, "UpdateConfiguration": server_stubs.UpdateConfiguration, diff --git a/grr/server/grr_response_server/bin/config_updater_util_test.py b/grr/server/grr_response_server/bin/config_updater_util_test.py index beef59c30..9d33e5722 100644 --- a/grr/server/grr_response_server/bin/config_updater_util_test.py +++ b/grr/server/grr_response_server/bin/config_updater_util_test.py @@ -148,7 +148,7 @@ def testUploadPythonHack(self): upload_subdirectory="test") python_hack_urn = rdfvalue.RDFURN( "aff4:/config/python_hacks/linux/test/hello_world.py") - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary( + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( python_hack_urn) uploaded_blobs = list( signed_binary_utils.StreamSignedBinaryContents(blob_iterator)) @@ -168,7 +168,7 @@ def testUploadExecutable(self): executable_urn = rdfvalue.RDFURN( "aff4:/config/executables/windows/anti-malware/registry-tools/" "foo.exe") - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary( + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( executable_urn) uploaded_blobs = list( signed_binary_utils.StreamSignedBinaryContents(blob_iterator)) diff --git a/grr/server/grr_response_server/databases/db_hunts_test.py b/grr/server/grr_response_server/databases/db_hunts_test.py index b3c9fb499..30123752f 100644 --- a/grr/server/grr_response_server/databases/db_hunts_test.py +++ b/grr/server/grr_response_server/databases/db_hunts_test.py @@ -1322,7 +1322,7 @@ def testReadHuntClientResourcesStatsCorrectlyAggregatesData(self): self.assertEqual(usage_stats.user_cpu_stats.num, 10) self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 9) - self.assertAlmostEqual(usage_stats.user_cpu_stats.std, 2.8722813232690143) + self.assertAlmostEqual(usage_stats.user_cpu_stats.stddev, 2.87228, 5) self.assertLen(usage_stats.user_cpu_stats.histogram.bins, len(expected_user_cpu_histogram.bins)) for b, model_b in zip(usage_stats.user_cpu_stats.histogram.bins, @@ -1332,7 +1332,7 @@ def testReadHuntClientResourcesStatsCorrectlyAggregatesData(self): self.assertEqual(usage_stats.system_cpu_stats.num, 10) self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 19) - self.assertAlmostEqual(usage_stats.system_cpu_stats.std, 5.744562646538029) + self.assertAlmostEqual(usage_stats.system_cpu_stats.stddev, 5.74456, 5) self.assertLen(usage_stats.system_cpu_stats.histogram.bins, len(expected_system_cpu_histogram.bins)) for b, model_b in zip(usage_stats.system_cpu_stats.histogram.bins, @@ -1342,8 +1342,8 @@ def 
testReadHuntClientResourcesStatsCorrectlyAggregatesData(self): self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10) self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean, 55.5) - self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.std, - 8.616843969807043) + self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.stddev, 8.6168, + 4) self.assertLen(usage_stats.network_bytes_sent_stats.histogram.bins, len(expected_network_histogram.bins)) for b, model_b in zip(usage_stats.network_bytes_sent_stats.histogram.bins, @@ -1365,6 +1365,35 @@ def testReadHuntClientResourcesStatsCorrectlyAggregatesData(self): self.assertEqual(worst_performer.session_id.Path(), "/%s/%s" % (client_id, flow_id)) + def testReadHuntClientResourcesStatsCorrectlyAggregatesVeryLargeNumbers(self): + hunt_obj = rdf_hunt_objects.Hunt(description="foo") + self.db.WriteHuntObject(hunt_obj) + + self._SetupHuntClientAndFlow( + flow_state=rdf_flow_objects.Flow.FlowState.FINISHED, + cpu_time_used=rdf_client_stats.CpuSeconds( + user_cpu_time=3810072130, system_cpu_time=3810072130), + network_bytes_sent=3810072130, + hunt_id=hunt_obj.hunt_id) + self._SetupHuntClientAndFlow( + flow_state=rdf_flow_objects.Flow.FlowState.FINISHED, + cpu_time_used=rdf_client_stats.CpuSeconds( + user_cpu_time=2143939532, system_cpu_time=2143939532), + network_bytes_sent=2143939532, + hunt_id=hunt_obj.hunt_id) + + usage_stats = self.db.ReadHuntClientResourcesStats(hunt_obj.hunt_id) + + self.assertEqual(usage_stats.user_cpu_stats.num, 2) + self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 2977005831, 5) + self.assertAlmostEqual(usage_stats.user_cpu_stats.stddev, 833066299, 5) + self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 2977005831, 5) + self.assertAlmostEqual(usage_stats.system_cpu_stats.stddev, 833066299, 5) + self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean, + 2977005831, 5) + self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.stddev, + 833066299, 5) + def testReadHuntFlowsStatesAndTimestampsWorksCorrectlyForMultipleFlows(self): hunt_obj = rdf_hunt_objects.Hunt(description="foo") self.db.WriteHuntObject(hunt_obj) diff --git a/grr/server/grr_response_server/databases/mysql_hunts.py b/grr/server/grr_response_server/databases/mysql_hunts.py index 758ee5853..186090624 100644 --- a/grr/server/grr_response_server/databases/mysql_hunts.py +++ b/grr/server/grr_response_server/databases/mysql_hunts.py @@ -744,11 +744,11 @@ def ReadHuntClientResourcesStats(self, hunt_id, cursor=None): SELECT COUNT(*), SUM(user_cpu_time_used_micros), - SUM((user_cpu_time_used_micros) * (user_cpu_time_used_micros)), + STDDEV_POP(user_cpu_time_used_micros), SUM(system_cpu_time_used_micros), - SUM((system_cpu_time_used_micros) * (system_cpu_time_used_micros)), + STDDEV_POP(system_cpu_time_used_micros), SUM(network_bytes_sent), - SUM(network_bytes_sent * network_bytes_sent), + STDDEV_POP(network_bytes_sent), """ scaled_bins = [ @@ -769,24 +769,24 @@ def ReadHuntClientResourcesStats(self, hunt_id, cursor=None): cursor.execute(query, [hunt_id_int]) response = cursor.fetchone() - (count, user_sum, user_sq_sum, system_sum, system_sq_sum, network_sum, - network_sq_sum) = response[:7] + (count, user_sum, user_stddev, system_sum, system_stddev, network_sum, + network_stddev) = response[:7] stats = rdf_stats.ClientResourcesStats( user_cpu_stats=rdf_stats.RunningStats( num=count, sum=db_utils.MicrosToSeconds(int(user_sum or 0)), - sum_sq=int(user_sq_sum or 0) / 1e12, + stddev=int(user_stddev or 0) / 1e6, ), 
system_cpu_stats=rdf_stats.RunningStats( num=count, sum=db_utils.MicrosToSeconds(int(system_sum or 0)), - sum_sq=int(system_sq_sum or 0) / 1e12, + stddev=int(system_stddev or 0) / 1e6, ), network_bytes_sent_stats=rdf_stats.RunningStats( num=count, sum=float(network_sum or 0), - sum_sq=float(network_sq_sum or 0), + stddev=float(network_stddev or 0), ), ) diff --git a/grr/server/grr_response_server/flow_responses.py b/grr/server/grr_response_server/flow_responses.py index 81ece35fc..6cc648654 100644 --- a/grr/server/grr_response_server/flow_responses.py +++ b/grr/server/grr_response_server/flow_responses.py @@ -5,7 +5,7 @@ from __future__ import unicode_literals -from typing import Iterable, Optional, TypeVar +from typing import Iterable, Iterator, Optional, TypeVar from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects diff --git a/grr/server/grr_response_server/flows/general/administrative.py b/grr/server/grr_response_server/flows/general/administrative.py index aa1a747b8..ee028f93a 100644 --- a/grr/server/grr_response_server/flows/general/administrative.py +++ b/grr/server/grr_response_server/flows/general/administrative.py @@ -6,6 +6,7 @@ from __future__ import unicode_literals import logging +import os import shlex import time @@ -37,6 +38,7 @@ from grr_response_server.databases import db from grr_response_server.flows.general import discovery from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects +from grr_response_server.rdfvalues import objects as rdf_objects GRR_CLIENT_CRASHES = metrics.Counter("grr_client_crashes") @@ -335,7 +337,7 @@ def Start(self): self.args.hack_name) try: - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary( + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( python_hack_urn) except signed_binary_utils.SignedBinaryNotFoundError: raise flow_base.FlowError("Python hack %s not found." % @@ -488,9 +490,7 @@ def SendMail(self, responses): class UpdateClientArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.UpdateClientArgs - rdf_deps = [ - rdfvalue.RDFURN, - ] + rdf_deps = [] class UpdateClient(flow_base.FlowBase): @@ -499,35 +499,41 @@ class UpdateClient(flow_base.FlowBase): This will execute the specified installer on the client and then run an Interrogate flow. - The new installer needs to be loaded into the database, generally in - /config/executables//installers and must be signed using the - exec signing key. + The new installer's binary has to be uploaded to GRR (as a binary, not as + a Python hack) and must be signed using the exec signing key. - Signing and upload of the file is done with config_updater. + Signing and upload of the file is done with grr_config_updater or through + the API. """ category = "/Administrative/" args_type = UpdateClientArgs - def _BlobIterator(self, binary_urn): + def _BlobIterator(self, binary_id): try: - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary( - binary_urn) + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByID( + binary_id) except signed_binary_utils.SignedBinaryNotFoundError: raise flow_base.FlowError("%s is not a valid signed binary." 
% - self.args.blob_path) + self.args.binary_path) return blob_iterator + @property + def _binary_id(self): + return rdf_objects.SignedBinaryID( + binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE, + path=self.args.binary_path) + def Start(self): """Start.""" - if not self.args.blob_path: + if not self.args.binary_path: raise flow_base.FlowError("Installer binary path is not specified.") - binary_urn = rdfvalue.RDFURN(self.args.blob_path) - self.state.write_path = "%d_%s" % (int(time.time()), binary_urn.Basename()) + self.state.write_path = "%d_%s" % (int( + time.time()), os.path.basename(self.args.binary_path)) - blob_iterator = self._BlobIterator(binary_urn) + blob_iterator = self._BlobIterator(self._binary_id) try: first_blob = next(blob_iterator) except StopIteration: @@ -563,8 +569,7 @@ def SendBlobs(self, responses): raise flow_base.FlowError("Error while calling UpdateAgent: %s" % responses.status) - binary_urn = rdfvalue.RDFURN(self.args.blob_path) - blobs = list(self._BlobIterator(binary_urn)) + blobs = list(self._BlobIterator(self._binary_id)) to_send = blobs[1:-1] if not to_send: @@ -594,8 +599,7 @@ def SendLastBlob(self, responses): raise flow_base.FlowError("Error while calling UpdateAgent: %s" % responses.status) - binary_urn = rdfvalue.RDFURN(self.args.blob_path) - blobs = list(self._BlobIterator(binary_urn)) + blobs = list(self._BlobIterator(self._binary_id)) offset = 0 for b in blobs[:-1]: offset += len(b.data) @@ -829,7 +833,7 @@ class LaunchBinary(flow_base.FlowBase): def _BlobIterator(self, binary_urn): try: - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary( + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( binary_urn) except signed_binary_utils.SignedBinaryNotFoundError: raise flow_base.FlowError("Executable binary %s not found." % diff --git a/grr/server/grr_response_server/flows/general/administrative_test.py b/grr/server/grr_response_server/flows/general/administrative_test.py index d544e4413..4d3dbd9aa 100644 --- a/grr/server/grr_response_server/flows/general/administrative_test.py +++ b/grr/server/grr_response_server/flows/general/administrative_test.py @@ -5,6 +5,7 @@ from __future__ import division from __future__ import unicode_literals +import os import subprocess import sys @@ -274,7 +275,8 @@ def testExecuteBinariesWithArgs(self): maintenance_utils.UploadSignedConfigBlob(code, aff4_path=upload_path) binary_urn = rdfvalue.RDFURN(upload_path) - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(binary_urn) + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + binary_urn) # There should be only a single part to this binary. self.assertLen(list(blob_iterator), 1) @@ -318,7 +320,8 @@ def testExecuteLargeBinaries(self): binary_urn = rdfvalue.RDFURN(upload_path) binary_size = signed_binary_utils.FetchSizeOfSignedBinary(binary_urn) - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(binary_urn) + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + binary_urn) # Total size is 2400. 
self.assertEqual(binary_size, 2400) @@ -388,7 +391,8 @@ def testUpdateClient(self): maintenance_utils.UploadSignedConfigBlob( fake_installer, aff4_path=upload_path, limit=100) - blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinary(upload_path) + blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + upload_path) self.assertLen(list(blob_list), 4) acl_test_lib.CreateAdminUser(self.token.username) @@ -397,7 +401,7 @@ def testUpdateClient(self): administrative.UpdateClient.__name__, client_mock, client_id=self.SetupClient(0, system=""), - blob_path=upload_path, + binary_path=os.path.join(config.CONFIG["Client.platform"], "test.deb"), token=self.token) self.assertEqual(client_mock.GetDownloadedFileContents(), fake_installer) @@ -409,7 +413,8 @@ def testUpdateClientSingleBlob(self): maintenance_utils.UploadSignedConfigBlob( fake_installer, aff4_path=upload_path, limit=1000) - blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinary(upload_path) + blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + upload_path) self.assertLen(list(blob_list), 1) acl_test_lib.CreateAdminUser(self.token.username) @@ -418,7 +423,7 @@ def testUpdateClientSingleBlob(self): compatibility.GetName(administrative.UpdateClient), client_mock, client_id=self.SetupClient(0, system=""), - blob_path=upload_path, + binary_path=os.path.join(config.CONFIG["Client.platform"], "test.deb"), token=self.token) self.assertEqual(client_mock.GetDownloadedFileContents(), fake_installer) diff --git a/grr/server/grr_response_server/flows/general/file_finder.py b/grr/server/grr_response_server/flows/general/file_finder.py index 31810a63f..a3ed82474 100644 --- a/grr/server/grr_response_server/flows/general/file_finder.py +++ b/grr/server/grr_response_server/flows/general/file_finder.py @@ -11,6 +11,7 @@ from future.utils import itervalues from grr_response_core.lib import artifact_utils +from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths @@ -247,10 +248,29 @@ def ProcessAction(self, response): action = self.args.action.action_type if action == rdf_file_finder.FileFinderAction.Action.STAT: - # If action is STAT, we already have all the data we need to send the - # response. - self.state.files_found += 1 - self.SendReply(response) + # If we are dealing with the operating system file api, the stat action + # might need to collect extended attributes or gather information about + # links instead of their targets. In those cases, we need to issue more + # GetFileStatRequest client requests. In all other cases, we already have + # all the data we need to send the response. 
+      s = self.args.action.stat
+      if (self.args.pathtype != rdf_paths.PathSpec.PathType.OS or
+          (s.resolve_links and not s.collect_ext_attrs)):
+        self.state.files_found += 1
+        self.SendReply(response)
+      else:
+        if self.client_version < 3221:
+          self.Error("Client is too old to get requested stat information.")
+          return
+        request = rdf_client_action.GetFileStatRequest(
+            pathspec=response.stat_entry.pathspec,
+            collect_ext_attrs=s.collect_ext_attrs,
+            follow_symlink=s.resolve_links)
+        self.CallClient(
+            server_stubs.GetFileStat,
+            request,
+            next_state=compatibility.GetName(self.ReceiveFileStat),
+            request_data=dict(original_result=response))

     elif (self.args.process_non_regular_files or
           stat.S_ISREG(int(response.stat_entry.st_mode))):
       # Hashing and downloading are only safe for regular files. User has to
@@ -322,16 +342,26 @@ def ProcessAction(self, response):
         response.stat_entry.pathspec,
         request_data=dict(original_result=response))

+  def ReceiveFileStat(self, responses):
+    if "original_result" not in responses.request_data:
+      raise RuntimeError("Got stat information, but original result "
+                         "is missing")
+
+    for response in responses:
+      result = responses.request_data["original_result"]
+      result.stat_entry = response
+      self.SendReply(result)
+
   def ReceiveFileFingerprint(self, urn, hash_obj, request_data=None):
     """Handle hash results from the FingerprintFileLogic."""
-    if "original_result" in request_data:
-      result = request_data["original_result"]
-      result.hash_entry = hash_obj
-      self.SendReply(result)
-    else:
+    if "original_result" not in request_data:
       raise RuntimeError("Got a fingerprintfileresult, but original result "
                          "is missing")

+    result = request_data["original_result"]
+    result.hash_entry = hash_obj
+    self.SendReply(result)
+
   def ReceiveFetchedFile(self, unused_stat_entry, file_hash, request_data=None):
     """Handle downloaded file from MultiGetFileLogic."""
     if "original_result" not in request_data:
diff --git a/grr/server/grr_response_server/flows/general/file_finder_test.py b/grr/server/grr_response_server/flows/general/file_finder_test.py
index 176db05f4..4673cbca6 100644
--- a/grr/server/grr_response_server/flows/general/file_finder_test.py
+++ b/grr/server/grr_response_server/flows/general/file_finder_test.py
@@ -9,6 +9,7 @@
 import hashlib
 import io
 import os
+import stat
 import struct

 from absl import app
@@ -866,6 +867,48 @@ def testUseExternalStores(self):

     self.assertEqual(efs.call_count, 1)

+  def testFollowLinks(self):
+    with temp.AutoTempDirPath(remove_non_empty=True) as tempdir:
+      path = os.path.join(tempdir, "foo")
+      lnk_path = os.path.join(tempdir, "foo_lnk")
+      path_glob = os.path.join(tempdir, "*")
+      with io.open(path, "w") as fd:
+        fd.write("some content")
+
+      os.symlink(path, lnk_path)
+
+      results = self.RunFlow(
+          action=rdf_file_finder.FileFinderAction.Stat(resolve_links=False),
+          paths=[path_glob])
+
+      self.assertLen(results, 2)
+
+      lnk_stats = [
+          r.stat_entry
+          for r in results
+          if stat.S_ISLNK(int(r.stat_entry.st_mode))
+      ]
+      self.assertNotEmpty(lnk_stats, "No stat entry containing a link found.")
+
+      self.assertNotEqual(results[0].stat_entry.st_ino,
+                          results[1].stat_entry.st_ino)
+
+      results = self.RunFlow(
+          action=rdf_file_finder.FileFinderAction.Stat(resolve_links=True),
+          paths=[path_glob])
+
+      self.assertLen(results, 2)
+
+      lnk_stats = [
+          r.stat_entry
+          for r in results
+          if stat.S_ISLNK(int(r.stat_entry.st_mode))
+      ]
+      self.assertEmpty(lnk_stats, "Stat entry containing a link found.")
+
+      self.assertEqual(results[0].stat_entry.st_ino,
results[1].stat_entry.st_ino) + class TestClientFileFinderFlow(flow_test_lib.FlowTestsBaseclass): """Test the ClientFileFinder flow.""" diff --git a/grr/server/grr_response_server/flows/general/timeline.py b/grr/server/grr_response_server/flows/general/timeline.py new file mode 100644 index 000000000..2d46729aa --- /dev/null +++ b/grr/server/grr_response_server/flows/general/timeline.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +"""A module that defines the timeline flow.""" +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +from typing import Iterator +from typing import Text + +from grr_response_core.lib.rdfvalues import timeline as rdf_timeline +from grr_response_server import data_store +from grr_response_server import flow_base +from grr_response_server import flow_responses +from grr_response_server import server_stubs +from grr_response_server.rdfvalues import objects as rdf_objects + + +class TimelineFlow(flow_base.FlowBase): + """A flow mixin wrapping the timeline client action.""" + + friendly_name = "Timeline" + category = "/Collectors/" + behaviours = flow_base.BEHAVIOUR_BASIC + + args_type = rdf_timeline.TimelineArgs + + def Start(self): + super(TimelineFlow, self).Start() + self.CallClient( + action_cls=server_stubs.Timeline, + request=self.args, + next_state=self.Process.__name__) + + def Process( + self, + responses, + ): + if not responses.success: + raise flow_base.FlowError(responses.status) + + for response in responses: + self.SendReply(response) + + +def Entries( + client_id, + flow_id, +): + """Retrieves timeline entries for the specified flow. + + Args: + client_id: An identifier of a client of the flow to retrieve the blobs for. + flow_id: An identifier of the flow to retrieve the blobs for. + + Returns: + An iterator over timeline entries for the specified flow. + """ + blobs = Blobs(client_id, flow_id) + return rdf_timeline.TimelineEntry.DeserializeStream(blobs) + + +def Blobs( + client_id, + flow_id, +): + """Retrieves timeline blobs for the specified flow. + + Args: + client_id: An identifier of a client of the flow to retrieve the blobs for. + flow_id: An identifier of the flow to retrieve the blobs for. + + Yields: + Blobs of the timeline data in the gzchunked format for the specified flow. + """ + results = data_store.REL_DB.ReadFlowResults( + client_id=client_id, + flow_id=flow_id, + offset=0, + count=_READ_FLOW_RESULTS_COUNT) + + for result in results: + payload = result.payload + + if not isinstance(payload, rdf_timeline.TimelineResult): + message = "Unexpected timeline result of type '{}'".format(type(payload)) + raise TypeError(message) + + for entry_batch_blob_id in payload.entry_batch_blob_ids: + blob_id = rdf_objects.BlobID(entry_batch_blob_id) + blob = data_store.BLOBS.ReadBlob(blob_id) + + if blob is None: + message = "Reference to non-existing blob: '{}'".format(blob_id) + raise AssertionError(message) + + yield blob + + +# Number of results should never be big, usually no more than 2 or 3 results +# per flow (because each result is just a block of references to much bigger +# blobs). Just to be on the safe side, we use a number two orders of magnitude +# bigger. 
+_READ_FLOW_RESULTS_COUNT = 1024 diff --git a/grr/server/grr_response_server/flows/general/timeline_test.py b/grr/server/grr_response_server/flows/general/timeline_test.py new file mode 100644 index 000000000..9d8999c06 --- /dev/null +++ b/grr/server/grr_response_server/flows/general/timeline_test.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +from __future__ import absolute_import +from __future__ import division + +from __future__ import unicode_literals + +import os +import stat as stat_mode + +from absl.testing import absltest +from typing import Iterator + +from grr_response_client.client_actions import timeline as timeline_action +from grr_response_core.lib.rdfvalues import timeline as rdf_timeline +from grr_response_core.lib.util import temp +from grr_response_server.flows.general import timeline as timeline_flow +from grr.test_lib import action_mocks +from grr.test_lib import filesystem_test_lib +from grr.test_lib import flow_test_lib +from grr.test_lib import testing_startup + + +class TimelineTest(flow_test_lib.FlowTestsBaseclass): + + @classmethod + def setUpClass(cls): + super(TimelineTest, cls).setUpClass() + testing_startup.TestInit() + + def setUp(self): + super(TimelineTest, self).setUp() + self.client_id = self.SetupClient(0) + + def testSingleFile(self): + with temp.AutoTempDirPath(remove_non_empty=True) as dirpath: + filepath = os.path.join(dirpath, "foo") + filesystem_test_lib.CreateFile(filepath, content=b"foobar") + + entries = list(self._Collect(dirpath.encode("utf-8"))) + self.assertLen(entries, 2) + + self.assertTrue(stat_mode.S_ISDIR(entries[0].mode)) + self.assertEqual(entries[0].path, dirpath.encode("utf-8")) + + self.assertTrue(stat_mode.S_ISREG(entries[1].mode)) + self.assertEqual(entries[1].path, filepath.encode("utf-8")) + self.assertEqual(entries[1].size, 6) + + def testMultipleFiles(self): + with temp.AutoTempDirPath(remove_non_empty=True) as dirpath: + foo_filepath = os.path.join(dirpath, "foo") + filesystem_test_lib.CreateFile(foo_filepath) + + bar_filepath = os.path.join(dirpath, "bar") + filesystem_test_lib.CreateFile(bar_filepath) + + baz_filepath = os.path.join(dirpath, "baz") + filesystem_test_lib.CreateFile(baz_filepath) + + entries = list(self._Collect(dirpath.encode("utf-8"))) + self.assertLen(entries, 4) + + self.assertTrue(stat_mode.S_ISDIR(entries[0].mode)) + self.assertEqual(entries[0].path, dirpath.encode("utf-8")) + + paths = [_.path for _ in entries[1:]] + self.assertIn(foo_filepath.encode("utf-8"), paths) + self.assertIn(bar_filepath.encode("utf-8"), paths) + self.assertIn(baz_filepath.encode("utf-8"), paths) + + for entry in entries[1:]: + self.assertTrue(stat_mode.S_ISREG(entry.mode)) + + def testNestedHierarchy(self): + with temp.AutoTempDirPath(remove_non_empty=True) as dirpath: + thud_filepath = os.path.join(dirpath, "foo", "bar", "baz", "quux", "thud") + filesystem_test_lib.CreateFile(thud_filepath, content=b"thud") + + blargh_filepath = os.path.join(dirpath, "foo", "bar", "blargh") + filesystem_test_lib.CreateFile(blargh_filepath, content=b"blargh") + + entries = list(self._Collect(dirpath.encode("utf-8"))) + self.assertLen(entries, 7) + + paths = [_.path.decode("utf-8") for _ in entries] + self.assertCountEqual(paths, [ + os.path.join(dirpath), + os.path.join(dirpath, "foo"), + os.path.join(dirpath, "foo", "bar"), + os.path.join(dirpath, "foo", "bar", "baz"), + os.path.join(dirpath, "foo", "bar", "baz", "quux"), + os.path.join(dirpath, "foo", "bar", "baz", "quux", "thud"), + os.path.join(dirpath, "foo", "bar", "blargh"), + ]) + + 
entries_by_path = {entry.path.decode("utf-8"): entry for entry in entries} + self.assertEqual(entries_by_path[thud_filepath].size, 4) + self.assertEqual(entries_by_path[blargh_filepath].size, 6) + + # TODO(hanuszczak): Add tests for symlinks. + # TODO(hanuszczak): Add tests for timestamps. + + def _Collect(self, root): + args = rdf_timeline.TimelineArgs(root=root) + + flow_id = flow_test_lib.TestFlowHelper( + timeline_flow.TimelineFlow.__name__, + action_mocks.ActionMock(timeline_action.Timeline), + client_id=self.client_id, + token=self.token, + args=args) + + flow_test_lib.FinishAllFlowsOnClient(self.client_id) + + return timeline_flow.Entries(client_id=self.client_id, flow_id=flow_id) + + +if __name__ == "__main__": + absltest.main() diff --git a/grr/server/grr_response_server/gui/api_plugins/config.py b/grr/server/grr_response_server/gui/api_plugins/config.py index 859e6154c..f8008bc10 100644 --- a/grr/server/grr_response_server/gui/api_plugins/config.py +++ b/grr/server/grr_response_server/gui/api_plugins/config.py @@ -182,7 +182,7 @@ def _GetSignedBinaryMetadata(binary_type, relative_path): """ root_urn = _GetSignedBlobsRoots()[binary_type] binary_urn = root_urn.Add(relative_path) - blob_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinary( + blob_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinaryByURN( binary_urn) binary_size = 0 has_valid_signature = True @@ -256,7 +256,8 @@ def Handle(self, args, token=None): root_urn = _GetSignedBlobsRoots()[args.type] binary_urn = root_urn.Add(args.path) binary_size = signed_binary_utils.FetchSizeOfSignedBinary(binary_urn) - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(binary_urn) + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + binary_urn) chunk_iterator = signed_binary_utils.StreamSignedBinaryContents( blob_iterator, chunk_size=self.CHUNK_SIZE) return api_call_handler_base.ApiBinaryStream( diff --git a/grr/server/grr_response_server/gui/http_api.py b/grr/server/grr_response_server/gui/http_api.py index 5c931bb7c..0451b6281 100644 --- a/grr/server/grr_response_server/gui/http_api.py +++ b/grr/server/grr_response_server/gui/http_api.py @@ -300,7 +300,8 @@ def _FormatResultAsJson(self, result, format_mode=None): return dict(status="OK") if format_mode == JsonMode.PROTO3_JSON_MODE: - json_data = json_format.MessageToJson(result.AsPrimitiveProto()) + json_data = json_format.MessageToJson( + result.AsPrimitiveProto(), float_precision=8) if compatibility.PY2: json_data = json_data.decode("utf-8") return json.Parse(json_data) diff --git a/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py b/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py index 597882744..e209a02e0 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py @@ -288,19 +288,19 @@ def testHuntStatsView(self): self.WaitUntil(self.IsTextPresent, "User CPU mean") self.WaitUntil(self.IsTextPresent, "5.5") - self.WaitUntil(self.IsTextPresent, "User CPU stdev") + self.WaitUntil(self.IsTextPresent, "User CPU stddev") self.WaitUntil(self.IsTextPresent, "2.9") self.WaitUntil(self.IsTextPresent, "System CPU mean") self.WaitUntil(self.IsTextPresent, "11") - self.WaitUntil(self.IsTextPresent, "System CPU stdev") + self.WaitUntil(self.IsTextPresent, "System CPU stddev") self.WaitUntil(self.IsTextPresent, "5.7") self.WaitUntil(self.IsTextPresent, "Network bytes sent mean") 
self.WaitUntil(self.IsTextPresent, "16.5") - self.WaitUntil(self.IsTextPresent, "Network bytes sent stdev") + self.WaitUntil(self.IsTextPresent, "Network bytes sent stddev") self.WaitUntil(self.IsTextPresent, "8.6") def testHuntNotificationIsShownAndClickable(self): diff --git a/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json b/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json index b6a2e9425..6823c55cd 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json +++ b/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json @@ -7290,13 +7290,13 @@ "type": "long", "value": 1 }, - "sum": { + "stddev": { "type": "float", - "value": 3.0 + "value": 0.0 }, - "sum_sq": { + "sum": { "type": "float", - "value": 9.0 + "value": 3.0 } } }, @@ -7498,13 +7498,13 @@ "type": "long", "value": 1 }, - "sum": { + "stddev": { "type": "float", - "value": 2.0 + "value": 0.0 }, - "sum_sq": { + "sum": { "type": "float", - "value": 4.0 + "value": 2.0 } } }, @@ -7706,11 +7706,11 @@ "type": "long", "value": 1 }, - "sum": { + "stddev": { "type": "float", - "value": 1.0 + "value": 0.0 }, - "sum_sq": { + "sum": { "type": "float", "value": 1.0 } @@ -7815,8 +7815,8 @@ ] }, "num": 1, - "sum": 3.0, - "sum_sq": 9.0 + "stddev": 0.0, + "sum": 3.0 }, "system_cpu_stats": { "histogram": { @@ -7885,8 +7885,8 @@ ] }, "num": 1, - "sum": 2.0, - "sum_sq": 4.0 + "stddev": 0.0, + "sum": 2.0 }, "user_cpu_stats": { "histogram": { @@ -7955,8 +7955,8 @@ ] }, "num": 1, - "sum": 1.0, - "sum_sq": 1.0 + "stddev": 0.0, + "sum": 1.0 }, "worst_performers": [ { diff --git a/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json b/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json index 7c6489fd5..51aff5a42 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json +++ b/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json @@ -2258,23 +2258,23 @@ ] }, "num": "1", - "sum": 3.0, - "sumSq": 9.0 + "stddev": 0.0, + "sum": 3.0 }, "systemCpuStats": { "histogram": { "bins": [ { - "rangeMaxValue": 0.10000000149011612 + "rangeMaxValue": 0.1 }, { - "rangeMaxValue": 0.20000000298023224 + "rangeMaxValue": 0.2 }, { - "rangeMaxValue": 0.30000001192092896 + "rangeMaxValue": 0.30000001 }, { - "rangeMaxValue": 0.4000000059604645 + "rangeMaxValue": 0.40000001 }, { "rangeMaxValue": 0.5 @@ -2328,23 +2328,23 @@ ] }, "num": "1", - "sum": 2.0, - "sumSq": 4.0 + "stddev": 0.0, + "sum": 2.0 }, "userCpuStats": { "histogram": { "bins": [ { - "rangeMaxValue": 0.10000000149011612 + "rangeMaxValue": 0.1 }, { - "rangeMaxValue": 0.20000000298023224 + "rangeMaxValue": 0.2 }, { - "rangeMaxValue": 0.30000001192092896 + "rangeMaxValue": 0.30000001 }, { - "rangeMaxValue": 0.4000000059604645 + "rangeMaxValue": 0.40000001 }, { "rangeMaxValue": 0.5 @@ -2398,8 +2398,8 @@ ] }, "num": "1", - "sum": 1.0, - "sumSq": 1.0 + "stddev": 0.0, + "sum": 1.0 }, "worstPerformers": [ { diff --git a/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats-directive.js b/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats-directive.js index f9c8e4453..2ebe9359e 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats-directive.js +++ 
b/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats-directive.js @@ -82,7 +82,7 @@ HuntStatsController.prototype.convertHistogramToComparisonChart_ = function( data, labelFormatFn) { const series = []; let mean = undefined; - let stdev = undefined; + let stddev = undefined; if (data !== undefined) { const bins = data['value']['histogram']['value']['bins']; @@ -108,13 +108,11 @@ HuntStatsController.prototype.convertHistogramToComparisonChart_ = function( if (data['value']['num']) { mean = data['value']['sum']['value'] / data['value']['num']['value']; - stdev = Math.sqrt( - data['value']['sum_sq']['value'] / data['value']['num']['value'] - - Math.pow(mean, 2)); + stddev = data['value']['stddev']['value']; } } - return {mean, stdev, value: {data: series}}; + return {mean, stddev, value: {data: series}}; }; /** diff --git a/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats.html b/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats.html index e4c87a31f..f517b0416 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats.html +++ b/grr/server/grr_response_server/gui/static/angular-components/hunt/hunt-stats.html @@ -5,8 +5,8 @@

User CPU seconds

User CPU mean
{$ ::controller.userCpuStats.mean | number:1 $}
- User CPU stdev
- {$ ::controller.userCpuStats.stdev | number:1 $}
+ User CPU stddev
+ {$ ::controller.userCpuStats.stddev | number:1 $}
Clients Histogram
@@ -21,8 +21,8 @@

System CPU seconds

System CPU mean
{$ ::controller.systemCpuStats.mean | number:1 $}
- System CPU stdev
- {$ ::controller.systemCpuStats.stdev | number:1 $}
+ System CPU stddev
+ {$ ::controller.systemCpuStats.stddev | number:1 $}
Clients Histogram
@@ -37,8 +37,8 @@

Network bytes sent

Network bytes sent mean
{$ ::controller.networkBytesStats.mean | number:1 $}
- Network bytes sent stdev
- {$ ::controller.networkBytesStats.stdev | number:1 $}
+ Network bytes sent stddev
+ {$ ::controller.networkBytesStats.stddev | number:1 $}
Clients Histogram
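Note on the stdev-to-stddev rename above: hunt-stats-directive.js previously derived the standard deviation in the browser from the streamed aggregates as sqrt(sum_sq / num - mean^2); after this change the server reports a precomputed stddev field, which is also why sum_sq disappears from the API docs examples. A minimal sketch of why the two computations agree, in plain Python; the sample series 1..10 is an assumption chosen to match the mean of 5.5 and stddev of 2.8722813 asserted in hunt_test.py, not taken from SampleHuntMock itself:

import math

# Hypothetical user-CPU samples consistent with hunt_test.py's expectations.
values = [float(v) for v in range(1, 11)]

num = len(values)
total = sum(values)                     # the "sum" aggregate
total_sq = sum(v * v for v in values)   # the old "sum_sq" aggregate

mean = total / num                                  # 5.5
old_stdev = math.sqrt(total_sq / num - mean ** 2)   # what the directive computed
assert round(old_stdev, 7) == 2.8722813             # equals the server-sent stddev

Separately, the rangeMaxValue changes in api-v2-docs-examples.json (e.g. 0.10000000149011612 becoming 0.1) come from http_api.py now passing float_precision=8 to json_format.MessageToJson, which rounds away single-precision float representation artifacts.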
diff --git a/grr/server/grr_response_server/gui/ui/lib/api/client_api.ts b/grr/server/grr_response_server/gui/ui/lib/api/client_api.ts index f4a0d3386..5201ae52d 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api/client_api.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api/client_api.ts @@ -39,7 +39,7 @@ export declare interface ApiClient { readonly lastSeenAt?: string; readonly lastBootedAt?: string; readonly lastClock?: string; - readonly labels: ReadonlyArray; + readonly labels?: ReadonlyArray; } /** @@ -57,3 +57,26 @@ export declare interface ApiSearchClientArgs { export declare interface ApiSearchClientResult { readonly items: ReadonlyArray; } + +/** /config/Email.approval_optional_cc_address proto mapping. */ +export declare interface ApiApprovalOptionalCcAddressResult { + readonly value?: { + value?: string, + }; +} + +/** ApiClientApproval proto mapping */ +export declare interface ApiClientApproval { + readonly subject?: ApiClient; + readonly id?: string; + readonly reason?: string; + readonly isValid?: boolean; + readonly isValidMessage?: string; + readonly notifiedUsers?: string[]; + readonly approvers?: string[]; +} + +/** ApiListClientApprovalsResult proto mapping */ +export declare interface ApiListClientApprovalsResult { + readonly items: ApiClientApproval[]; +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/lib/api/client_api_service.ts b/grr/server/grr_response_server/gui/ui/lib/api/client_api_service.ts index 084bb76b4..81d08b36f 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api/client_api_service.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api/client_api_service.ts @@ -1,7 +1,10 @@ import {HttpClient, HttpParams} from '@angular/common/http'; import {Injectable} from '@angular/core'; +import {ApprovalConfig, ApprovalRequest} from '@app/lib/models/client'; import {Observable} from 'rxjs'; -import {ApiClient, ApiSearchClientArgs, ApiSearchClientResult} from './client_api'; +import {map} from 'rxjs/operators'; + +import {ApiApprovalOptionalCcAddressResult, ApiClient, ApiClientApproval, ApiListClientApprovalsResult, ApiSearchClientArgs, ApiSearchClientResult} from './client_api'; /** @@ -39,4 +42,46 @@ export class ClientApiService { return this.http.get( `${URL_PREFIX}/clients/${id}`, {withCredentials: true}); } + + /** Requests approval to give the current user access to a client. */ + requestApproval(args: ApprovalRequest): Observable { + const request = { + approval: { + reason: args.reason, + notified_users: args.approvers, + email_cc_addresses: args.cc, + }, + }; + + return this.http + .post( + `${URL_PREFIX}/users/me/approvals/client/${args.clientId}`, request, + {withCredentials: true}) + .pipe( + map(() => undefined), // The returned Client Approval is unused. + ); + } + + fetchApprovalConfig(): Observable { + return this.http + .get( + `${URL_PREFIX}/config/Email.approval_optional_cc_address`, + {withCredentials: true}) + .pipe( + // Replace empty string (protobuf default) with undefined. + map(res => (res.value || {}).value || undefined), + map(optionalCcEmail => ({optionalCcEmail})), + ); + } + + /** Lists ClientApprovals in reversed chronological order. 
*/ + listApprovals(clientId: string): Observable { + return this.http + .get( + `${URL_PREFIX}/users/me/approvals/client/${clientId}`, + {withCredentials: true}) + .pipe( + map(res => res.items), + ); + } } diff --git a/grr/server/grr_response_server/gui/ui/lib/api/module.ts b/grr/server/grr_response_server/gui/ui/lib/api/module.ts index f6d8c2c0c..f2898443e 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api/module.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api/module.ts @@ -1,4 +1,4 @@ -import {HttpClientModule} from '@angular/common/http'; +import {HttpClientModule, HttpClientXsrfModule} from '@angular/common/http'; import {NgModule} from '@angular/core'; import {ClientApiService} from './client_api_service'; @@ -12,6 +12,10 @@ import {ClientApiService} from './client_api_service'; ], imports: [ HttpClientModule, + HttpClientXsrfModule.withOptions({ + cookieName: 'csrftoken', + headerName: 'X-CSRFToken', + }), ], }) export class ApiModule { diff --git a/grr/server/grr_response_server/gui/ui/lib/models/client.ts b/grr/server/grr_response_server/gui/ui/lib/models/client.ts index 2d7ca554a..528c60969 100644 --- a/grr/server/grr_response_server/gui/ui/lib/models/client.ts +++ b/grr/server/grr_response_server/gui/ui/lib/models/client.ts @@ -41,3 +41,40 @@ export interface Client { /** List of ClientLabels */ readonly labels: ReadonlyArray; } + +/** Approval Request. */ +export interface ApprovalRequest { + readonly clientId: string; + readonly approvers: string[]; + readonly reason: string; + readonly cc: string[]; +} + +/** Configuration for Client Approvals. */ +export interface ApprovalConfig { + readonly optionalCcEmail?: string; +} + +/** Indicates that a ClientApproval has been granted and is valid. */ +export interface Valid { + readonly valid: true; +} + +/** Indicates that a ClientApproval is invalid for a specific reason. */ +export interface Invalid { + readonly valid: false; + readonly reason: string; +} + +/** Status of a ClientApproval. */ +export type ClientApprovalStatus = Valid|Invalid; + +/** Approval for Client access. */ +export interface ClientApproval { + readonly approvalId: string; + readonly clientId: string; + readonly reason: string; + readonly status: ClientApprovalStatus; + readonly requestedApprovers: ReadonlyArray; + readonly approvers: ReadonlyArray; +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/store/client_facade.ts b/grr/server/grr_response_server/gui/ui/store/client_facade.ts index be5f398e8..89ad9a733 100644 --- a/grr/server/grr_response_server/gui/ui/store/client_facade.ts +++ b/grr/server/grr_response_server/gui/ui/store/client_facade.ts @@ -1,28 +1,54 @@ import {Injectable} from '@angular/core'; import {Store} from '@ngrx/store'; import {Observable} from 'rxjs'; -import {filter} from 'rxjs/operators'; +import {filter, map, withLatestFrom} from 'rxjs/operators'; + +import {ApprovalConfig, ApprovalRequest, Client, ClientApproval} from '../lib/models/client'; -import {Client} from '../lib/models/client'; import * as actions from './client/client_actions'; import {ClientState} from './client/client_reducers'; import * as selectors from './client/client_selectors'; -function notUndefined(client?: Client): client is Client { - return client !== undefined; -} - /** Facade for client-related API calls. */ @Injectable() export class ClientFacade { constructor(private readonly store: Store) {} - /** An observable emitting the client loaded by `fetchClient`. 
*/ - readonly client$: Observable = - this.store.select(selectors.client).pipe(filter(notUndefined)); + /** An observable emitting the client loaded by `selectClient`. */ + readonly selectedClient$: Observable = + this.store.select(selectors.clients) + .pipe( + withLatestFrom(this.store.select(selectors.selectedClientId)), + map(([clients, clientId]) => clientId && clients[clientId]), + filter((client): client is Client => client !== undefined), + ); + + /** Loads a client, marks it as selected, and emits it to selectedClient$. */ + selectClient(clientId: string): void { + this.store.dispatch(actions.select({clientId})); + } + + requestApproval(args: ApprovalRequest): void { + this.store.dispatch(actions.requestApproval(args)); + } + + readonly approvalConfig$: Observable = + this.store.select(selectors.approvalConfig); + + fetchApprovalConfig(): void { + this.store.dispatch(actions.fetchApprovalConfig()); + } + + // Approvals are expected to be in reversed chronological order. + readonly latestApproval$: Observable = + this.store.select(selectors.approvals) + .pipe( + withLatestFrom(this.selectedClient$), + map(([approvals, client]) => approvals.find( + approval => approval.clientId === client.clientId)), + ); - /** Loads a client by its ID, to be emitted in `client$`. */ - fetchClient(id: string): void { - this.store.dispatch(actions.fetch({id})); + listClientApprovals(clientId: string) { + this.store.dispatch(actions.listApprovals({clientId})); } } diff --git a/grr/server/grr_response_server/gui/ui/store/store_module.ts b/grr/server/grr_response_server/gui/ui/store/store_module.ts index fd637c8f0..7bea2d868 100644 --- a/grr/server/grr_response_server/gui/ui/store/store_module.ts +++ b/grr/server/grr_response_server/gui/ui/store/store_module.ts @@ -4,7 +4,7 @@ import {Action, StoreModule, StoreRootModule} from '@ngrx/store'; import {ApiModule} from '../lib/api/module'; import {ClientEffects} from './client/client_effects'; -import {clientFetchReducer} from './client/client_reducers'; +import {clientReducer} from './client/client_reducers'; import {CLIENT_FEATURE} from './client/client_selectors'; import {ClientFacade} from './client_facade'; import {ClientSearchEffects} from './client_search/client_search_effects'; @@ -31,7 +31,7 @@ export function clientSearchReducerWrapper( }, }), StoreModule.forFeature('clientSearch', clientSearchReducerWrapper), - StoreModule.forFeature(CLIENT_FEATURE, clientFetchReducer), + StoreModule.forFeature(CLIENT_FEATURE, clientReducer), EffectsModule.forRoot([ClientSearchEffects, ClientEffects]), ], providers: [ diff --git a/grr/server/grr_response_server/gui/wsgiapp.py b/grr/server/grr_response_server/gui/wsgiapp.py index f1d967b61..43d3eba65 100644 --- a/grr/server/grr_response_server/gui/wsgiapp.py +++ b/grr/server/grr_response_server/gui/wsgiapp.py @@ -130,6 +130,9 @@ class RequestHasNoUser(AttributeError): class HttpRequest(werkzeug_wrappers.Request): """HTTP request object to be used in GRR.""" + charset = "utf-8" + encoding_errors = "strict" + def __init__(self, *args, **kwargs): super(HttpRequest, self).__init__(*args, **kwargs) diff --git a/grr/server/grr_response_server/hunt_test.py b/grr/server/grr_response_server/hunt_test.py index b0927b856..4f6981bd6 100644 --- a/grr/server/grr_response_server/hunt_test.py +++ b/grr/server/grr_response_server/hunt_test.py @@ -855,19 +855,20 @@ def testResourceUsageStatsAreReportedCorrectly(self): # Values below are calculated based on SampleHuntMock's behavior. 
self.assertEqual(usage_stats.user_cpu_stats.num, 10) self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 5.5) - self.assertAlmostEqual(usage_stats.user_cpu_stats.std, 2.8722813) + self.assertAlmostEqual(usage_stats.user_cpu_stats.stddev, 2.8722813) self.assertEqual(usage_stats.system_cpu_stats.num, 10) self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 11) - self.assertAlmostEqual(usage_stats.system_cpu_stats.std, 5.7445626) + self.assertAlmostEqual(usage_stats.system_cpu_stats.stddev, 5.7445626) self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10) self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean, 16.5) - self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.std, 8.61684396) + self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.stddev, + 8.61684396) # NOTE: Not checking histograms here. RunningStatsTest tests that mean, # standard deviation and histograms are calculated correctly. Therefore - # if mean/stdev values are correct histograms should be ok as well. + # if mean/stddev values are correct histograms should be ok as well. self.assertLen(usage_stats.worst_performers, 10) diff --git a/grr/server/grr_response_server/server_stubs.py b/grr/server/grr_response_server/server_stubs.py index b0f07379c..ccf9014fc 100644 --- a/grr/server/grr_response_server/server_stubs.py +++ b/grr/server/grr_response_server/server_stubs.py @@ -28,6 +28,7 @@ from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import plist as rdf_plist from grr_response_core.lib.rdfvalues import protodict as rdf_protodict +from grr_response_core.lib.rdfvalues import timeline as rdf_timeline class ClientActionStub(with_metaclass(registry.MetaclassRegistry, object)): @@ -426,3 +427,10 @@ class Osquery(ClientActionStub): in_rdfvalue = rdf_osquery.OsqueryArgs out_rdfvalues = [rdf_osquery.OsqueryResult] + + +class Timeline(ClientActionStub): + """A stub class for the timeline client action.""" + + in_rdfvalue = rdf_timeline.TimelineArgs + out_rdfvalues = [rdf_timeline.TimelineResult] diff --git a/grr/server/grr_response_server/signed_binary_utils.py b/grr/server/grr_response_server/signed_binary_utils.py index 587dd740a..c6823bdb9 100644 --- a/grr/server/grr_response_server/signed_binary_utils.py +++ b/grr/server/grr_response_server/signed_binary_utils.py @@ -144,13 +144,13 @@ def FetchURNsForAllSignedBinaries(): ] -def FetchBlobsForSignedBinary( - binary_urn +def FetchBlobsForSignedBinaryByID( + binary_id ): """Retrieves blobs for the given binary from the datastore. Args: - binary_urn: RDFURN that uniquely identifies the binary. + binary_id: An ID of the binary to be fetched. Returns: A tuple containing an iterator for all the binary's blobs and an @@ -162,9 +162,9 @@ def FetchBlobsForSignedBinary( """ try: references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences( - _SignedBinaryIDFromURN(binary_urn)) + binary_id) except db.UnknownSignedBinaryError: - raise SignedBinaryNotFoundError(binary_urn) + raise SignedBinaryNotFoundError(_SignedBinaryURNFromID(binary_id)) blob_ids = [r.blob_id for r in references.items] raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids) blobs = ( @@ -173,6 +173,25 @@ def FetchBlobsForSignedBinary( return blobs, timestamp +def FetchBlobsForSignedBinaryByURN( + binary_urn +): + """Retrieves blobs for the given binary from the datastore. + + Args: + binary_urn: RDFURN that uniquely identifies the binary. 
+ + Returns: + A tuple containing an iterator for all the binary's blobs and an + RDFDatetime representing when the binary's contents were saved + to the datastore. + + Raises: + SignedBinaryNotFoundError: If no signed binary with the given URN exists. + """ + return FetchBlobsForSignedBinaryByID(_SignedBinaryIDFromURN(binary_urn)) + + def FetchSizeOfSignedBinary(binary_urn): """Returns the size of the given binary (in bytes). diff --git a/grr/server/grr_response_server/signed_binary_utils_test.py b/grr/server/grr_response_server/signed_binary_utils_test.py index baa9d51a0..006f8194a 100644 --- a/grr/server/grr_response_server/signed_binary_utils_test.py +++ b/grr/server/grr_response_server/signed_binary_utils_test.py @@ -31,10 +31,10 @@ def testWriteSignedBinary(self): private_key=self._private_key, public_key=self._public_key, chunk_size=3) - blobs_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinary( + blobs_iter, timestamp = signed_binary_utils.FetchBlobsForSignedBinaryByURN( test_urn) self.assertGreater(timestamp.AsMicrosecondsSinceEpoch(), 0) - self.assertIsInstance(blobs_iterator, collections.Iterator) + self.assertIsInstance(blobs_iter, collections.Iterator) # We expect blobs to have at most 3 contiguous bytes of data. expected_blobs = [ rdf_crypto.SignedBlob().Sign(b"\x00\x11\x22", self._private_key), @@ -42,7 +42,7 @@ def testWriteSignedBinary(self): rdf_crypto.SignedBlob().Sign(b"\x66\x77\x88", self._private_key), rdf_crypto.SignedBlob().Sign(b"\x99", self._private_key) ] - self.assertCountEqual(list(blobs_iterator), expected_blobs) + self.assertCountEqual(list(blobs_iter), expected_blobs) def testWriteSignedBinaryBlobs(self): test_urn = rdfvalue.RDFURN("aff4:/config/executables/foo") @@ -53,10 +53,10 @@ def testWriteSignedBinaryBlobs(self): rdf_crypto.SignedBlob().Sign(b"\x99", self._private_key) ] signed_binary_utils.WriteSignedBinaryBlobs(test_urn, test_blobs) - blobs_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinary( + blobs_iter, timestamp = signed_binary_utils.FetchBlobsForSignedBinaryByURN( test_urn) self.assertGreater(timestamp.AsMicrosecondsSinceEpoch(), 0) - self.assertCountEqual(list(blobs_iterator), test_blobs) + self.assertCountEqual(list(blobs_iter), test_blobs) def testFetchSizeOfSignedBinary(self): binary1_urn = rdfvalue.RDFURN("aff4:/config/executables/foo1") @@ -96,7 +96,7 @@ def testMissingSignedBinary(self): with self.assertRaises(signed_binary_utils.SignedBinaryNotFoundError): signed_binary_utils.DeleteSignedBinary(missing_urn) with self.assertRaises(signed_binary_utils.SignedBinaryNotFoundError): - signed_binary_utils.FetchBlobsForSignedBinary(missing_urn) + signed_binary_utils.FetchBlobsForSignedBinaryByURN(missing_urn) with self.assertRaises(signed_binary_utils.SignedBinaryNotFoundError): signed_binary_utils.FetchSizeOfSignedBinary(missing_urn) @@ -108,7 +108,8 @@ def _WriteTestBinaryAndGetBlobIterator(self, binary_data, chunk_size): private_key=self._private_key, public_key=self._public_key, chunk_size=chunk_size) - blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(binary_urn) + blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( + binary_urn) return blob_iterator def testStreamSignedBinary_SmallBlobs(self): diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py b/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py index 85ce32011..8d1b40157 100644 --- a/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py +++ 
b/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py @@ -14,6 +14,7 @@ from grr_response_test.end_to_end_tests.tests import limits from grr_response_test.end_to_end_tests.tests import memory from grr_response_test.end_to_end_tests.tests import network +from grr_response_test.end_to_end_tests.tests import osquery from grr_response_test.end_to_end_tests.tests import processes from grr_response_test.end_to_end_tests.tests import registry from grr_response_test.end_to_end_tests.tests import transfer diff --git a/grr/test/grr_response_test/lib/api_helpers.py b/grr/test/grr_response_test/lib/api_helpers.py new file mode 100644 index 000000000..fa190e15f --- /dev/null +++ b/grr/test/grr_response_test/lib/api_helpers.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# Lint as: python3 +"""Helper API-client-based functions for self-contained tests.""" +import time + +import requests + +from grr_api_client import api +from grr_response_core import config +from grr_response_core.lib import config_lib + + +class Error(Exception): + """Module-specific base error class.""" + + +class APIEndpointTimeoutError(Error): + """Raised when API endpoint doesn't come online in time.""" + + +class ClientEnrollmentTimeoutError(Error): + """Raised when a client does not enroll in time.""" + + +class ClientVersionTimeoutError(Error): + """Raised when a client doesn't report a specific version in time.""" + + +def GetAdminUIPortFromConfig(config_path): + """Gets the AdminUI.port setting from a given config file.""" + conf = config_lib.LoadConfig(config.CONFIG.MakeNewConfig(), config_path) + return conf["AdminUI.port"] + + +_WAIT_TIMEOUT_SECS = 150 +_CHECK_INTERVAL = 1 + + +def WaitForAPIEndpoint(port): + """Waits for API endpoint to come online.""" + api_endpoint = "http://localhost:%d" % port + + start_time = time.time() + while time.time() - start_time < _WAIT_TIMEOUT_SECS: + try: + grrapi = api.InitHttp(api_endpoint=api_endpoint) + grrapi.ListGrrBinaries() + return grrapi + except (requests.exceptions.ConnectionError, ConnectionRefusedError): + print("Connection error (%s), waiting..." % api_endpoint) + time.sleep(_CHECK_INTERVAL) + continue + + raise APIEndpointTimeoutError("API endpoint %s didn't come up." % + api_endpoint) + + +def WaitForClientToEnroll(grrapi): + """Waits for an already started client to enroll. + + If the client doesn't enroll within ~150 seconds, ClientEnrollmentTimeoutError is raised. + + Args: + grrapi: GRR API object. + + Returns: + A string with an enrolled client's id. + + Raises: + ClientEnrollmentTimeoutError: if the client fails to enroll in time. + """ + start_time = time.time() + while time.time() - start_time < _WAIT_TIMEOUT_SECS: + clients = list(grrapi.SearchClients(query=".")) + + if clients: + return clients[0].client_id + + print("No clients enrolled, waiting...") + time.sleep(_CHECK_INTERVAL) + + raise ClientEnrollmentTimeoutError("Client didn't enroll.") + + +def KillClient(grrapi, client_id): + """Kills a given client.""" + + f = grrapi.Client(client_id).CreateFlow("Kill") + f.WaitUntilDone(timeout=60) + + +def WaitForClientVersionGreaterThan(api_client_obj, min_version): + """Waits until the client version becomes greater than a given value.""" + + start_time = time.time() + while time.time() - start_time < _WAIT_TIMEOUT_SECS: + version = api_client_obj.Get().data.agent_info.client_version + if version > min_version: + print("Got expected client version %d."
% version) + return version + + print("Got client version: %d, must be > %d" % (version, min_version)) + time.sleep(_CHECK_INTERVAL) + + raise ClientVersionTimeoutError( + "Timed out while waiting for the client version > %d." % min_version) diff --git a/grr/test/grr_response_test/lib/self_contained_components.py b/grr/test/grr_response_test/lib/self_contained_components.py new file mode 100644 index 000000000..4b5e92a83 --- /dev/null +++ b/grr/test/grr_response_test/lib/self_contained_components.py @@ -0,0 +1,376 @@ +#!/usr/bin/env python +# Lint as: python3 +"""Functions to run individual GRR components during self-contained testing.""" + +import atexit +import os +import shutil +import signal +import subprocess +import sys +import tempfile +import threading +import time + +from typing import Dict, Iterable, List, Optional, Tuple, Union + +import portpicker + +from grr_response_core.lib import package +from grr_response_test.lib import api_helpers + +ComponentOptions = Dict[str, Union[int, str]] + + +class Error(Exception): + """Module-specific base error class.""" + + +class ConfigInitializationError(Error): + """Raised when a self-contained config can't be written.""" + + +def _ComponentOptionsToArgs(options: Optional[ComponentOptions]) -> List[str]: + if options is None: + return [] + + args = [] + for k, v in options.items(): + args.extend(["-p", "%s=%s" % (k, v)]) + return args + + +def _GetServerComponentArgs(config_path: str) -> List[str]: + """Returns a set of command line arguments for server components. + + Args: + config_path: Path to a config path generated by + self_contained_config_writer. + + Returns: + An iterable with command line arguments to use. + """ + + primary_config_path = package.ResourcePath( + "grr-response-core", "install_data/etc/grr-server.yaml") + secondary_config_path = package.ResourcePath( + "grr-response-test", "grr_response_test/test_data/grr_test.yaml") + return [ + "--config", + primary_config_path, + "--secondary_configs", + ",".join([secondary_config_path, config_path]), + "-p", + "Monitoring.http_port=%d" % portpicker.pick_unused_port(), + "-p", + "AdminUI.webauth_manager=NullWebAuthManager", + ] + + +def _GetRunEndToEndTestsArgs( + client_id: str, + server_config_path: str, + tests: Optional[List[str]] = None, + manual_tests: Optional[List[str]] = None) -> List[str]: + """Returns arguments needed to configure run_end_to_end_tests process. + + Args: + client_id: String with a client id pointing to an already running client. + server_config_path: Path to the server configuration file. + tests: (Optional) List of tests to run. + manual_tests: (Optional) List of manual tests to not skip. + + Returns: + An iterable with command line arguments. + """ + port = api_helpers.GetAdminUIPortFromConfig(server_config_path) + + api_endpoint = "http://localhost:%d" % port + args = [ + "--api_endpoint", + api_endpoint, + "--api_user", + "admin", + "--api_password", + "admin", + "--client_id", + client_id, + "--ignore_test_context", + "True", + ] + if tests is not None: + args += ["--whitelisted_tests", ",".join(tests)] + if manual_tests is not None: + args += ["--manual_tests", ",".join(manual_tests)] + + return args + + +def _StartComponent(main_package: str, args: List[str]) -> subprocess.Popen: + """Starts a new process with a given component. + + This starts a Python interpreter with a "-m" argument followed by + the main package name, thus effectively executing the main() + function of a given package. + + Args: + main_package: Main package path. + args: An iterable with program arguments (not containing the program + executable). + + Returns: + Popen object corresponding to a started process.
+ """ + popen_args = [sys.executable, "-m", main_package] + args + print("Starting %s component: %s" % (main_package, " ".join(popen_args))) + process = subprocess.Popen(popen_args) + print("Component %s pid: %d" % (main_package, process.pid)) + + def KillOnExit(): + if process.poll() is None: + print("Killing %s." % main_package) + process.kill() + process.wait() + + atexit.register(KillOnExit) + + return process + + +def InitConfigs(mysql_database, + mysql_username = None, + mysql_password = None, + logging_path = None, + osquery_path = None): + """Initializes server and client config files.""" + + # Create 2 temporary files to contain server and client configuration files + # that we're about to generate. + # + # TODO(user): migrate to TempFilePath as soon grr.test_lib is moved to + # grr_response_test. + fd, built_server_config_path = tempfile.mkstemp(".yaml") + os.close(fd) + print("Using temp server config path: %s" % built_server_config_path) + fd, built_client_config_path = tempfile.mkstemp(".yaml") + os.close(fd) + print("Using temp client config path: %s" % built_client_config_path) + + def CleanUpConfigs(): + os.remove(built_server_config_path) + os.remove(built_client_config_path) + + atexit.register(CleanUpConfigs) + + # Generate server and client configs. + config_writer_flags = [ + "--dest_server_config_path", + built_server_config_path, + "--dest_client_config_path", + built_client_config_path, + "--config_mysql_database", + mysql_database, + ] + + if mysql_username is not None: + config_writer_flags.extend(["--config_mysql_username", mysql_username]) + + if mysql_password is not None: + config_writer_flags.extend(["--config_mysql_password", mysql_password]) + + if logging_path is not None: + config_writer_flags.extend(["--config_logging_path", logging_path]) + + if osquery_path is not None: + config_writer_flags.extend(["--config_osquery_path", osquery_path]) + + p = _StartComponent( + "grr_response_test.lib.self_contained_config_writer", + config_writer_flags) + if p.wait() != 0: + raise ConfigInitializationError("ConfigWriter execution failed: {}".format( + p.returncode)) + + return (built_server_config_path, built_client_config_path) + + +def StartServerProcesses( + server_config_path, + component_options = None +): + + def Args(): + return _GetServerComponentArgs( + server_config_path) + _ComponentOptionsToArgs(component_options) + + return [ + _StartComponent( + "grr_response_server.gui.admin_ui", + Args()), + _StartComponent( + "grr_response_server.bin.frontend", + Args()), + _StartComponent( + "grr_response_server.bin.worker", + Args()), + ] + + +def StartClientProcess(client_config_path, + component_options = None, + verbose = False): + return _StartComponent( + "grr_response_client.client", + ["--config", client_config_path] + (["--verbose"] if verbose else []) + + _ComponentOptionsToArgs(component_options)) + + +def RunEndToEndTests(client_id, + server_config_path, + tests = None, + manual_tests = None): + """Runs end to end tests on a given client.""" + p = _StartComponent( + "grr_response_test.run_end_to_end_tests", + _GetServerComponentArgs(server_config_path) + _GetRunEndToEndTestsArgs( + client_id, server_config_path, tests=tests, + manual_tests=manual_tests)) + if p.wait() != 0: + raise RuntimeError("RunEndToEndTests execution failed.") + + +def RunBuildTemplate(server_config_path, + component_options = None, + version_ini = None): + """Runs end to end tests on a given client.""" + output_dir = tempfile.mkdtemp() + + def CleanUpTemplate(): + 
shutil.rmtree(output_dir) + + atexit.register(CleanUpTemplate) + + options = dict(component_options or {}) + if version_ini: + fd, version_ini_path = tempfile.mkstemp(".ini") + try: + os.write(fd, version_ini.encode("ascii")) + finally: + os.close(fd) + + options["ClientBuilder.version_ini_path"] = version_ini_path + + p = _StartComponent( + "grr_response_client_builder.client_build", + _GetServerComponentArgs(server_config_path) + + _ComponentOptionsToArgs(options) + ["build", "--output", output_dir]) + if p.wait() != 0: + raise RuntimeError("RunBuildTemplate execution failed.") + + return os.path.join(output_dir, os.listdir(output_dir)[0]) + + +def RunRepackTemplate( + server_config_path: str, + template_path: str, + component_options: Optional[ComponentOptions] = None) -> str: + """Runs 'grr_client_builder repack' to repack a template.""" + output_dir = tempfile.mkdtemp() + + def CleanUpInstaller(): + shutil.rmtree(output_dir) + + atexit.register(CleanUpInstaller) + + p = _StartComponent( + "grr_response_client_builder.client_build", + _GetServerComponentArgs(server_config_path) + + _ComponentOptionsToArgs(component_options) + + ["repack", "--template", template_path, "--output_dir", output_dir]) + if p.wait() != 0: + raise RuntimeError("RunRepackTemplate execution failed.") + + # Repacking may apparently generate more than one file. Just select the + # biggest one: it's guaranteed to be the installer. + paths = [os.path.join(output_dir, fname) for fname in os.listdir(output_dir)] + sizes = [os.path.getsize(p) for p in paths] + _, biggest_path = max(zip(sizes, paths)) + + return biggest_path + + +def RunUploadExe(server_config_path: str, + exe_path: str, + platform: str, + component_options: Optional[ComponentOptions] = None) -> str: + """Runs 'grr_config_updater upload_exe' to upload a binary to GRR.""" + p = _StartComponent( + "grr_response_server.bin.config_updater", + _GetServerComponentArgs(server_config_path) + + _ComponentOptionsToArgs(component_options) + [ + "upload_exe", "--file", exe_path, "--platform", platform, + "--upload_subdirectory", "test" + ]) + if p.wait() != 0: + raise RuntimeError("RunUploadExe execution failed.") + + return "%s/test/%s" % (platform, os.path.basename(exe_path)) + + +_PROCESS_CHECK_INTERVAL = 0.1 + + +def _DieIfSubProcessDies(processes: Iterable[subprocess.Popen], + already_dead_event: threading.Event) -> None: + """Synchronously waits for processes and dies if one dies.""" + while True: + for p in processes: + if p.poll() not in [None, 0]: + # Prevent a double kill. When the main process exits, it kills the + # children. We don't want a child's death to cause a SIGTERM being + # sent to a process that's already exiting. + if already_dead_event.is_set(): + return + + # DieIfSubProcessDies runs in a background thread, raising an exception + # will just kill the thread while what we want is to fail the whole + # process. + print("Subprocess %s died unexpectedly. Killing main process..." % + p.pid) + for kp in processes: + try: + os.kill(kp.pid, signal.SIGTERM) + except OSError: + pass + # sys.exit only exits a thread when called from a thread. + # Killing self with SIGTERM to ensure the process runs necessary + # cleanups before exiting. + os.kill(os.getpid(), signal.SIGTERM) + time.sleep(_PROCESS_CHECK_INTERVAL) + + +def DieIfSubProcessDies( + processes: Iterable[subprocess.Popen]) -> threading.Thread: + """Kills the main process if any of the given processes dies. + + This function is supposed to run in a background thread and monitor provided + processes to ensure they don't die silently. + + Args: + processes: An iterable with subprocess.Popen instances. + + Returns: + Background thread started to monitor the processes.
+ """ + already_dead_event = threading.Event() + t = threading.Thread( + target=_DieIfSubProcessDies, args=(processes, already_dead_event)) + t.daemon = True + t.start() + + def PreventDoubleDeath(): + already_dead_event.set() + + atexit.register(PreventDoubleDeath) + + return t diff --git a/grr/test/grr_response_test/lib/self_contained_config_writer.py b/grr/test/grr_response_test/lib/self_contained_config_writer.py index 57534701a..49ffa4385 100644 --- a/grr/test/grr_response_test/lib/self_contained_config_writer.py +++ b/grr/test/grr_response_test/lib/self_contained_config_writer.py @@ -37,6 +37,12 @@ flags.DEFINE_string("config_logging_path", None, "Base logging path for server components to use.") +flags.DEFINE_string( + name="config_osquery_path", + default="", + help="A path to the osquery executable.", +) + def main(argv): del argv # Unused. @@ -79,6 +85,9 @@ def main(argv): config.CONFIG.Set("Logging.path", flags.FLAGS.config_logging_path) config.CONFIG.Set("Logging.verbose", False) + if flags.FLAGS.config_osquery_path: + config.CONFIG.Set("Osquery.path", flags.FLAGS.config_osquery_path) + config_updater_keys_util.GenerateKeys(config.CONFIG) config.CONFIG.Write() diff --git a/grr/test/grr_response_test/run_end_to_end_tests.py b/grr/test/grr_response_test/run_end_to_end_tests.py index 991292a99..ca3073ce0 100644 --- a/grr/test/grr_response_test/run_end_to_end_tests.py +++ b/grr/test/grr_response_test/run_end_to_end_tests.py @@ -41,6 +41,12 @@ "(Optional) comma-separated list of tests to skip. Trumps " "--whitelisted_tests if there are any conflicts.") +flags.DEFINE_list( + name="manual_tests", + default=[], + help="(optional) A comma-separated list of manual tests to run.", +) + # We use a logging Filter to exclude noisy unwanted log output. 
flags.DEFINE_list("filenames_excluded_from_log", ["connectionpool.py"], "Files whose log messages won't get printed.") @@ -79,6 +85,7 @@ def main(argv): api_password=flags.FLAGS.api_password, whitelisted_tests=flags.FLAGS.whitelisted_tests, blacklisted_tests=flags.FLAGS.blacklisted_tests, + manual_tests=flags.FLAGS.manual_tests, upload_test_binaries=flags.FLAGS.upload_test_binaries) test_runner.Initialize() diff --git a/grr/test/grr_response_test/run_self_contained.py b/grr/test/grr_response_test/run_self_contained.py index a088216c6..379ee61f1 100644 --- a/grr/test/grr_response_test/run_self_contained.py +++ b/grr/test/grr_response_test/run_self_contained.py @@ -1,43 +1,15 @@ #!/usr/bin/env python +# Lint as: python3 """Helper script for running end-to-end tests.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals -import atexit -import os -import signal -import socket -import subprocess import sys -import tempfile -import threading -import time from absl import app from absl import flags -from future.builtins import str -import portpicker import psutil -import requests -from grr_api_client import api -from grr_response_core import config -from grr_response_core.lib import config_lib -from grr_response_core.lib import package - - -class Error(Exception): - """Module-specific base error class.""" - - -class TCPPortTimeout(Error): - """Raised when a TCP port didn't open in time.""" - - -class ClientEnrollmentTimeout(Error): - """Raised when a client does not enroll in time.""" +from grr_response_test.lib import api_helpers +from grr_response_test.lib import self_contained_components flags.DEFINE_list( @@ -61,195 +33,11 @@ class ClientEnrollmentTimeout(Error): flags.DEFINE_string("logging_path", None, "Base logging path for server components to use.") - -def GetServerComponentArgs(config_path): - """Returns a set of command line arguments for server components. - - Args: - config_path: Path to a config path generated by - self_contained_config_writer. - - Returns: - An iterable with command line arguments to use. - """ - - primary_config_path = package.ResourcePath( - "grr-response-core", "install_data/etc/grr-server.yaml") - secondary_config_path = package.ResourcePath( - "grr-response-test", "grr_response_test/test_data/grr_test.yaml") - return [ - "--config", - primary_config_path, - "--secondary_configs", - ",".join([secondary_config_path, config_path]), - "-p", - "Monitoring.http_port=%d" % portpicker.pick_unused_port(), - "-p", - "AdminUI.webauth_manager=NullWebAuthManager", - ] - - -def StartComponent(main_package, args): - """Starts a new process with a given component. - - This starts a Python interpreter with a "-m" argument followed by - the main package name, thus effectively executing the main() - function of a given package. - - Args: - main_package: Main package path. - args: An iterable with program arguments (not containing the program - executable). - - Returns: - Popen object corresponding to a started process. - """ - popen_args = [sys.executable, "-m", main_package] + args - print("Starting %s component: %s" % (main_package, " ".join(popen_args))) - process = subprocess.Popen(popen_args) - print("Component %s pid: %d" % (main_package, process.pid)) - return process - - -_PROCESS_CHECK_INTERVAL = 0.1 - - -def DieIfSubProcessDies(processes): - """Kills the process if any of given processes dies. 
- - This function is supposed to run in a background thread and monitor provided - processes to ensure they don't die silently. - - Args: - processes: An iterable with multiprocessing.Process instances. - """ - while True: - for p in processes: - if p.poll() not in [None, 0]: - # DieIfSubProcessDies runs in a background thread, raising an exception - # will just kill the thread while what we want is to fail the whole - # process. - print("Subprocess %s died unexpectedly. Killing main process..." % - p.pid) - for kp in processes: - try: - os.kill(kp.pid, signal.SIGTERM) - except OSError: - pass - # sys.exit only exits a thread when called from a thread. - # Killing self with SIGTERM to ensure the process runs necessary - # cleanups before exiting. - os.kill(os.getpid(), signal.SIGTERM) - time.sleep(_PROCESS_CHECK_INTERVAL) - - -_TCP_PORT_WAIT_TIMEOUT_SECS = 15 - - -def WaitForTCPPort(port): - """Waits for a given local TCP port to open. - - If the port in question does not open within ~10 seconds, main process gets - killed. - - Args: - port: An integer identifying the port. - - Raises: - TCPPortTimeout: if the port doesn't open. - """ - start_time = time.time() - while time.time() - start_time < _TCP_PORT_WAIT_TIMEOUT_SECS: - try: - sock = socket.create_connection(("localhost", port)) - sock.close() - return - except socket.error: - pass - time.sleep(_PROCESS_CHECK_INTERVAL) - - raise TCPPortTimeout("TCP port %d didn't open." % port) - - -_CLIENT_ENROLLMENT_WAIT_TIMEOUT_SECS = 15 -_CLIENT_ENROLLMENT_CHECK_INTERVAL = 1 - - -def WaitForClientToEnroll(admin_ui_port): - """Waits for an already started client to enroll. - - If the client doesn't enroll within ~100 seconds, main process gets killed. - - Args: - admin_ui_port: AdminUI port to be used with API client library to check for - an enrolled client. - - Returns: - A string with an enrolled client's client id. - - Raises: - ClientEnrollmentTimeout: if the client fails to enroll in time. - """ - api_endpoint = "http://localhost:%d" % admin_ui_port - - start_time = time.time() - while time.time() - start_time < _CLIENT_ENROLLMENT_WAIT_TIMEOUT_SECS * 10: - try: - api_client = api.InitHttp(api_endpoint=api_endpoint) - clients = list(api_client.SearchClients(query=".")) - except requests.exceptions.ConnectionError: - # print("Connection error (%s), waiting..." % api_endpoint) - time.sleep(_CLIENT_ENROLLMENT_CHECK_INTERVAL) - continue - - if clients: - return clients[0].client_id - - print("No clients enrolled, waiting...") - time.sleep(_CLIENT_ENROLLMENT_CHECK_INTERVAL) - - raise ClientEnrollmentTimeout("Client didn't enroll.") - - -def KillClient(admin_ui_port, client_id): - """Kills a given client.""" - - api_endpoint = "http://localhost:%d" % admin_ui_port - api_client = api.InitHttp(api_endpoint=api_endpoint) - - f = api_client.Client(client_id).CreateFlow("Kill") - f.WaitUntilDone(timeout=60) - - -def GetRunEndToEndTestsArgs(client_id, server_config): - """Returns arguments needed to configure run_end_to_end_tests process. - - Args: - client_id: String with a client id pointing to an already running client. - server_config: GRR configuration object with a server configuration. - - Returns: - An iterable with command line arguments. 
- """ - api_endpoint = "http://localhost:%d" % server_config["AdminUI.port"] - args = [ - "--api_endpoint", - api_endpoint, - "--api_user", - "admin", - "--api_password", - "admin", - "--client_id", - client_id, - "--ignore_test_context", - "True", - ] - if flags.FLAGS.tests: - args += ["--whitelisted_tests", ",".join(flags.FLAGS.tests)] - if flags.FLAGS.manual_tests: - args += ["--manual_tests", ",".join(flags.FLAGS.manual_tests)] - - return args +flags.DEFINE_string( + name="osquery_path", + default="", + help="A path to the osquery executable.", +) def main(argv): @@ -258,81 +46,30 @@ def main(argv): if flags.FLAGS.mysql_username is None: raise ValueError("--mysql_username has to be specified.") - # Create 2 temporary files to contain server and client configuration files - # that we're about to generate. - # - # TODO(user): migrate to TempFilePath as soon grr.test_lib is moved to - # grr_response_test. - fd, built_server_config_path = tempfile.mkstemp(".yaml") - os.close(fd) - print("Using temp server config path: %s" % built_server_config_path) - fd, built_client_config_path = tempfile.mkstemp(".yaml") - os.close(fd) - print("Using temp client config path: %s" % built_client_config_path) - - def CleanUpConfigs(): - os.remove(built_server_config_path) - os.remove(built_client_config_path) - - atexit.register(CleanUpConfigs) - # Generate server and client configs. - config_writer_flags = [ - "--dest_server_config_path", - built_server_config_path, - "--dest_client_config_path", - built_client_config_path, - "--config_mysql_database", + server_conf_path, client_conf_path = self_contained_components.InitConfigs( flags.FLAGS.mysql_database, - ] - - if flags.FLAGS.mysql_username is not None: - config_writer_flags.extend( - ["--config_mysql_username", flags.FLAGS.mysql_username]) - - if flags.FLAGS.mysql_password is not None: - config_writer_flags.extend( - ["--config_mysql_password", flags.FLAGS.mysql_password]) - - if flags.FLAGS.logging_path is not None: - config_writer_flags.extend( - ["--config_logging_path", flags.FLAGS.logging_path]) - - p = StartComponent( - "grr_response_test.lib.self_contained_config_writer", - config_writer_flags) - if p.wait() != 0: - raise RuntimeError("ConfigWriter execution failed: {}".format(p.returncode)) - - server_config = config_lib.LoadConfig(config.CONFIG.MakeNewConfig(), - built_server_config_path) - - # Start the client. - preliminary_client_p = StartComponent( - "grr_response_client.client", - ["--config", built_client_config_path]) + mysql_username=flags.FLAGS.mysql_username, + mysql_password=flags.FLAGS.mysql_password, + logging_path=flags.FLAGS.logging_path, + osquery_path=flags.FLAGS.osquery_path) # Start all remaining server components. - server_processes = [ - StartComponent( - "grr_response_server.gui.admin_ui", - GetServerComponentArgs(built_server_config_path)), - StartComponent( - "grr_response_server.bin.frontend", - GetServerComponentArgs(built_server_config_path)), - StartComponent( - "grr_response_server.bin.worker", - GetServerComponentArgs(built_server_config_path)), - ] - # Start a background thread that kills the main process if one of the # server subprocesses dies. 
- t = threading.Thread(target=DieIfSubProcessDies, args=(server_processes,)) - t.daemon = True - t.start() + server_processes = self_contained_components.StartServerProcesses( + server_conf_path) + self_contained_components.DieIfSubProcessDies(server_processes) + + api_port = api_helpers.GetAdminUIPortFromConfig(server_conf_path) + grrapi = api_helpers.WaitForAPIEndpoint(api_port) + + # Start the client. + preliminary_client_p = self_contained_components.StartClientProcess( + client_conf_path) # Wait for the client to enroll and get its id. - client_id = WaitForClientToEnroll(server_config["AdminUI.port"]) + client_id = api_helpers.WaitForClientToEnroll(grrapi) print("Found client id: %s" % client_id) # Python doesn't guarantee the process name of processes started by the Python @@ -341,31 +78,25 @@ def CleanUpConfigs(): # let's get the name via psutil, kill the client and set the # Config.binary_name explicitly. client_binary_name = str(psutil.Process(preliminary_client_p.pid).name()) - KillClient(server_config["AdminUI.port"], client_id) + api_helpers.KillClient(grrapi, client_id) preliminary_client_p.wait() print("Starting the client with Client.binary_name=%s" % client_binary_name) - client_p = StartComponent( - "grr_response_client.client", [ - "--config", built_client_config_path, "-p", - "Client.binary_name=%s" % client_binary_name - ]) - + client_p = self_contained_components.StartClientProcess( + client_conf_path, {"Client.binary_name": client_binary_name}) # Start a background thread that kills the main process if # client subprocess dies. - t = threading.Thread(target=DieIfSubProcessDies, args=([client_p],)) - t.daemon = True - t.start() + self_contained_components.DieIfSubProcessDies([client_p]) # Run the test suite against the enrolled client. - p = StartComponent( - "grr_response_test.run_end_to_end_tests", - GetServerComponentArgs(built_server_config_path) + - GetRunEndToEndTestsArgs(client_id, server_config)) - if p.wait() != 0: - raise RuntimeError("RunEndToEndTests execution failed.") + self_contained_components.RunEndToEndTests( + client_id, + server_conf_path, + tests=flags.FLAGS.tests, + manual_tests=flags.FLAGS.manual_tests) print("RunEndToEndTests execution succeeded.") + sys.exit(0) if __name__ == "__main__": diff --git a/grr/test/grr_response_test/run_self_update_test.py b/grr/test/grr_response_test/run_self_update_test.py new file mode 100644 index 000000000..f73e639f6 --- /dev/null +++ b/grr/test/grr_response_test/run_self_update_test.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python +# Lint as: python3 +"""Helper script for running end-to-end tests.""" + +import platform +import subprocess +import sys + +from absl import app +from absl import flags + +import distro + +from grr_api_client import errors +from grr_response_test.lib import api_helpers +from grr_response_test.lib import self_contained_components + +flags.DEFINE_string("mysql_database", "grr_test_db", + "MySQL database name to use.") + +flags.DEFINE_string("mysql_username", None, "MySQL username to use.") + +flags.DEFINE_string("mysql_password", None, "MySQL password to use.") + +flags.DEFINE_string("logging_path", None, + "Base logging path for server components to use.") + +_HIGHEST_VERSION_INI = """ +[Version] +major = 9 +minor = 9 +revision = 9 +release = 9 +packageversion = %(major)s.%(minor)s.%(revision)spost%(release)s +packagedepends = %(packageversion)s +""" + + +def main(argv): + del argv # Unused. 
+ + if flags.FLAGS.mysql_username is None: + raise ValueError("--mysql_username has to be specified.") + + # Generate server and client configs. + server_conf_path, _ = self_contained_components.InitConfigs( + flags.FLAGS.mysql_database, + mysql_username=flags.FLAGS.mysql_username, + mysql_password=flags.FLAGS.mysql_password, + logging_path=flags.FLAGS.logging_path) + + print("Building the template.") + template_path = self_contained_components.RunBuildTemplate( + server_conf_path, component_options={"Logging.verbose": True}) + + print("Repack %s." % template_path) + installer_path = self_contained_components.RunRepackTemplate( + server_conf_path, template_path) + + version_overrides = { + "Source.version_major": 9, + "Source.version_minor": 9, + "Source.version_revision": 9, + "Source.version_release": 9, + "Source.version_string": "9.9.9.9", + "Source.version_numeric": 9999, + "Template.version_major": 9, + "Template.version_minor": 9, + "Template.version_revision": 9, + "Template.version_release": 9, + "Template.version_string": "9.9.9.9", + "Template.version_numeric": 9999, + } + + print("Building next ver. template.") + next_ver_template_path = self_contained_components.RunBuildTemplate( + server_conf_path, + component_options=version_overrides, + version_ini=_HIGHEST_VERSION_INI) + + print("Repack next ver. %s." % next_ver_template_path) + next_ver_installer_path = self_contained_components.RunRepackTemplate( + server_conf_path, + next_ver_template_path, + component_options=version_overrides) + + print("First installer ready: %s. Next ver. installer ready: %s." % + (installer_path, next_ver_installer_path)) + + print("Starting the server.") + # Start all server components. + # Start a background thread that kills the main process if one of the + # server subprocesses dies. + server_processes = self_contained_components.StartServerProcesses( + server_conf_path) + self_contained_components.DieIfSubProcessDies(server_processes) + + api_port = api_helpers.GetAdminUIPortFromConfig(server_conf_path) + grrapi = api_helpers.WaitForAPIEndpoint(api_port) + + print("Installing the client.") + system = platform.system().lower() + if system == "linux": + distro_id = distro.id() + if distro_id in ["ubuntu", "debian"]: + subprocess.check_call( + ["apt", "install", "--reinstall", "-y", installer_path]) + elif distro_id in ["centos", "rhel", "fedora"]: + subprocess.check_call(["rpm", "-Uvh", installer_path]) + else: + raise RuntimeError("Unsupported linux distro: %s" % distro_id) + elif system == "windows": + subprocess.check_call([installer_path]) + elif system == "darwin": + subprocess.check_call(["installer", "-pkg", installer_path, "-target", "/"]) + else: + raise RuntimeError("Unsupported platform for self-update tests: %s" % + system) + + # Wait for the client to enroll and get its id. + client_id = api_helpers.WaitForClientToEnroll(grrapi) + print("Found client id: %s" % client_id) + + print("Waiting for the client to report the initial version.") + prev_version = api_helpers.WaitForClientVersionGreaterThan( + grrapi.Client(client_id), 0) + + binary_id = self_contained_components.RunUploadExe(server_conf_path, + next_ver_installer_path, + system) + + args = grrapi.types.CreateFlowArgs(flow_name="UpdateClient") + args.binary_path = binary_id + f = grrapi.Client(client_id).CreateFlow(name="UpdateClient", args=args) + try: + f.WaitUntilDone(timeout=60) + print("Update flow finished successfully.
+
+  binary_id = self_contained_components.RunUploadExe(server_conf_path,
+                                                     next_ver_installer_path,
+                                                     system)
+
+  args = grrapi.types.CreateFlowArgs(flow_name="UpdateClient")
+  args.binary_path = binary_id
+  f = grrapi.Client(client_id).CreateFlow(name="UpdateClient", args=args)
+  try:
+    f.WaitUntilDone(timeout=60)
+    print("Update flow finished successfully. This should never happen: "
+          "the client should have been restarted.")
+    sys.exit(-1)
+  except errors.PollTimeoutError:
+    print("Update flow timed out. This shouldn't happen: the flow should "
+          "fail explicitly due to a client restart.")
+    sys.exit(-1)
+  except errors.FlowFailedError:
+    print("Update flow failed (expected behavior, as the client got "
+          "restarted).")
+
+  print("Update flow details:")
+  print(f.Get().data)
+
+  print("Waiting for the client to report the updated version.")
+  api_helpers.WaitForClientVersionGreaterThan(
+      grrapi.Client(client_id), prev_version)
+
+  print("Self-update test successful!")
+
+  sys.exit(0)
+
+
+if __name__ == "__main__":
+  app.run(main)
diff --git a/travis/install.sh b/travis/install.sh
index 5cf8c864d..e2cf23db1 100755
--- a/travis/install.sh
+++ b/travis/install.sh
@@ -7,9 +7,6 @@ set -ex
 source "${HOME}/INSTALL/bin/activate"
 pip install --upgrade pip wheel six setuptools nodeenv
 
-# Skipping the driver build, since we expect the driver to be shipped separately from GRR.
-pip install https://github.com/chipsec/chipsec/archive/6e35771cea2640e6367bd44c6f624f8292c58320.zip --install-option="--skip-driver"
-
 # Install the latest version of nodejs. Some packages
 # may not be compatible with the version.
 nodeenv -p --prebuilt --node=12.11.1
diff --git a/travis/install_client_builder.sh b/travis/install_client_builder.sh
index 26e3a7dd8..bd1931943 100755
--- a/travis/install_client_builder.sh
+++ b/travis/install_client_builder.sh
@@ -8,9 +8,6 @@ set -e
 source "${HOME}/INSTALL/bin/activate"
 pip install --upgrade pip wheel six setuptools
 
-# Skipping the driver build, since we expect the driver to be shipped separately from GRR.
-pip install https://github.com/chipsec/chipsec/archive/6e35771cea2640e6367bd44c6f624f8292c58320.zip --install-option="--skip-driver"
-
 # Get around a Travis bug: https://github.com/travis-ci/travis-ci/issues/8315#issuecomment-327951718
 unset _JAVA_OPTIONS
diff --git a/travis/upload_build_results_to_gcs.py b/travis/upload_build_results_to_gcs.py
index cf16e4dab..3840ff99e 100644
--- a/travis/upload_build_results_to_gcs.py
+++ b/travis/upload_build_results_to_gcs.py
@@ -131,13 +131,16 @@ def _UploadBuildResults(gcs_bucket, gcs_build_results_dir):
                os.environ[_GCS_BUCKET], gcs_build_results_dir)
 
   for build_result in os.listdir(flags.FLAGS.build_results_dir):
-    if not os.path.isfile(
-        os.path.join(flags.FLAGS.build_results_dir, build_result)):
+    path = os.path.join(flags.FLAGS.build_results_dir, build_result)
+    if not os.path.isfile(path):
+      logging.info("Skipping %s as it's not a file.", path)
       continue
+    logging.info("Uploading: %s", path)
     gcs_blob = gcs_bucket.blob("{}/{}".format(gcs_build_results_dir,
                                               build_result))
-    gcs_blob.upload_from_filename(
-        os.path.join(flags.FLAGS.build_results_dir, build_result))
+    gcs_blob.upload_from_filename(path)
+
+  logging.info("GCS upload done.")
 
 
 def _TriggerAppveyorBuild(project_slug_var_name):
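
The logging added above wraps the standard google.cloud.storage upload calls.
For reference, the same calls in isolation (the bucket name and file paths
below are made up for illustration):

  from google.cloud import storage

  client = storage.Client()  # Uses application-default credentials.
  bucket = client.bucket("example-build-results")  # Made-up bucket name.

  # Upload a local file to gs://example-build-results/results/grr.deb.
  blob = bucket.blob("results/grr.deb")
  blob.upload_from_filename("/tmp/build/grr.deb")

  # Copy a blob within the same bucket, which is what
  # _UpdateLatestServerDebDirectory does below.
  bucket.copy_blob(blob, bucket, new_name="_latest_server_deb/grr.deb")
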
logging.info("Deleting previous blob: %s", gcs_blob) gcs_blob.delete() for gcs_blob in new_build_results: build_result_filename = gcs_blob.name.split("/")[-1] latest_build_result_path = "{}/{}".format(_LATEST_SERVER_DEB_GCS_DIR, build_result_filename) + logging.info("Copying blob %s (%s) -> %s", gcs_blob, gcs_bucket, + latest_build_result_path) gcs_bucket.copy_blob( gcs_blob, gcs_bucket, new_name=latest_build_result_path) diff --git a/version.ini b/version.ini index c52735ae1..261b6bd70 100644 --- a/version.ini +++ b/version.ini @@ -3,7 +3,7 @@ major = 3 minor = 4 revision = 0 -release = 0 +release = 1 packageversion = %(major)s.%(minor)s.%(revision)spost%(release)s packagedepends = %(packageversion)s