
Commit

Merge branch 'main' into deps-upgrade-grpc
sergiitk authored Feb 16, 2024
2 parents 55a01b4 + 8175a85 commit edbe2f9
Showing 4 changed files with 160 additions and 54 deletions.
97 changes: 75 additions & 22 deletions framework/test_cases/base_testcase.py
@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base test case used for xds test suites."""

from typing import Optional
import inspect
import traceback
from typing import Optional, Union
import unittest

from absl import logging
from absl.testing import absltest


class BaseTestCase(absltest.TestCase):
# @override
def run(self, result: Optional[unittest.TestResult] = None) -> None:
super().run(result)
# TODO(sergiitk): should this method be returning result? See
@@ -44,10 +46,8 @@ def run(self, result: Optional[unittest.TestResult] = None) -> None:
self.test_name,
f" | Errors count: {total_errors}" if total_errors > 1 else "",
)
if test_errors:
self._print_error_list(test_errors, is_unexpected_error=True)
if test_failures:
self._print_error_list(test_failures)
self._log_test_errors(test_errors, is_unexpected=True)
self._log_test_errors(test_failures)
elif test_unexpected_successes:
logging.error(
"----- PSM Test Case UNEXPECTEDLY SUCCEEDED: %s -----\n",
@@ -85,22 +85,75 @@ def test_name(self) -> str:
"""
return self.id().removeprefix("__main__.").split(" ", 1)[0]

def _print_error_list(
self, errors: list[str], is_unexpected_error: bool = False
def _log_test_errors(self, errors: list[str], is_unexpected: bool = False):
for err in errors:
self._log_framed_test_failure(self.test_name, err, is_unexpected)

@classmethod
def _log_class_hook_failure(cls, error: Exception):
"""
Log error helper for failed unittest hooks, e.g. setUpClass.
Normally we don't want to make external calls in setUpClass.
But when we do, we want to wrap them into try/except, and call
_log_class_hook_failure, so the error is logged in our standard format.
Don't forget to re-raise!
Example:
@classmethod
def setUpClass(cls):
try:
# Making bad external calls that end up raising
raise OSError("Network bad!")
except Exception as error: # noqa pylint: disable=broad-except
cls._log_class_hook_failure(error)
raise
"""
caller: str
try:
caller_info: inspect.FrameInfo = inspect.stack()[1]
caller: str = caller_info.function
except (IndexError, AttributeError):
caller = "undefined_hook"

fake_test_id = f"{cls.__name__}.{caller}"
# The same test name transformation as in self.test_name().
# TODO(sergiitk): move the transformation to a classmethod.
test_name = fake_test_id.removeprefix("__main__.").split(" ", 1)[0]
logging.error("----- PSM Test Case FAILED: %s -----", test_name)
cls._log_framed_test_failure(test_name, error, is_unexpected=True)

@classmethod
def _log_framed_test_failure(
cls,
test_name: str,
error: Union[str, Exception],
is_unexpected: bool = False,
) -> None:
trace: str
if isinstance(error, Exception):
trace = cls._format_error_with_trace(error)
else:
trace = error

# FAILURE is an error explicitly signalled using one of the
# TestCase.assert*() methods, while ERROR means an unexpected exception.
fail_type: str = "ERROR" if is_unexpected_error else "FAILURE"
for err in errors:
logging.error(
"(%(fail_type)s) PSM Interop Test Failed: %(test_id)s"
"\n^^^^^"
"\n[%(test_id)s] PSM Failed Test Traceback BEGIN"
"\n%(error)s"
"[%(test_id)s] PSM Failed Test Traceback END\n",
{
"test_id": self.test_name,
"fail_type": fail_type,
"error": err,
},
)
fail_type: str = "ERROR" if is_unexpected else "FAILURE"
logging.error(
"(%(fail_type)s) PSM Interop Test Failed: %(test_id)s"
"\n^^^^^"
"\n[%(test_id)s] PSM Failed Test Traceback BEGIN"
"\n%(error)s"
"[%(test_id)s] PSM Failed Test Traceback END\n",
{
"test_id": test_name,
"fail_type": fail_type,
"error": trace,
},
)

@classmethod
def _format_error_with_trace(cls, error: Exception) -> str:
return "".join(
traceback.TracebackException.from_exception(error).format()
)
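
For context, and not part of the diff: the FAILURE/ERROR label above mirrors how unittest records outcomes (assert* failures go to result.failures, unexpected exceptions to result.errors), and _format_error_with_trace turns an exception object into a full traceback string via traceback.TracebackException. A minimal, self-contained sketch; the _Demo class and its values are illustrative only:

import traceback
import unittest

class _Demo(unittest.TestCase):
    def test_failure(self):
        # An explicit assert* call is recorded in result.failures -> "FAILURE".
        self.assertEqual(1, 2)

    def test_error(self):
        # An unexpected exception is recorded in result.errors -> "ERROR".
        raise OSError("Network bad!")

result = unittest.TestResult()
unittest.defaultTestLoader.loadTestsFromTestCase(_Demo).run(result)
print(len(result.failures), len(result.errors))  # 1 1

# Same idea as _format_error_with_trace: render a raised exception
# as a multi-line traceback string.
try:
    raise OSError("Network bad!")
except OSError as err:
    print("".join(traceback.TracebackException.from_exception(err).format()))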
48 changes: 27 additions & 21 deletions framework/xds_url_map_testcase.py
@@ -380,27 +380,33 @@ def setUpClass(cls):
# whether setUpClass failed.
cls.addClassCleanup(cls.cleanupAfterTests)

if not cls.started_test_cases:
# Create the GCP resource once before the first test start
GcpResourceManager().setup(cls.test_case_classes)
cls.started_test_cases.add(cls.__name__)

# Create the test case's own client runner with its own namespace,
# which enables running concurrently with other test cases.
cls.test_client_runner = (
GcpResourceManager().create_test_client_runner()
)
# Start the client, and allow the test to override the initial RPC config.
rpc, metadata = cls.client_init_config(
rpc="UnaryCall,EmptyCall", metadata=""
)
cls.test_client = cls.test_client_runner.run(
server_target=f"xds:///{cls.hostname()}",
rpc=rpc,
metadata=metadata,
qps=QPS.value,
print_response=True,
)
# Normally we don't want to make external calls in setUpClass.
try:
if not cls.started_test_cases:
# Create the GCP resource once before the first test start
GcpResourceManager().setup(cls.test_case_classes)
cls.started_test_cases.add(cls.__name__)

# Create the test case's own client runner with its own namespace,
# which enables running concurrently with other test cases.
cls.test_client_runner = (
GcpResourceManager().create_test_client_runner()
)
# Start the client, and allow the test to override the initial
# RPC config.
rpc, metadata = cls.client_init_config(
rpc="UnaryCall,EmptyCall", metadata=""
)
cls.test_client = cls.test_client_runner.run(
server_target=f"xds:///{cls.hostname()}",
rpc=rpc,
metadata=metadata,
qps=QPS.value,
print_response=True,
)
except Exception as error: # noqa pylint: disable=broad-except
cls._log_class_hook_failure(error)
raise

@classmethod
def cleanupAfterTests(cls):
19 changes: 19 additions & 0 deletions tests/fake_test.py
@@ -80,5 +80,24 @@ def test_even(self):
self.fail(f"Integer {num} is odd")


class FakeSetupClassTest(xds_k8s_testcase.XdsKubernetesBaseTestCase):
"""A fake class to debug BaseTestCase logs produced by setupClassError.
See FakeTest for notes on provisioning.
"""

@classmethod
def setUpClass(cls):
try:
# Making bad external calls that end up raising
raise OSError("Network bad!")
except Exception as error: # noqa pylint: disable=broad-except
cls._log_class_hook_failure(error)
raise

def test_should_never_run(self):
self.fail("IF YOU SEE ME, SOMETHING IS WRONG!")


if __name__ == "__main__":
absltest.main()
50 changes: 39 additions & 11 deletions tests/gamma/csm_observability_test.py
@@ -96,7 +96,7 @@

GammaServerRunner = gamma_server_runner.GammaServerRunner
KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
BuildQueryFn = Callable[[str], str]
BuildQueryFn = Callable[[str, str], str]
ANY = unittest.mock.ANY


@@ -201,13 +201,36 @@ def test_csm_observability(self):
start_time={"seconds": start_secs},
end_time={"seconds": end_secs},
)
histogram_results = self.query_metrics(
HISTOGRAM_METRICS, self.build_histogram_query, interval
server_histogram_results = self.query_metrics(
HISTOGRAM_SERVER_METRICS,
self.build_histogram_query,
self.server_namespace,
interval,
)
counter_results = self.query_metrics(
COUNTER_METRICS, self.build_counter_query, interval
client_histogram_results = self.query_metrics(
HISTOGRAM_CLIENT_METRICS,
self.build_histogram_query,
self.client_namespace,
interval,
)
all_results = {**histogram_results, **counter_results}
server_counter_results = self.query_metrics(
COUNTER_SERVER_METRICS,
self.build_counter_query,
self.server_namespace,
interval,
)
client_counter_results = self.query_metrics(
COUNTER_CLIENT_METRICS,
self.build_counter_query,
self.client_namespace,
interval,
)
all_results = {
**server_histogram_results,
**client_histogram_results,
**server_counter_results,
**client_counter_results,
}
self.assertNotEmpty(all_results, msg="No query metrics results")

with self.subTest("5_check_metrics_time_series"):
@@ -362,7 +385,7 @@ def test_csm_observability(self):
)

@classmethod
def build_histogram_query(cls, metric_type: str) -> str:
def build_histogram_query(cls, metric_type: str, namespace: str) -> str:
#
# The list_time_series API requires us to query one metric
# at a time.
@@ -375,25 +398,30 @@ def build_histogram_query(cls, metric_type: str) -> str:
# The 'grpc_method' filter condition is needed because the
# server metrics also include the Channelz requests it serves.
#
# The 'resource.labels.namespace' filter condition allows us to
# filter metrics just for the current test run.
return (
f'metric.type = "{metric_type}" AND '
'metric.labels.grpc_status = "OK" AND '
f'metric.labels.grpc_method = "{GRPC_METHOD_NAME}"'
f'metric.labels.grpc_method = "{GRPC_METHOD_NAME}" AND '
f'resource.labels.namespace = "{namespace}"'
)

@classmethod
def build_counter_query(cls, metric_type: str) -> str:
def build_counter_query(cls, metric_type: str, namespace: str) -> str:
# These 'num rpcs started' counter metrics do not have the
# 'grpc_status' label.
return (
f'metric.type = "{metric_type}" AND '
f'metric.labels.grpc_method = "{GRPC_METHOD_NAME}"'
f'metric.labels.grpc_method = "{GRPC_METHOD_NAME}" AND '
f'resource.labels.namespace = "{namespace}"'
)

def query_metrics(
self,
metric_names: Iterable[str],
build_query_fn: BuildQueryFn,
namespace: str,
interval: monitoring_v3.TimeInterval,
) -> dict[str, MetricTimeSeries]:
"""
@@ -424,7 +452,7 @@ def query_metrics(
logger.info("Requesting list_time_series for metric %s", metric)
response = self.metric_client.list_time_series(
name=f"projects/{self.project}",
filter=build_query_fn(metric),
filter=build_query_fn(metric, namespace),
interval=interval,
view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
retry=retry_settings,
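
For reference, and not part of the diff: with the new namespace parameter, build_histogram_query and build_counter_query assemble plain Cloud Monitoring list_time_series filter strings. A small sketch of the resulting counter filter; the metric type, method name, and namespace below are placeholders, not the module's real constants:

# All values here are illustrative placeholders.
metric_type = "example.googleapis.com/grpc/client/attempt/started"  # stands in for a COUNTER_* metric
grpc_method = "grpc.testing.TestService/UnaryCall"  # stands in for GRPC_METHOD_NAME
namespace = "example-client-namespace"  # stands in for self.client_namespace

counter_filter = (
    f'metric.type = "{metric_type}" AND '
    f'metric.labels.grpc_method = "{grpc_method}" AND '
    f'resource.labels.namespace = "{namespace}"'
)
# counter_filter ->
#   metric.type = "example.googleapis.com/grpc/client/attempt/started" AND
#   metric.labels.grpc_method = "grpc.testing.TestService/UnaryCall" AND
#   resource.labels.namespace = "example-client-namespace"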
