From 74b3bbf9d949e2f2225d4100976baf20098b5e7b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 18 Feb 2025 10:00:41 +0000
Subject: [PATCH 1/9] build(deps): bump actions/create-github-app-token from
 1.11.3 to 1.11.5 (#4059)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1.11.3 to 1.11.5.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ivana Kellyer --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ae9ae279c7..4d8c060f6a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Get auth token id: token - uses: actions/create-github-app-token@67e27a7eb7db372a1c61a7f9bdab8699e9ee57f7 # v1.11.3 + uses: actions/create-github-app-token@0d564482f06ca65fa9e77e2510873638c82206f2 # v1.11.5 with: app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }} private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }} From a5ce968d6542bdd486ab99ce00d756723d804cdc Mon Sep 17 00:00:00 2001 From: Tony Xiao Date: Tue, 18 Feb 2025 11:05:39 -0500 Subject: [PATCH 2/9] feat(profiling): Add new functions to start/stop continuous profiler (#4056) The `start_profiler` and `stop_profiler` functions were renamed to `start_profile_session` and `stop_profile_session` respectively. --- sentry_sdk/profiler/continuous_profiler.py | 14 ++++ tests/profiler/test_continuous_profiler.py | 86 ++++++++++++++++++++-- 2 files changed, 92 insertions(+), 8 deletions(-) diff --git a/sentry_sdk/profiler/continuous_profiler.py b/sentry_sdk/profiler/continuous_profiler.py index 1619925bd2..9e2aa35fc1 100644 --- a/sentry_sdk/profiler/continuous_profiler.py +++ b/sentry_sdk/profiler/continuous_profiler.py @@ -145,6 +145,13 @@ def try_profile_lifecycle_trace_start(): def start_profiler(): # type: () -> None + + # TODO: deprecate this as it'll be replaced by `start_profile_session` + start_profile_session() + + +def start_profile_session(): + # type: () -> None if _scheduler is None: return @@ -153,6 +160,13 @@ def start_profiler(): def stop_profiler(): # type: () -> None + + # TODO: deprecate this as it'll be replaced by `stop_profile_session` + stop_profile_session() + + +def stop_profile_session(): + # type: () -> None if _scheduler is None: return diff --git a/tests/profiler/test_continuous_profiler.py b/tests/profiler/test_continuous_profiler.py index 525616c9a8..78335d7b87 100644 --- a/tests/profiler/test_continuous_profiler.py +++ b/tests/profiler/test_continuous_profiler.py @@ -11,7 +11,9 @@ get_profiler_id, setup_continuous_profiler, start_profiler, + start_profile_session, stop_profiler, + stop_profile_session, ) from tests.conftest import ApproxDict @@ -207,6 +209,21 @@ def assert_single_transaction_without_profile_chunks(envelopes): pytest.param("gevent", marks=requires_gevent), ], ) +@pytest.mark.parametrize( + ["start_profiler_func", "stop_profiler_func"], + [ + pytest.param( + start_profile_session, + stop_profile_session, + id="start_profile_session/stop_profile_session", + ), + pytest.param( + start_profiler, + stop_profiler, + id="start_profiler/stop_profiler (deprecated)", + ), + ], +) @pytest.mark.parametrize( "make_options", [ @@ -219,6 +236,8 @@ def test_continuous_profiler_auto_start_and_manual_stop( sentry_init, capture_envelopes, mode, + start_profiler_func, + stop_profiler_func, make_options, teardown_profiling, ): @@ -239,7 +258,7 @@ def test_continuous_profiler_auto_start_and_manual_stop( assert_single_transaction_with_profile_chunks(envelopes, thread) for _ in range(3): - stop_profiler() + stop_profiler_func() envelopes.clear() @@ -249,7 +268,7 @@ def test_continuous_profiler_auto_start_and_manual_stop( 
assert_single_transaction_without_profile_chunks(envelopes) - start_profiler() + start_profiler_func() envelopes.clear() @@ -267,6 +286,21 @@ def test_continuous_profiler_auto_start_and_manual_stop( pytest.param("gevent", marks=requires_gevent), ], ) +@pytest.mark.parametrize( + ["start_profiler_func", "stop_profiler_func"], + [ + pytest.param( + start_profile_session, + stop_profile_session, + id="start_profile_session/stop_profile_session", + ), + pytest.param( + start_profiler, + stop_profiler, + id="start_profiler/stop_profiler (deprecated)", + ), + ], +) @pytest.mark.parametrize( "make_options", [ @@ -279,6 +313,8 @@ def test_continuous_profiler_manual_start_and_stop_sampled( sentry_init, capture_envelopes, mode, + start_profiler_func, + stop_profiler_func, make_options, teardown_profiling, ): @@ -295,7 +331,7 @@ def test_continuous_profiler_manual_start_and_stop_sampled( thread = threading.current_thread() for _ in range(3): - start_profiler() + start_profiler_func() envelopes.clear() @@ -309,7 +345,7 @@ def test_continuous_profiler_manual_start_and_stop_sampled( assert get_profiler_id() is not None, "profiler should be running" - stop_profiler() + stop_profiler_func() # the profiler stops immediately in manual mode assert get_profiler_id() is None, "profiler should not be running" @@ -332,6 +368,21 @@ def test_continuous_profiler_manual_start_and_stop_sampled( pytest.param("gevent", marks=requires_gevent), ], ) +@pytest.mark.parametrize( + ["start_profiler_func", "stop_profiler_func"], + [ + pytest.param( + start_profile_session, + stop_profile_session, + id="start_profile_session/stop_profile_session", + ), + pytest.param( + start_profiler, + stop_profiler, + id="start_profiler/stop_profiler (deprecated)", + ), + ], +) @pytest.mark.parametrize( "make_options", [ @@ -343,6 +394,8 @@ def test_continuous_profiler_manual_start_and_stop_unsampled( sentry_init, capture_envelopes, mode, + start_profiler_func, + stop_profiler_func, make_options, teardown_profiling, ): @@ -356,7 +409,7 @@ def test_continuous_profiler_manual_start_and_stop_unsampled( envelopes = capture_envelopes() - start_profiler() + start_profiler_func() with sentry_sdk.start_transaction(name="profiling"): with sentry_sdk.start_span(op="op"): @@ -364,7 +417,7 @@ def test_continuous_profiler_manual_start_and_stop_unsampled( assert_single_transaction_without_profile_chunks(envelopes) - stop_profiler() + stop_profiler_func() @pytest.mark.parametrize( @@ -485,6 +538,21 @@ def test_continuous_profiler_auto_start_and_stop_unsampled( ), ], ) +@pytest.mark.parametrize( + ["start_profiler_func", "stop_profiler_func"], + [ + pytest.param( + start_profile_session, + stop_profile_session, + id="start_profile_session/stop_profile_session", + ), + pytest.param( + start_profiler, + stop_profiler, + id="start_profiler/stop_profiler (deprecated)", + ), + ], +) @pytest.mark.parametrize( "make_options", [ @@ -495,6 +563,8 @@ def test_continuous_profiler_auto_start_and_stop_unsampled( def test_continuous_profiler_manual_start_and_stop_noop_when_using_trace_lifecyle( sentry_init, mode, + start_profiler_func, + stop_profiler_func, class_name, make_options, teardown_profiling, @@ -510,11 +580,11 @@ def test_continuous_profiler_manual_start_and_stop_noop_when_using_trace_lifecyl with mock.patch( f"sentry_sdk.profiler.continuous_profiler.{class_name}.ensure_running" ) as mock_ensure_running: - start_profiler() + start_profiler_func() mock_ensure_running.assert_not_called() with mock.patch( 
f"sentry_sdk.profiler.continuous_profiler.{class_name}.teardown" ) as mock_teardown: - stop_profiler() + stop_profiler_func() mock_teardown.assert_not_called() From 3745d9ad43d9cc925a72d98edaf712166cb6a1a1 Mon Sep 17 00:00:00 2001 From: Ivana Kellyer Date: Wed, 19 Feb 2025 11:53:40 +0100 Subject: [PATCH 3/9] ci: Fix API doc failure in CI (#4075) Sphinx 8.2 (see [changelog](https://www.sphinx-doc.org/en/master/changes/index.html#release-8-2-0-released-feb-18-2025)) seems to have broken our CI. Looks like an incompatibility between it and the autodoc-typehints extension, so hopefully the two catch up with one another -- I'll pin sphinx to <8.2 for now. --- requirements-docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-docs.txt b/requirements-docs.txt index 15f226aac7..81e04ba3ef 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,5 +1,5 @@ gevent shibuya -sphinx +sphinx<8.2 sphinx-autodoc-typehints[type_comments]>=1.8.0 typing-extensions From 67f04910a4b2d6928d4ea7d39d3ba5aea4f91d28 Mon Sep 17 00:00:00 2001 From: Ivana Kellyer Date: Wed, 19 Feb 2025 12:09:32 +0100 Subject: [PATCH 4/9] tests: Add `fail_on_changes` to toxgen (#4072) Add `fail_on_changes` to toxgen. The idea is that the script will now have two modes: - **Normal mode** (when `fail_on_changes` is `False`) that is used to actually generate the `tox.ini` file. This [will be](https://github.com/getsentry/sentry-python/issues/4050) run in a cron job in CI and create a PR with the updated test setup. - The newly added **fail-on-changes mode** (when `fail_on_changes` is `True`) that is used to detect manual changes to one of the affected files without updating the rest (e.g. making a manual change to `tox.ini` without updating the `tox.jinja` template). This will be run in CI similar to the `fail_on_changes` check of `split-tox-gh-actions`. The problem with detecting manual changes is that if we just reran the script on each PR, chances are it would pull in new releases that are not part of the `tox.ini` on master, making the file look different from what was committed as if it had unrelated manual changes. To counteract this, we now store the timestamp when the file was last generated in `tox.ini`. We use this in fail-on-changes mode to filter out releases that popped up after the file was last generated. This way, the package versions should be the same and if there is anything different in `tox.ini`, it's likely to be the manual changes that we want to detect. Closes https://github.com/getsentry/sentry-python/issues/4051 --- .github/workflows/ci.yml | 6 +- scripts/populate_tox/populate_tox.py | 127 +++++++++++++++++++++++++-- scripts/populate_tox/tox.jinja | 2 + tox.ini | 6 +- 4 files changed, 130 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8931e229e..03ed8de742 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,7 +44,11 @@ jobs: with: python-version: 3.12 - - run: | + - name: Detect unexpected changes to tox.ini or CI + run: | + pip install -e . 
+ pip install -r scripts/populate_tox/requirements.txt + python scripts/populate_tox/populate_tox.py --fail-on-changes pip install -r scripts/split_tox_gh_actions/requirements.txt python scripts/split_tox_gh_actions/split_tox_gh_actions.py --fail-on-changes diff --git a/scripts/populate_tox/populate_tox.py b/scripts/populate_tox/populate_tox.py index 4bfce80ce7..5906eee5b4 100644 --- a/scripts/populate_tox/populate_tox.py +++ b/scripts/populate_tox/populate_tox.py @@ -3,15 +3,18 @@ """ import functools +import hashlib import os import sys import time from bisect import bisect_left from collections import defaultdict +from datetime import datetime, timezone from importlib.metadata import metadata from packaging.specifiers import SpecifierSet from packaging.version import Version from pathlib import Path +from textwrap import dedent from typing import Optional, Union # Adding the scripts directory to PATH. This is necessary in order to be able @@ -106,7 +109,9 @@ def fetch_release(package: str, version: Version) -> dict: return pypi_data.json() -def _prefilter_releases(integration: str, releases: dict[str, dict]) -> list[Version]: +def _prefilter_releases( + integration: str, releases: dict[str, dict], older_than: Optional[datetime] = None +) -> list[Version]: """ Filter `releases`, removing releases that are for sure unsupported. @@ -135,6 +140,10 @@ def _prefilter_releases(integration: str, releases: dict[str, dict]) -> list[Ver if meta["yanked"]: continue + if older_than is not None: + if datetime.fromisoformat(meta["upload_time_iso_8601"]) > older_than: + continue + version = Version(release) if min_supported and version < min_supported: @@ -160,19 +169,24 @@ def _prefilter_releases(integration: str, releases: dict[str, dict]) -> list[Ver return sorted(filtered_releases) -def get_supported_releases(integration: str, pypi_data: dict) -> list[Version]: +def get_supported_releases( + integration: str, pypi_data: dict, older_than: Optional[datetime] = None +) -> list[Version]: """ Get a list of releases that are currently supported by the SDK. This takes into account a handful of parameters (Python support, the lowest version we've defined for the framework, the date of the release). + + If an `older_than` timestamp is provided, no release newer than that will be + considered. 
""" package = pypi_data["info"]["name"] # Get a consolidated list without taking into account Python support yet # (because that might require an additional API call for some # of the releases) - releases = _prefilter_releases(integration, pypi_data["releases"]) + releases = _prefilter_releases(integration, pypi_data["releases"], older_than) # Determine Python support expected_python_versions = TEST_SUITE_CONFIG[integration].get("python") @@ -381,7 +395,9 @@ def _render_dependencies(integration: str, releases: list[Version]) -> list[str] return rendered -def write_tox_file(packages: dict) -> None: +def write_tox_file( + packages: dict, update_timestamp: bool, last_updated: datetime +) -> None: template = ENV.get_template("tox.jinja") context = {"groups": {}} @@ -400,6 +416,11 @@ def write_tox_file(packages: dict) -> None: } ) + if update_timestamp: + context["updated"] = datetime.now(tz=timezone.utc).isoformat() + else: + context["updated"] = last_updated.isoformat() + rendered = template.render(context) with open(TOX_FILE, "w") as file: @@ -453,7 +474,59 @@ def _add_python_versions_to_release( release.rendered_python_versions = _render_python_versions(release.python_versions) -def main() -> None: +def get_file_hash() -> str: + """Calculate a hash of the tox.ini file.""" + hasher = hashlib.md5() + + with open(TOX_FILE, "rb") as f: + buf = f.read() + hasher.update(buf) + + return hasher.hexdigest() + + +def get_last_updated() -> Optional[datetime]: + timestamp = None + + with open(TOX_FILE, "r") as f: + for line in f: + if line.startswith("# Last generated:"): + timestamp = datetime.fromisoformat(line.strip().split()[-1]) + break + + if timestamp is None: + print( + "Failed to find out when tox.ini was last generated; the timestamp seems to be missing from the file." + ) + + return timestamp + + +def main(fail_on_changes: bool = False) -> None: + """ + Generate tox.ini from the tox.jinja template. + + The script has two modes of operation: + - fail on changes mode (if `fail_on_changes` is True) + - normal mode (if `fail_on_changes` is False) + + Fail on changes mode is run on every PR to make sure that `tox.ini`, + `tox.jinja` and this script don't go out of sync because of manual changes + in one place but not the other. + + Normal mode is meant to be run as a cron job, regenerating tox.ini and + proposing the changes via a PR. + """ + print(f"Running in {'fail_on_changes' if fail_on_changes else 'normal'} mode.") + last_updated = get_last_updated() + if fail_on_changes: + # We need to make the script ignore any new releases after the `last_updated` + # timestamp so that we don't fail CI on a PR just because a new package + # version was released, leading to unrelated changes in tox.ini. + print( + f"Since we're in fail_on_changes mode, we're only considering releases before the last tox.ini update at {last_updated.isoformat()}." 
+ ) + global MIN_PYTHON_VERSION, MAX_PYTHON_VERSION sdk_python_versions = _parse_python_versions_from_classifiers( metadata("sentry-sdk").get_all("Classifier") @@ -480,7 +553,9 @@ def main() -> None: pypi_data = fetch_package(package) # Get the list of all supported releases - releases = get_supported_releases(integration, pypi_data) + # If in check mode, ignore releases newer than `last_updated` + older_than = last_updated if fail_on_changes else None + releases = get_supported_releases(integration, pypi_data, older_than) if not releases: print(" Found no supported releases.") continue @@ -510,8 +585,44 @@ def main() -> None: } ) - write_tox_file(packages) + if fail_on_changes: + old_file_hash = get_file_hash() + + write_tox_file( + packages, update_timestamp=not fail_on_changes, last_updated=last_updated + ) + + if fail_on_changes: + new_file_hash = get_file_hash() + if old_file_hash != new_file_hash: + raise RuntimeError( + dedent( + """ + Detected that `tox.ini` is out of sync with + `scripts/populate_tox/tox.jinja` and/or + `scripts/populate_tox/populate_tox.py`. This might either mean + that `tox.ini` was changed manually, or the `tox.jinja` + template and/or the `populate_tox.py` script were changed without + regenerating `tox.ini`. + + Please don't make manual changes to `tox.ini`. Instead, make the + changes to the `tox.jinja` template and/or the `populate_tox.py` + script (as applicable) and regenerate the `tox.ini` file with: + + python -m venv toxgen.env + . toxgen.env/bin/activate + pip install -r scripts/populate_tox/requirements.txt + python scripts/populate_tox/populate_tox.py + """ + ) + ) + print("Done checking tox.ini. Looking good!") + else: + print( + "Done generating tox.ini. Make sure to also update the CI YAML files to reflect the new test targets." + ) if __name__ == "__main__": - main() + fail_on_changes = len(sys.argv) == 2 and sys.argv[1] == "--fail-on-changes" + main(fail_on_changes) diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index 15119b4768..81ab17c919 100644 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -9,6 +9,8 @@ # or in the script (if you want to change the auto-generated part). # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". +# +# Last generated: {{ updated }} [tox] requires = diff --git a/tox.ini b/tox.ini index 9ce3d40a21..0e41500fe1 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,8 @@ # or in the script (if you want to change the auto-generated part). # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
+# +# Last generated: 2025-02-18T12:57:32.874168+00:00 [tox] requires = @@ -290,7 +292,7 @@ envlist = {py3.6,py3.7,py3.8}-trytond-v5.8.16 {py3.8,py3.10,py3.11}-trytond-v6.8.17 {py3.8,py3.11,py3.12}-trytond-v7.0.9 - {py3.8,py3.11,py3.12}-trytond-v7.4.5 + {py3.8,py3.11,py3.12}-trytond-v7.4.6 {py3.7,py3.11,py3.12}-typer-v0.15.1 @@ -714,7 +716,7 @@ deps = trytond-v5.8.16: trytond==5.8.16 trytond-v6.8.17: trytond==6.8.17 trytond-v7.0.9: trytond==7.0.9 - trytond-v7.4.5: trytond==7.4.5 + trytond-v7.4.6: trytond==7.4.6 trytond: werkzeug trytond-v4.6.9: werkzeug<1.0 trytond-v4.8.18: werkzeug<1.0 From a3b6e5d9f3adc515548dabd73462e77bccc4d516 Mon Sep 17 00:00:00 2001 From: Ivana Kellyer Date: Wed, 19 Feb 2025 15:18:54 +0100 Subject: [PATCH 5/9] tests: Test relevant prereleases and allow to ignore releases (#4073) If a package has a prerelease of a higher version than the highest released stable version, make sure to test it, too. We consider alpha, beta, and RC releases. Also add an option to ignore specific releases (this is related to the above since the script now pulls in two irrelevant alpha releases of starlite). Closes https://github.com/getsentry/sentry-python/issues/4030 --------- Co-authored-by: Daniel Szoke <7881302+szokeasaurusrex@users.noreply.github.com> --- scripts/populate_tox/README.md | 35 ++++++++++++++ scripts/populate_tox/config.py | 1 + scripts/populate_tox/populate_tox.py | 72 ++++++++++++++++++++++------ tox.ini | 10 ++-- 4 files changed, 101 insertions(+), 17 deletions(-) diff --git a/scripts/populate_tox/README.md b/scripts/populate_tox/README.md index aa9884387e..c9a3b67ba0 100644 --- a/scripts/populate_tox/README.md +++ b/scripts/populate_tox/README.md @@ -45,9 +45,15 @@ integration_name: { rule2: [package3, package4, ...], }, "python": python_version_specifier, + "include": package_version_specifier, } ``` +When talking about version specifiers, we mean +[version specifiers as defined](https://packaging.python.org/en/latest/specifications/version-specifiers/#id5) +by the Python Packaging Authority. See also the actual implementation +in [packaging.specifiers](https://packaging.pypa.io/en/stable/specifiers.html). + ### `package` The name of the third party package as it's listed on PyPI. The script will @@ -118,6 +124,35 @@ metadata or the SDK is explicitly not supporting some packages on specific Python versions (because of, for example, broken context vars), the `python` key can be used. +### `include` + +Sometimes we only want to consider testing some specific versions of packages. +For example, the Starlite package has two alpha prereleases of version 2.0.0, but +we do not want to test these, since Starlite 2.0 was renamed to Litestar. + +The value of the `include` key expects a version specifier defining which +versions should be considered for testing. For example, since we only want to test +versions below 2.x in Starlite, we can use + +```python +"starlite": { + "include": "<2", + ... +} +``` + +The `include` key can also be used to exclude a set of specific versions by using +`!=` version specifiers. For example, the Starlite restriction above could equivalently +be expressed like so: + + +```python +"starlite": { + "include": "!=2.0.0a1,!=2.0.0a2", + ... 
+} +``` + ## How-Tos diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py index 2c2920e7ac..b5da928d80 100644 --- a/scripts/populate_tox/config.py +++ b/scripts/populate_tox/config.py @@ -129,6 +129,7 @@ ], }, "python": "<=3.11", + "include": "!=2.0.0a1,!=2.0.0a2", # these are not relevant as there will never be a stable 2.0 release (starlite continues as litestar) }, "statsig": { "package": "statsig", diff --git a/scripts/populate_tox/populate_tox.py b/scripts/populate_tox/populate_tox.py index 5906eee5b4..544d4bdcb1 100644 --- a/scripts/populate_tox/populate_tox.py +++ b/scripts/populate_tox/populate_tox.py @@ -111,7 +111,7 @@ def fetch_release(package: str, version: Version) -> dict: def _prefilter_releases( integration: str, releases: dict[str, dict], older_than: Optional[datetime] = None -) -> list[Version]: +) -> tuple[list[Version], Optional[Version]]: """ Filter `releases`, removing releases that are for sure unsupported. @@ -120,6 +120,10 @@ def _prefilter_releases( they require additional API calls to be made. The purpose of this function is to slim down the list so that we don't have to make more API calls than necessary for releases that are for sure not supported. + + The function returns a tuple with: + - the list of prefiltered releases + - an optional prerelease if there is one that should be tested """ min_supported = _MIN_VERSIONS.get(integration) if min_supported is not None: @@ -129,7 +133,14 @@ def _prefilter_releases( f" {integration} doesn't have a minimum version defined in sentry_sdk/integrations/__init__.py. Consider defining one" ) + include_versions = None + if TEST_SUITE_CONFIG[integration].get("include") is not None: + include_versions = SpecifierSet( + TEST_SUITE_CONFIG[integration]["include"], prereleases=True + ) + filtered_releases = [] + last_prerelease = None for release, data in releases.items(): if not data: @@ -149,9 +160,15 @@ def _prefilter_releases( if min_supported and version < min_supported: continue - if version.is_prerelease or version.is_postrelease: - # TODO: consider the newest prerelease unless obsolete - # https://github.com/getsentry/sentry-python/issues/4030 + if version.is_postrelease or version.is_devrelease: + continue + + if include_versions is not None and version not in include_versions: + continue + + if version.is_prerelease: + if last_prerelease is None or version > last_prerelease: + last_prerelease = version continue for i, saved_version in enumerate(filtered_releases): @@ -166,18 +183,30 @@ def _prefilter_releases( else: filtered_releases.append(version) - return sorted(filtered_releases) + filtered_releases.sort() + + # Check if the latest prerelease is relevant (i.e., it's for a version higher + # than the last released version); if not, don't consider it + if last_prerelease is not None: + if not filtered_releases or last_prerelease > filtered_releases[-1]: + return filtered_releases, last_prerelease + + return filtered_releases, None def get_supported_releases( integration: str, pypi_data: dict, older_than: Optional[datetime] = None -) -> list[Version]: +) -> tuple[list[Version], Optional[Version]]: """ Get a list of releases that are currently supported by the SDK. This takes into account a handful of parameters (Python support, the lowest version we've defined for the framework, the date of the release). + We return the list of supported releases and optionally also the newest + prerelease, if it should be tested (meaning it's for a version higher than + the current stable version). 
+ If an `older_than` timestamp is provided, no release newer than that will be considered. """ @@ -186,7 +215,9 @@ def get_supported_releases( # Get a consolidated list without taking into account Python support yet # (because that might require an additional API call for some # of the releases) - releases = _prefilter_releases(integration, pypi_data["releases"], older_than) + releases, latest_prerelease = _prefilter_releases( + integration, pypi_data["releases"], older_than + ) # Determine Python support expected_python_versions = TEST_SUITE_CONFIG[integration].get("python") @@ -210,14 +241,18 @@ def _supports_lowest(release: Version) -> bool: # version(s) that we do, cut off the rest releases = releases[i:] - return releases + return releases, latest_prerelease -def pick_releases_to_test(releases: list[Version]) -> list[Version]: +def pick_releases_to_test( + releases: list[Version], last_prerelease: Optional[Version] +) -> list[Version]: """Pick a handful of releases to test from a sorted list of supported releases.""" # If the package has majors (or major-like releases, even if they don't do # semver), we want to make sure we're testing them all. If not, we just pick # the oldest, the newest, and a couple in between. + # + # If there is a relevant prerelease, also test that in addition to the above. has_majors = len(set([v.major for v in releases])) > 1 filtered_releases = set() @@ -252,7 +287,11 @@ def pick_releases_to_test(releases: list[Version]) -> list[Version]: releases[-1], # latest } - return sorted(filtered_releases) + filtered_releases = sorted(filtered_releases) + if last_prerelease is not None: + filtered_releases.append(last_prerelease) + + return filtered_releases def supported_python_versions( @@ -553,9 +592,14 @@ def main(fail_on_changes: bool = False) -> None: pypi_data = fetch_package(package) # Get the list of all supported releases - # If in check mode, ignore releases newer than `last_updated` + + # If in fail-on-changes mode, ignore releases newer than `last_updated` older_than = last_updated if fail_on_changes else None - releases = get_supported_releases(integration, pypi_data, older_than) + + releases, latest_prerelease = get_supported_releases( + integration, pypi_data, older_than + ) + if not releases: print(" Found no supported releases.") continue @@ -563,9 +607,9 @@ def main(fail_on_changes: bool = False) -> None: _compare_min_version_with_defined(integration, releases) # Pick a handful of the supported releases to actually test against - # and fetch the PYPI data for each to determine which Python versions + # and fetch the PyPI data for each to determine which Python versions # to test it on - test_releases = pick_releases_to_test(releases) + test_releases = pick_releases_to_test(releases, latest_prerelease) for release in test_releases: _add_python_versions_to_release(integration, package, release) diff --git a/tox.ini b/tox.ini index 0e41500fe1..360d16342e 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
 #
-# Last generated: 2025-02-18T12:57:32.874168+00:00
+# Last generated: 2025-02-19T12:41:15.689786+00:00
 
 [tox]
 requires =
@@ -211,10 +211,11 @@ envlist =
     {py3.8,py3.10,py3.11}-ariadne-v0.20.1
     {py3.8,py3.11,py3.12}-ariadne-v0.22
     {py3.8,py3.11,py3.12}-ariadne-v0.24.0
-    {py3.8,py3.11,py3.12}-ariadne-v0.25.2
+    {py3.9,py3.12,py3.13}-ariadne-v0.26.0
 
     {py3.6,py3.9,py3.10}-gql-v3.4.1
     {py3.7,py3.11,py3.12}-gql-v3.5.0
+    {py3.9,py3.12,py3.13}-gql-v3.6.0b4
 
     {py3.6,py3.9,py3.10}-graphene-v3.3
     {py3.8,py3.12,py3.13}-graphene-v3.4.3
@@ -236,6 +237,7 @@ envlist =
     {py3.6,py3.7,py3.8}-celery-v4.4.7
     {py3.6,py3.7,py3.8}-celery-v5.0.5
     {py3.8,py3.11,py3.12}-celery-v5.4.0
+    {py3.8,py3.12,py3.13}-celery-v5.5.0rc4
 
     {py3.6,py3.7}-dramatiq-v1.9.0
     {py3.6,py3.8,py3.9}-dramatiq-v1.12.3
@@ -592,13 +594,14 @@ deps =
     ariadne-v0.20.1: ariadne==0.20.1
     ariadne-v0.22: ariadne==0.22
     ariadne-v0.24.0: ariadne==0.24.0
-    ariadne-v0.25.2: ariadne==0.25.2
+    ariadne-v0.26.0: ariadne==0.26.0
     ariadne: fastapi
     ariadne: flask
     ariadne: httpx
 
     gql-v3.4.1: gql[all]==3.4.1
     gql-v3.5.0: gql[all]==3.5.0
+    gql-v3.6.0b4: gql[all]==3.6.0b4
 
     graphene-v3.3: graphene==3.3
     graphene-v3.4.3: graphene==3.4.3
@@ -630,6 +633,7 @@ deps =
     celery-v4.4.7: celery==4.4.7
     celery-v5.0.5: celery==5.0.5
     celery-v5.4.0: celery==5.4.0
+    celery-v5.5.0rc4: celery==5.5.0rc4
    celery: newrelic
    celery: redis
    py3.7-celery: importlib-metadata<5.0

From ccfd3a80da2fc2eacd95222ab0ac1a3cc720150b Mon Sep 17 00:00:00 2001
From: Tony Xiao
Date: Thu, 20 Feb 2025 07:39:33 -0500
Subject: [PATCH 6/9] feat(profiling): Export start/stop profile session
 (#4079)

These need to be exported explicitly so they can be used.
---
 sentry_sdk/profiler/__init__.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/sentry_sdk/profiler/__init__.py b/sentry_sdk/profiler/__init__.py
index 46382cc29d..d8d4e076d5 100644
--- a/sentry_sdk/profiler/__init__.py
+++ b/sentry_sdk/profiler/__init__.py
@@ -1,4 +1,9 @@
-from sentry_sdk.profiler.continuous_profiler import start_profiler, stop_profiler
+from sentry_sdk.profiler.continuous_profiler import (
+    start_profile_session,
+    start_profiler,
+    stop_profile_session,
+    stop_profiler,
+)
 from sentry_sdk.profiler.transaction_profiler import (
     MAX_PROFILE_DURATION_NS,
     PROFILE_MINIMUM_SAMPLES,
@@ -20,8 +25,10 @@
 )
 
 __all__ = [
-    "start_profiler",
-    "stop_profiler",
+    "start_profile_session",
+    "start_profiler",  # TODO: Deprecate this in favor of `start_profile_session`
+    "stop_profile_session",
+    "stop_profiler",  # TODO: Deprecate this in favor of `stop_profile_session`
     # DEPRECATED: The following was re-exported for backwards compatibility. It
     # will be removed from sentry_sdk.profiler in a future release.
     "MAX_PROFILE_DURATION_NS",

From 4d64c4e7221ad48b2316c2a45dec57c6c4660402 Mon Sep 17 00:00:00 2001
From: Sviatoslav Abakumov
Date: Thu, 20 Feb 2025 16:42:08 +0400
Subject: [PATCH 7/9] fix(typing): Add more typing info to
 Scope.update_from_kwargs's "contexts" (#4080)

The original type hint could be understood as a one-level `dict` of `str` to `Any`, when in fact it's a two-level dict: a mapping of context names to dicts of context fields.
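A minimal sketch of that shape (the context names and field values are invented for illustration; `sentry_sdk.set_context` is the public API that populates one entry of the mapping consumed by `update_from_kwargs`):

```python
import sentry_sdk

# Two-level dict: context name -> dict of fields for that context.
contexts = {
    "character": {"name": "Mighty Fighter", "age": 19},
    "browser": {"name": "Firefox", "version": "135.0"},
}

# Attach each top-level entry to the current scope:
for name, fields in contexts.items():
    sentry_sdk.set_context(name, fields)
```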
---
 sentry_sdk/scope.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sentry_sdk/scope.py b/sentry_sdk/scope.py
index 4e3bb87489..fbe97ddf44 100644
--- a/sentry_sdk/scope.py
+++ b/sentry_sdk/scope.py
@@ -1568,7 +1568,7 @@ def update_from_kwargs(
         user=None,  # type: Optional[Any]
         level=None,  # type: Optional[LogLevelStr]
         extras=None,  # type: Optional[Dict[str, Any]]
-        contexts=None,  # type: Optional[Dict[str, Any]]
+        contexts=None,  # type: Optional[Dict[str, Dict[str, Any]]]
         tags=None,  # type: Optional[Dict[str, str]]
         fingerprint=None,  # type: Optional[List[str]]
     ):

From 24232993da9f1364e0064d155dfe7006ee9b74c2 Mon Sep 17 00:00:00 2001
From: Björn Friedrichs <2217052+itsbjoern@users.noreply.github.com>
Date: Thu, 20 Feb 2025 13:38:17 +0000
Subject: [PATCH 8/9] AWS Lambda: Fix capturing errors during AWS Lambda INIT
 phase (#3943)

The AWS integration fails to capture errors during the INIT phase (at least in Python 3.8 and above environments). It appears tests for this were disabled after a change in AWS' own runtime environment: https://github.com/getsentry/sentry-python/pull/3592

A change from a few months ago apparently disabled string serialisation of the JSON payload, so `post_init_error` is now invoked directly with the JSON payload: https://github.com/aws/aws-lambda-python-runtime-interface-client/commit/a37a43a48bc151c211ad72a6556044aa62b2c671#diff-4513a869520b19ae4e30058106d7c3b5ddbb79216b5e9bd922d83389fb86c603R483

This breaks the SDK's attempt to parse the payload back from a string into JSON, and the resulting internal error is silently swallowed by `with capture_internal_exceptions()`.

Co-authored-by: Anton Pirker
---
 sentry_sdk/integrations/aws_lambda.py     | 5 ++++-
 tests/integrations/aws_lambda/test_aws.py | 3 ---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/sentry_sdk/integrations/aws_lambda.py b/sentry_sdk/integrations/aws_lambda.py
index 831cde8999..c232094256 100644
--- a/sentry_sdk/integrations/aws_lambda.py
+++ b/sentry_sdk/integrations/aws_lambda.py
@@ -61,7 +61,10 @@ def sentry_init_error(*args, **kwargs):
 
         else:
             # Fall back to AWS lambdas JSON representation of the error
-            sentry_event = _event_from_error_json(json.loads(args[1]))
+            error_info = args[1]
+            if isinstance(error_info, str):
+                error_info = json.loads(error_info)
+            sentry_event = _event_from_error_json(error_info)
             sentry_sdk.capture_event(sentry_event)
 
         return init_error(*args, **kwargs)

diff --git a/tests/integrations/aws_lambda/test_aws.py b/tests/integrations/aws_lambda/test_aws.py
index f60bedc846..8bbd33505b 100644
--- a/tests/integrations/aws_lambda/test_aws.py
+++ b/tests/integrations/aws_lambda/test_aws.py
@@ -316,9 +316,6 @@ def test_handler(event, context):
     }
 
 
-@pytest.mark.xfail(
-    reason="Amazon changed something (2024-10-01) and on Python 3.9+ our SDK can not capture events in the init phase of the Lambda function anymore. We need to fix this somehow."
-)
 def test_init_error(run_lambda_function, lambda_runtime):
     envelope_items, _ = run_lambda_function(
         LAMBDA_PRELUDE

From 48ebd7321c6fb2fcc9ddbd2039b1211114532768 Mon Sep 17 00:00:00 2001
From: Nathan
Date: Thu, 20 Feb 2025 15:56:22 +0000
Subject: [PATCH 9/9] fix(anthropic): Add partial json support to streams
 (#3674)

Add `partial_json` for tool calling when streaming in Anthropic integrations.
(This is an addition to https://github.com/getsentry/sentry-python/pull/3615 --------- Co-authored-by: Anton Pirker --- sentry_sdk/integrations/anthropic.py | 2 + .../integrations/anthropic/test_anthropic.py | 71 +++++++++++++++++-- 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index f06d8a14db..4cb54309c8 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -101,6 +101,8 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks): elif event.type == "content_block_delta": if hasattr(event.delta, "text"): content_blocks.append(event.delta.text) + elif hasattr(event.delta, "partial_json"): + content_blocks.append(event.delta.partial_json) elif event.type == "content_block_stop": pass elif event.type == "message_delta": diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index 8ce12e70f5..7f6622a1ba 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -1,5 +1,6 @@ from unittest import mock + try: from unittest.mock import AsyncMock except ImportError: @@ -10,7 +11,7 @@ async def __call__(self, *args, **kwargs): import pytest -from anthropic import AsyncAnthropic, Anthropic, AnthropicError, AsyncStream, Stream +from anthropic import Anthropic, AnthropicError, AsyncAnthropic, AsyncStream, Stream from anthropic.types import MessageDeltaUsage, TextDelta, Usage from anthropic.types.content_block_delta_event import ContentBlockDeltaEvent from anthropic.types.content_block_start_event import ContentBlockStartEvent @@ -19,6 +20,7 @@ async def __call__(self, *args, **kwargs): from anthropic.types.message_delta_event import MessageDeltaEvent from anthropic.types.message_start_event import MessageStartEvent +from sentry_sdk.integrations.anthropic import _add_ai_data_to_span, _collect_ai_data from sentry_sdk.utils import package_version try: @@ -42,7 +44,7 @@ async def __call__(self, *args, **kwargs): except ImportError: from anthropic.types.content_block import ContentBlock as TextBlock -from sentry_sdk import start_transaction +from sentry_sdk import start_transaction, start_span from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations.anthropic import AnthropicIntegration @@ -517,9 +519,8 @@ def test_streaming_create_message_with_input_json_delta( if send_default_pii and include_prompts: assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"text": "", "type": "text"} - ] # we do not record InputJSONDelta because it could contain PII - + {"text": "{'location': 'San Francisco, CA'}", "type": "text"} + ] else: assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] assert SPANDATA.AI_RESPONSES not in span["data"] @@ -654,8 +655,8 @@ async def test_streaming_create_message_with_input_json_delta_async( if send_default_pii and include_prompts: assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"text": "", "type": "text"} - ] # we do not record InputJSONDelta because it could contain PII + {"text": "{'location': 'San Francisco, CA'}", "type": "text"} + ] else: assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] @@ -757,3 +758,59 @@ async def test_span_origin_async(sentry_init, capture_events): assert event["contexts"]["trace"]["origin"] == "manual" assert event["spans"][0]["origin"] == "auto.ai.anthropic" + + 
+@pytest.mark.skipif( + ANTHROPIC_VERSION < (0, 27), + reason="Versions <0.27.0 do not include InputJSONDelta.", +) +def test_collect_ai_data_with_input_json_delta(): + event = ContentBlockDeltaEvent( + delta=InputJSONDelta(partial_json="test", type="input_json_delta"), + index=0, + type="content_block_delta", + ) + + input_tokens = 10 + output_tokens = 20 + content_blocks = [] + + new_input_tokens, new_output_tokens, new_content_blocks = _collect_ai_data( + event, input_tokens, output_tokens, content_blocks + ) + + assert new_input_tokens == input_tokens + assert new_output_tokens == output_tokens + assert new_content_blocks == ["test"] + + +@pytest.mark.skipif( + ANTHROPIC_VERSION < (0, 27), + reason="Versions <0.27.0 do not include InputJSONDelta.", +) +def test_add_ai_data_to_span_with_input_json_delta(sentry_init): + sentry_init( + integrations=[AnthropicIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + + with start_transaction(name="test"): + span = start_span() + integration = AnthropicIntegration() + + _add_ai_data_to_span( + span, + integration, + input_tokens=10, + output_tokens=20, + content_blocks=["{'test': 'data',", "'more': 'json'}"], + ) + + assert span._data.get(SPANDATA.AI_RESPONSES) == [ + {"type": "text", "text": "{'test': 'data','more': 'json'}"} + ] + assert span._data.get("ai.streaming") is True + assert span._measurements.get("ai_prompt_tokens_used")["value"] == 10 + assert span._measurements.get("ai_completion_tokens_used")["value"] == 20 + assert span._measurements.get("ai_total_tokens_used")["value"] == 30
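As a closing illustration of the `partial_json` handling added in [PATCH 9/9], here is a standalone sketch of the accumulation behaviour (the JSON fragments are invented; a real stream delivers them as `event.delta.partial_json` on `content_block_delta` events):

```python
# Streamed tool-call arguments arrive as JSON fragments; the integration
# appends each fragment and joins them into the complete JSON string.
fragments = ['{"location":', ' "San Francisco,', ' CA"}']

content_blocks = []
for partial_json in fragments:
    # Mirrors the new `elif hasattr(event.delta, "partial_json")` branch
    # in _collect_ai_data.
    content_blocks.append(partial_json)

complete = "".join(content_blocks)
print(complete)  # {"location": "San Francisco, CA"}
```

The joined string is what ends up in the span's `SPANDATA.AI_RESPONSES` entry as `{"type": "text", "text": ...}`, matching the assertion in `test_add_ai_data_to_span_with_input_json_delta` above.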