From be8389817fd9f0c01f9bd66e5b534942cace58f0 Mon Sep 17 00:00:00 2001
From: Samuel Colvin
Date: Tue, 19 Nov 2024 00:18:02 +0000
Subject: [PATCH] New intro (#64)

---
 docs/examples/bank-support.md        |  23 +++
 docs/examples/chat-app.md            |   4 +-
 docs/examples/index.md               |  12 +-
 docs/examples/pydantic-model.md      |   2 +-
 docs/examples/rag.md                 |   2 +-
 docs/examples/sql-gen.md             |   6 +-
 docs/examples/weather-agent.md       |   3 +-
 docs/index.md                        | 165 ++++++++++++++--------
 docs/install.md                      |   4 +-
 mkdocs.yml                           |   1 +
 pydantic_ai_examples/README.md       | 200 +--------------------------
 pydantic_ai_examples/bank_support.py |  88 ++++++++++++
 tests/test_examples.py               |  61 +++++++-
 13 files changed, 303 insertions(+), 268 deletions(-)
 create mode 100644 docs/examples/bank-support.md
 create mode 100644 pydantic_ai_examples/bank_support.py

diff --git a/docs/examples/bank-support.md b/docs/examples/bank-support.md
new file mode 100644
index 000000000..9068590be
--- /dev/null
+++ b/docs/examples/bank-support.md
@@ -0,0 +1,23 @@
+Small but complete example of using PydanticAI to build a support agent for a bank.
+
+Demonstrates:
+
+* [dynamic system prompt](../agents.md#system-prompts)
+* [structured `result_type`](../results.md#structured-result-validation)
+* [retrievers](../agents.md#retrievers)
+
+## Running the Example
+
+With [dependencies installed and environment variables set](./index.md#usage), run:
+
+```bash
+python/uv-run -m pydantic_ai_examples.bank_support
+```
+
+(or `PYDANTIC_AI_MODEL=gemini-1.5-flash ...`)
+
+## Example Code
+
+```py title="bank_support.py"
+#! pydantic_ai_examples/bank_support.py
+```
diff --git a/docs/examples/chat-app.md b/docs/examples/chat-app.md
index b32bfd73d..d096a7b27 100644
--- a/docs/examples/chat-app.md
+++ b/docs/examples/chat-app.md
@@ -3,8 +3,8 @@ Simple chat app example build with FastAPI.
 Demonstrates:
 
 * [reusing chat history](../message-history.md)
-* serializing messages
-* streaming responses
+* [serializing messages](../message-history.md#accessing-messages-from-results)
+* [streaming responses](../results.md#streamed-results)
 
 This demonstrates storing chat history between requests and using it to give the model context for new responses.
diff --git a/docs/examples/index.md b/docs/examples/index.md
index fdbf7b755..71871d3f8 100644
--- a/docs/examples/index.md
+++ b/docs/examples/index.md
@@ -61,7 +61,17 @@ For examples, to run the very simple [`pydantic_model`](./pydantic-model.md) exa
 python/uv-run -m pydantic_ai_examples.pydantic_model
 ```
 
-But you'll probably want to edit examples in addition to just running them. You can copy the examples to a new directory with:
+If you like one-liners and you're using uv, you can run a pydantic-ai example with zero setup:
+
+```bash
+OPENAI_API_KEY='your-api-key' \
+  uv run --with 'pydantic-ai[examples]' \
+  -m pydantic_ai_examples.pydantic_model
+```
+
+---
+
+You'll probably want to edit examples in addition to just running them. You can copy the examples to a new directory with:
 
 ```bash
 python/uv-run -m pydantic_ai_examples --copy-to examples/
diff --git a/docs/examples/pydantic-model.md b/docs/examples/pydantic-model.md
index 4460e5e80..637bce859 100644
--- a/docs/examples/pydantic-model.md
+++ b/docs/examples/pydantic-model.md
@@ -2,7 +2,7 @@ Simple example of using Pydantic AI to construct a Pydantic model from a text in
 Demonstrates:
 
-* custom `result_type`
+* [structured `result_type`](../results.md#structured-result-validation)
 
 ## Running the Example
diff --git a/docs/examples/rag.md b/docs/examples/rag.md
index 7624c85b9..88b5b7625 100644
--- a/docs/examples/rag.md
+++ b/docs/examples/rag.md
@@ -4,7 +4,7 @@ RAG search example. This demo allows you to ask question of the [logfire](https:
 Demonstrates:
 
-* retrievers
+* [retrievers](../agents.md#retrievers)
 * [agent dependencies](../dependencies.md)
 * RAG search
diff --git a/docs/examples/sql-gen.md b/docs/examples/sql-gen.md
index be1ba00f2..da9ee2e05 100644
--- a/docs/examples/sql-gen.md
+++ b/docs/examples/sql-gen.md
@@ -4,9 +4,9 @@ Example demonstrating how to use Pydantic AI to generate SQL queries based on us
 Demonstrates:
 
-* custom `result_type`
-* dynamic system prompt
-* result validation
+* [dynamic system prompt](../agents.md#system-prompts)
+* [structured `result_type`](../results.md#structured-result-validation)
+* [result validation](../results.md#result-validators-functions)
 * [agent dependencies](../dependencies.md)
 
 ## Running the Example
diff --git a/docs/examples/weather-agent.md b/docs/examples/weather-agent.md
index d73f81ec5..1652935ab 100644
--- a/docs/examples/weather-agent.md
+++ b/docs/examples/weather-agent.md
@@ -2,8 +2,7 @@ Example of Pydantic AI with multiple tools which the LLM needs to call in turn t
 Demonstrates:
 
-* retrievers
-* multiple retrievers
+* [retrievers](../agents.md#retrievers)
 * [agent dependencies](../dependencies.md)
 
 In this case the idea is a "weather" agent — the user can ask for the weather in multiple locations,
diff --git a/docs/index.md b/docs/index.md
index e0f107e70..5e3dbdb8c 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,78 +1,135 @@
+# Introduction {.hide}
+
 --8<-- "docs/.partials/index-header.html"
 
-# PydanticAI {.hide}
+When I first found FastAPI, I got it immediately. I was excited to find something so genuinely innovative and yet ergonomic, built on Pydantic.
+
+Virtually every Agent Framework and LLM library in Python uses Pydantic, but when we came to use Gen AI in [Pydantic Logfire](https://pydantic.dev/logfire), I couldn't find anything that gave me the same feeling.
+
+PydanticAI is a Python Agent Framework designed to make it less painful to build production-grade applications with Generative AI.
 
-You can think of PydanticAI as an Agent Framework or a shim to use Pydantic with LLMs — they're the same thing.
+## Why use PydanticAI
 
-PydanticAI tries to make working with LLMs feel similar to building a web application.
+* Built by the team behind Pydantic (the validation layer of the OpenAI SDK, the Anthropic SDK, Langchain, LlamaIndex, AutoGPT, Transformers, Instructor and many more)
+* Multi-model — currently OpenAI and Gemini are supported, Anthropic is [coming soon](https://github.com/pydantic/pydantic-ai/issues/63), and there's a simple interface to implement other models or adapt existing ones
+* Type-safe
+* Built on tried and tested best practices in Python
+* Structured response validation with Pydantic
+* Streamed responses, including validation of streamed structured responses with Pydantic
+* Novel, type-safe dependency injection system
+* Logfire integration
 
 !!! example "In Beta"
     PydanticAI is in early beta, the API is subject to change and there's a lot more to do.
     [Feedback](https://github.com/pydantic/pydantic-ai/issues) is very welcome!
 
+## Example — Hello World
+
+Here's a very minimal example of PydanticAI.
+
+```py title="hello_world.py"
+from pydantic_ai import Agent
+
+agent = Agent('gemini-1.5-flash', system_prompt='Be concise, reply with one sentence.')
+
+result = agent.run_sync('Where does "hello world" come from?')
+print(result.data)
+"""
+The first known use of "hello, world" was in a 1974 textbook about the C programming language.
+"""
+```
+_(This example is complete, it can be run "as is")_
+
+Not very interesting yet, but we can easily add retrievers, dynamic system prompts and structured responses to build more powerful agents.
+
 ## Example — Retrievers and Dependency Injection
 
-Partial example of using retrievers to help an LLM respond to a user's query about the weather:
+Small but complete example of using PydanticAI to build a support agent for a bank.
+
+```py title="bank_support.py"
+from dataclasses import dataclass
 
-```py title="weather_agent.py"
-import httpx
+from pydantic import BaseModel, Field
 
 from pydantic_ai import Agent, CallContext
 
+from bank_database import DatabaseConn
+
+
+@dataclass
+class SupportDependencies:  # (3)!
+    customer_id: int
+    db: DatabaseConn
+
+
+class SupportResult(BaseModel):
+    support_advice: str = Field(description='Advice returned to the customer')
+    block_card: bool = Field(description='Whether to block their card')
+    risk: int = Field(description='Risk level of query', ge=0, le=10)
+
+
-weather_agent = Agent(  # (1)!
+support_agent = Agent(  # (1)!
     'openai:gpt-4o',  # (2)!
-    deps_type=httpx.AsyncClient,  # (3)!
-    system_prompt='Be concise, reply with one sentence.',  # (4)!
+    deps_type=SupportDependencies,
+    result_type=SupportResult,  # (9)!
+    system_prompt=(  # (4)!
+        'You are a support agent in our bank, give the '
+        'customer support and judge the risk level of their query. '
+        "Reply using the customer's name."
+    ),
 )
 
 
-@weather_agent.retriever_context  # (5)!
-async def get_location(
-    ctx: CallContext[httpx.AsyncClient],
-    location_description: str,
-) -> dict[str, float]:
-    """Get the latitude and longitude of a location by its description."""  # (6)!
-    response = await ctx.deps.get('https://api.geolocation...')
-    ...
-
-
-@weather_agent.retriever_context  # (7)!
-async def get_weather(
-    ctx: CallContext[httpx.AsyncClient],
-    lat: float,
-    lng: float,
-) -> dict[str, str]:
-    """Get the weather at a location by its latitude and longitude."""
-    response = await ctx.deps.get('https://api.weather...')
-    ...
-
-
-async def main():
-    async with httpx.AsyncClient() as client:
-        result = await weather_agent.run(  # (8)!
-            'What is the weather like in West London and in Wiltshire?',
-            deps=client,
-        )
-        print(result.data)  # (9)!
-        #> The weather in West London is raining, while in Wiltshire it is sunny.
-
-        messages = result.all_messages()  # (10)!
+@support_agent.system_prompt  # (5)!
+async def add_customer_name(ctx: CallContext[SupportDependencies]) -> str:
+    customer_name = await ctx.deps.db.customer_name(id=ctx.deps.customer_id)
+    return f"The customer's name is {customer_name!r}"
+
+
+@support_agent.retriever_context  # (6)!
+async def customer_balance(
+    ctx: CallContext[SupportDependencies], include_pending: bool
+) -> str:
+    """Returns the customer's current account balance."""  # (7)!
+    balance = await ctx.deps.db.customer_balance(
+        id=ctx.deps.customer_id,
+        include_pending=include_pending,
+    )
+    return f'${balance:.2f}'
+
+
+...  # (11)!
+
+
+deps = SupportDependencies(customer_id=123, db=DatabaseConn())
+result = support_agent.run_sync('What is my balance?', deps=deps)  # (8)!
+print(result.data)  # (10)!
+"""
+support_advice='Hello John, your current account balance, including pending transactions, is $123.45.' block_card=False risk=1
+"""
+
+result = support_agent.run_sync('I just lost my card!', deps=deps)
+print(result.data)
+"""
+support_advice="I'm sorry to hear that, John. We are temporarily blocking your card to prevent unauthorized transactions." block_card=True risk=8
+"""
 ```
 
-1. An agent that can tell users about the weather in a particular location. Agents combine a system prompt, a response type (here `str`) and "retrievers" (aka tools).
-2. Here we configure the agent to use OpenAI's GPT-4o model, you can also customise the model when running the agent.
-3. We specify the type dependencies for the agent, in this case an HTTP client, which retrievers will use to make requests to external services. PydanticAI's system of dependency injection provides a powerful, type safe way to customise the behaviour of your agents, including for unit tests and evals.
-4. Static system prompts can be registered as key word arguments to the agent, dynamic system prompts can be registered with the `@agent.system_prompot` decorator and benefit from dependency injection.
-5. Retrievers let you register "tools" which the LLM may call while to respond to a user. You inject dependencies into the retriever with `CallContext`, any other arguments become the tool schema passed to the LLM, Pydantic is used to validate these arguments, errors are passed back to the LLM so it can retry.
-6. This docstring is also passed to the LLM as a description of the tool.
-7. Multiple retrievers can be registered with the same agent, the LLM can choose which (if any) retrievers to call in order to respond to a user.
-8. Run the agent asynchronously, conducting a conversation with the LLM until a final response is reached. You can also run agents synchronously with `run_sync`. Internally agents are all async, so `run_sync` is a helper using `asyncio.run` to call `run()`.
-9. The response from the LLM, in this case a `str`, Agents are generic in both the type of `deps` and `result_type`, so calls are typed end-to-end.
-10. [`result.all_messages()`](message-history.md) includes details of messages exchanged, this is useful both to understand the conversation that took place and useful if you want to continue the conversation later — messages can be passed back to later `run/run_sync` calls.
+1. An [agent](agents.md) that acts as first-tier support in a bank. Agents are generic in the type of dependencies they take and the type of result they return, in this case `SupportDependencies` and `SupportResult`.
+2. Here we configure the agent to use [OpenAI's GPT-4o model](api/models/openai.md); you can also customise the model when running the agent.
+3. The `SupportDependencies` dataclass is used to pass data and connections into the model that will be needed when running [system prompts](agents.md#system-prompts) and [retrievers](agents.md#retrievers). PydanticAI's system of dependency injection provides a powerful, type-safe way to customise the behaviour of your agents, including for unit tests and evals.
+4. Static [system prompts](agents.md#system-prompts) can be registered as keyword arguments to the agent.
+5. Dynamic [system prompts](agents.md#system-prompts) can be registered with the `@agent.system_prompt` decorator and benefit from dependency injection.
+6. [Retrievers](agents.md#retrievers) let you register "tools" which the LLM may call while responding to a user. You inject dependencies into the retriever with [`CallContext`][pydantic_ai.dependencies.CallContext]; any other arguments become the tool schema passed to the LLM. Pydantic is used to validate these arguments, and errors are passed back to the LLM so it can retry.
+7. The docstring is also passed to the LLM as a description of the tool.
+8. [Run the agent](agents.md#running-agents) synchronously, conducting a conversation with the LLM until a final response is reached.
+9. The response from the agent is guaranteed to be a `SupportResult`; if validation fails, [reflection](agents.md#reflection-and-self-correction) means the agent is prompted to try again.
+10. The result will be validated with Pydantic to guarantee it is a `SupportResult`; since the agent is generic, it'll also be typed as a `SupportResult` to aid with static type checking.
+11. In a real use case, you'd add many more retrievers to the agent to extend the context it's equipped with and the support it can provide.
 
-!!! tip "Complete `weather_agent.py` example"
-    This example is incomplete for the sake of brevity; you can find a complete `weather_agent.py` example [here](examples/weather-agent.md).
+!!! tip "Complete `bank_support.py` example"
+    This example is incomplete for the sake of brevity (the definition of `DatabaseConn` is missing); you can find a complete `bank_support.py` example [here](examples/bank-support.md).
 
-## Example — Result Validation
+## Next Steps
 
-TODO
+To try PydanticAI yourself, follow instructions [in examples](examples/index.md).
diff --git a/docs/install.md b/docs/install.md
index b4bb26d55..1bf921113 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -36,4 +36,6 @@ To use Logfire with PydanticAI, install PydanticAI with the `logfire` optional g
 
 From there, follow the [Logfire documentation](https://logfire.pydantic.dev/docs/) to configure Logfire.
 
-TODO screenshot of Logfire with PydanticAI in action.
+## Next Steps
+
+To run PydanticAI, follow instructions [in examples](examples/index.md).
diff --git a/mkdocs.yml b/mkdocs.yml
index ad1b0e0ce..72d335cc4 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -23,6 +23,7 @@ nav:
     - examples/index.md
     - examples/pydantic-model.md
     - examples/weather-agent.md
+    - examples/bank-support.md
     - examples/sql-gen.md
     - examples/rag.md
     - examples/stream-markdown.md
diff --git a/pydantic_ai_examples/README.md b/pydantic_ai_examples/README.md
index 3ef3dd720..62289d752 100644
--- a/pydantic_ai_examples/README.md
+++ b/pydantic_ai_examples/README.md
@@ -2,202 +2,4 @@ Examples of how to use Pydantic AI and what it can do.
-## Usage
-
-These examples are distributed with `pydantic-ai` so you can run them either by cloning the [pydantic-ai repo](https://github.com/pydantic/pydantic-ai) or by simply installing `pydantic-ai` from PyPI with `pip` or `uv`.
-
-Either way you'll need to install extra dependencies to run some examples, you just need to install the `examples` optional dependency group.
-
-If you've cloned the repo, add the extra dependencies with:
-
-```bash
-uv sync --extra examples
-```
-
-If you've installed `pydantic-ai` via pip/uv, you can install the extra dependencies with:
-
-```bash
-pip install 'pydantic-ai[examples]'
-# of if you're using uv
-uv add 'pydantic-ai[examples]'
-```
-
-To run the examples, run:
-
-```bash
-python -m pydantic_ai_examples.
-```
-(replace `python` with just `uv run` if you're using `uv`)
-
-But you'll probably want to edit examples as well as just running them, so you can copy the examples to a new directory with:
-
-```bash
-python -m pydantic_ai_examples --copy-to examples/
-```
-
-### Setting model environment variables
-
-All these examples will need to set either:
-
-* `OPENAI_API_KEY` to use OpenAI models, go to [platform.openai.com](https://platform.openai.com/) and follow your nose until you find how to generate an API key
-* `GEMINI_API_KEY` to use Gemini/Google models, go to [aistudio.google.com](https://aistudio.google.com/) and do the same to generate an API key
-
-Then set the API key as an environment variable with:
-
-```bash
-export OPENAI_API_KEY=your-api-key
-# or
-export GEMINI_API_KEY=your-api-key
-```
-
-## Examples
-
-### `pydantic_model.py`
-
-(Demonstrates: custom `result_type`)
-
-Simple example of using Pydantic AI to construct a Pydantic model from a text input.
-
-```bash
-(uv run/python) -m pydantic_ai_examples.pydantic_model
-```
-
-This examples uses `openai:gpt-4o` by default, but it works well with other models, e.g. you can run it
-with Gemini using:
-
-```bash
-PYDANTIC_AI_MODEL=gemini-1.5-pro (uv run/python) -m pydantic_ai_examples.pydantic_model
-```
-
-(or `PYDANTIC_AI_MODEL=gemini-1.5-flash ...`)
-
-### `sql_gen.py`
-
-(Demonstrates: custom `result_type`, dynamic system prompt, result validation, agent deps)
-
-Example demonstrating how to use Pydantic AI to generate SQL queries based on user input.
-
-The resulting SQL is validated by running it as an `EXPLAIN` query on PostgreSQL. To run the example, you first need to run PostgreSQL, e.g. via Docker:
-
-```bash
-docker run --rm -e POSTGRES_PASSWORD=postgres -p 54320:5432 postgres
-```
-_(we run postgres on port `54320` to avoid conflicts with any other postgres instances you may have running)_
-
-Then to run the code
-
-```bash
-(uv run/python) -m pydantic_ai_examples.sql_gen
-```
-
-or to use a custom prompt:
-
-```bash
-(uv run/python) -m pydantic_ai_examples.sql_gen "find me whatever"
-```
-
-This model uses `gemini-1.5-flash` by default since Gemini is good at single shot queries.
-
-### `weather_agent.py`
-
-(Demonstrates: retrievers, multiple retrievers, agent deps)
-
-Example of Pydantic AI with multiple tools which the LLM needs to call in turn to answer a question.
-
-In this case the idea is a "weather" agent — the user can ask for the weather in multiple cities,
-the agent will use the `get_lat_lng` tool to get the latitude and longitude of the locations, then use
-the `get_weather` tool to get the weather.
-
-To run this example properly, you'll need two extra API keys:
-* A weather API key from [tomorrow.io](https://www.tomorrow.io/weather-api/) set via `WEATHER_API_KEY`
-* A geocoding API key from [geocode.maps.co](https://geocode.maps.co/) set via `GEO_API_KEY`
-
-**(Note if either key is missing, the code will fall back to dummy data.)**
-
-```bash
-(uv run/python) -m pydantic_ai_examples.weather_agent
-```
-
-This example uses `openai:gpt-4o` by default. Gemini seems to be unable to handle the multiple tool
-calls.
-
-### `rag.py`
-
-(Demonstrates: retrievers, agent deps, RAG search)
-
-RAG search example. This demo allows you to ask question of the [logfire](https://pydantic.dev/logfire) documentation.
-
-This is done by creating a database containing each section of the markdown documentation, then registering
-the search tool as a retriever with the Pydantic AI agent.
-
-Logic for extracting sections from markdown files and a JSON file with that data is available in
-[this gist](https://gist.github.com/samuelcolvin/4b5bb9bb163b1122ff17e29e48c10992).
-
-[PostgreSQL with pgvector](https://github.com/pgvector/pgvector) is used as the search database, the easiest way to download and run pgvector is using Docker:
-
-```bash
-mkdir postgres-data
-docker run --rm -e POSTGRES_PASSWORD=postgres -p 54320:5432 -v `pwd`/postgres-data:/var/lib/postgresql/data pgvector/pgvector:pg17
-```
-
-As above, we run postgres on port `54320` to avoid conflicts with any other postgres instances you may have running.
-We also mount the postgresql `data` directory locally to persist the data if you need to stop and restart the container.
-
-With that running, we can build the search database with (**WARNING**: this requires the `OPENAI_API_KEY` env variable and will calling the OpenAI embedding API around 300 times to generate embeddings for each section of the documentation):
-
-```bash
-(uv run/python) -m pydantic_ai_examples.rag build
-```
-
-(Note building the database doesn't use Pydantic AI right now, instead it uses the OpenAI SDK directly.)
-
-You can then ask the agent a question with:
-
-```bash
-(uv run/python) -m pydantic_ai_examples.rag search "How do I configure logfire to work with FastAPI?"
-```
-
-### `chat_app.py`
-
-(Demonstrates: reusing chat history, serializing messages)
-
-Simple chat app example build with FastAPI.
-
-This demonstrates storing chat history between requests and using it to give the model context for new responses.
-
-Most of the complex logic here is in `chat_app.html` which includes the page layout and JavaScript to handle the chat.
-
-Run the app with:
-
-```bash
-(uv run/python) -m pydantic_ai_examples.chat_app
-```
-
-Then open the app at [localhost:8000](http://localhost:8000).
-
-### `stream_markdown.py`
-
-(Demonstrates: streaming text responses)
-
-This example shows how to stream markdown from an agent, using the `rich` library to display the markdown.
-
-Run with:
-
-```bash
-(uv run/python) -m pydantic_ai_examples.stream_markdown
-```
-
-### `stream_whales.py`
-
-(Demonstrates: streaming structured responses)
-
-Information about whales — an example of streamed structured response validation.
-
-This script streams structured responses from GPT-4 about whales, validates the data
-and displays it as a dynamic table using Rich as the data is received.
-
-Run with:
-
-```bash
-(uv run/python) -m pydantic_ai_examples.stream_whales
-```
+Full documentation of these examples and how to run them is available at [ai.pydantic.dev/examples/](https://ai.pydantic.dev/examples/).
diff --git a/pydantic_ai_examples/bank_support.py b/pydantic_ai_examples/bank_support.py
new file mode 100644
index 000000000..6872d01b0
--- /dev/null
+++ b/pydantic_ai_examples/bank_support.py
@@ -0,0 +1,88 @@
+"""Small but complete example of using PydanticAI to build a support agent for a bank.
+
+Run with:
+
+    uv run -m pydantic_ai_examples.bank_support
+"""
+
+from dataclasses import dataclass
+
+from pydantic import BaseModel, Field
+
+from pydantic_ai import Agent, CallContext
+
+
+class DatabaseConn:
+    """This is a fake database for example purposes.
+
+    In reality, you'd be connecting to an external database
+    to get information about customers.
+    """
+
+    @classmethod
+    async def customer_name(cls, *, id: int) -> str | None:
+        if id == 123:
+            return 'John'
+
+    @classmethod
+    async def customer_balance(cls, *, id: int, include_pending: bool) -> float:
+        if id == 123:
+            return 123.45
+        else:
+            raise ValueError('Customer not found')
+
+
+@dataclass
+class SupportDependencies:
+    customer_id: int
+    db: DatabaseConn
+
+
+class SupportResult(BaseModel):
+    support_advice: str = Field(description='Advice returned to the customer')
+    block_card: bool = Field(description='Whether to block their card')
+    risk: int = Field(description='Risk level of query', ge=0, le=10)
+
+
+support_agent = Agent(
+    'openai:gpt-4o',
+    deps_type=SupportDependencies,
+    result_type=SupportResult,
+    system_prompt=(
+        'You are a support agent in our bank, give the '
+        'customer support and judge the risk level of their query. '
+        "Reply using the customer's name."
+    ),
+)
+
+
+@support_agent.system_prompt
+async def add_customer_name(ctx: CallContext[SupportDependencies]) -> str:
+    customer_name = await ctx.deps.db.customer_name(id=ctx.deps.customer_id)
+    return f"The customer's name is {customer_name!r}"
+
+
+@support_agent.retriever_context
+async def customer_balance(
+    ctx: CallContext[SupportDependencies], include_pending: bool
+) -> str:
+    """Returns the customer's current account balance."""
+    balance = await ctx.deps.db.customer_balance(
+        id=ctx.deps.customer_id,
+        include_pending=include_pending,
+    )
+    return f'${balance:.2f}'
+
+
+deps = SupportDependencies(customer_id=123, db=DatabaseConn())
+result = support_agent.run_sync('What is my balance?', deps=deps)
+print(result.data)
+"""
+support_advice='Hello John, your current account balance, including pending transactions, is $123.45.' block_card=False risk=1
+"""
+
+result = support_agent.run_sync('I just lost my card!', deps=deps)
+print(result.data)
+"""
+support_advice="I'm sorry to hear that, John. We are temporarily blocking your card to prevent unauthorized transactions." block_card=True risk=8
+"""
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 807da50ae..4baf77caa 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -29,7 +29,7 @@
 
 
 @pytest.fixture(scope='module', autouse=True)
-def register_modules():
+def register_fake_db():
     class FakeTable:
         def get(self, name: str) -> int | None:
             if name == 'John Doe':
@@ -48,6 +48,30 @@ class DatabaseConn:
         sys.modules.pop(module_name)
 
 
+@pytest.fixture(scope='module', autouse=True)
+def register_bank_db():
+    class DatabaseConn:
+        @classmethod
+        async def customer_name(cls, *, id: int) -> str | None:
+            if id == 123:
+                return 'John'
+
+        @classmethod
+        async def customer_balance(cls, *, id: int, include_pending: bool) -> float:
+            if id == 123:
+                return 123.45
+            else:
+                raise ValueError('Customer not found')
+
+    module_name = 'bank_database'
+    sys.modules[module_name] = module = ModuleType(module_name)
+    module.__dict__.update({'DatabaseConn': DatabaseConn})
+
+    yield
+
+    sys.modules.pop(module_name)
+
+
 def find_filter_examples() -> Iterable[CodeExample]:
     for ex in find_examples('docs', 'pydantic_ai'):
         if ex.path.name != '_utils.py':
@@ -70,8 +94,11 @@ def test_docs_examples(
     prefix_settings = example.prefix_settings()
 
     ruff_ignore: list[str] = ['D']
-    if str(example.path).endswith('docs/index.md'):
-        ruff_ignore.append('F841')
+
+    # `from bank_database import DatabaseConn` wrongly sorted in imports
+    # waiting for https://github.com/pydantic/pytest-examples/issues/43
+    if 'from bank_database import DatabaseConn' in example.source:
+        ruff_ignore.append('I001')
+
     eval_example.set_config(ruff_ignore=ruff_ignore, target_version='py39')
 
     eval_example.print_callback = print_callback
@@ -109,7 +136,9 @@ async def async_http_request(url: str, **kwargs: Any) -> httpx.Response:
 
 
 text_responses: dict[str, str | ToolCall] = {
-    'What is the weather like in West London and in Wiltshire?': 'The weather in West London is raining, while in Wiltshire it is sunny.',
+    'What is the weather like in West London and in Wiltshire?': (
+        'The weather in West London is raining, while in Wiltshire it is sunny.'
+    ),
     'Tell me a joke.': 'Did you hear about the toothpaste scandal? They called it Colgate.',
     'Explain?': 'This is an excellent joke invent by Samuel Colvin, it needs no explanation.',
     'What is the capital of France?': 'Paris',
@@ -125,6 +154,23 @@ async def async_http_request(url: str, **kwargs: Any) -> httpx.Response:
         tool_name='get_user_by_name', args=ArgsObject({'name': 'John'})
     ),
     'Please get me the volume of a box with size 6.': ToolCall(tool_name='calc_volume', args=ArgsObject({'size': 6})),
+    'Where does "hello world" come from?': (
+        'The first known use of "hello, world" was in a 1974 textbook about the C programming language.'
+    ),
+    'What is my balance?': ToolCall(tool_name='customer_balance', args=ArgsObject({'include_pending': True})),
+    'I just lost my card!': ToolCall(
+        tool_name='final_result',
+        args=ArgsObject(
+            {
+                'support_advice': (
+                    "I'm sorry to hear that, John. "
+                    'We are temporarily blocking your card to prevent unauthorized transactions.'
+                ),
+                'block_card': True,
+                'risk': 8,
+            }
+        ),
+    ),
 }
 
 
@@ -156,6 +202,13 @@ async def model_logic(messages: list[Message], info: AgentInfo) -> ModelAnyRespo
         return ModelStructuredResponse(calls=[ToolCall(tool_name='final_result', args=ArgsObject(args))])
     elif m.role == 'retry-prompt' and m.tool_name == 'calc_volume':
         return ModelStructuredResponse(calls=[ToolCall(tool_name='calc_volume', args=ArgsObject({'size': 6}))])
+    elif m.role == 'tool-return' and m.tool_name == 'customer_balance':
+        args = {
+            'support_advice': 'Hello John, your current account balance, including pending transactions, is $123.45.',
+            'block_card': False,
+            'risk': 1,
+        }
+        return ModelStructuredResponse(calls=[ToolCall(tool_name='final_result', args=ArgsObject(args))])
     else:
         sys.stdout.write(str(debug.format(messages, info)))
         raise RuntimeError(f'Unexpected message: {m}')