From 44955c73a54c210d226d9b27b2c8b37ea302d8ce Mon Sep 17 00:00:00 2001 From: Kevin Rohling Date: Sat, 13 Jan 2024 15:39:20 -0800 Subject: [PATCH] update documentation, prep for 0.3.0 release --- .../agents/group_chat/group_conversation.py | 15 +-- bondai/agents/group_chat/user_proxy.py | 10 +- bondai/memory/memory_manager.py | 8 +- .../hierarchical_conversation.py | 16 ++- website/docs/agent-memory/agent-memory.md | 15 +++ website/docs/agent-memory/archival-memory.md | 117 ++++++++++++++++++ .../docs/agent-memory/conversation-memory.md | 117 ++++++++++++++++++ website/docs/agent-memory/core-memory.md | 104 ++++++++++++++++ website/docs/agent-memory/memory-manager.md | 50 ++++++++ website/docs/agents/agents.md | 9 ++ website/docs/agents/conversational-agent.md | 73 +++++++++++ website/docs/agents/react-agent.md | 102 +++++++++++++++ website/docs/api-spec/_category_.json | 2 +- website/docs/azure.md | 2 +- website/docs/cli.md | 2 +- website/docs/docker.md | 4 +- website/docs/examples/_category_.json | 2 +- website/docs/multi-agent-systems/examples.md | 110 ++++++++++++++++ .../multi-agent-systems/group-conversation.md | 65 ++++++++++ .../multi-agent-systems.md | 15 +++ .../team-conversation-config.md | 49 ++++++++ website/docs/tools/_category_.json | 2 +- 22 files changed, 852 insertions(+), 37 deletions(-) create mode 100644 website/docs/agent-memory/agent-memory.md create mode 100644 website/docs/agent-memory/archival-memory.md create mode 100644 website/docs/agent-memory/conversation-memory.md create mode 100644 website/docs/agent-memory/core-memory.md create mode 100644 website/docs/agent-memory/memory-manager.md create mode 100644 website/docs/agents/agents.md create mode 100644 website/docs/agents/conversational-agent.md create mode 100644 website/docs/agents/react-agent.md create mode 100644 website/docs/multi-agent-systems/examples.md create mode 100644 website/docs/multi-agent-systems/group-conversation.md create mode 100644 
website/docs/multi-agent-systems/multi-agent-systems.md create mode 100644 website/docs/multi-agent-systems/team-conversation-config.md diff --git a/bondai/agents/group_chat/group_conversation.py b/bondai/agents/group_chat/group_conversation.py index bbbf2ab..cef4b75 100644 --- a/bondai/agents/group_chat/group_conversation.py +++ b/bondai/agents/group_chat/group_conversation.py @@ -250,7 +250,7 @@ def send_message( next_message = previous_message except AgentException as e: print("Error occurred, rewinding conversation...") - print(e) + # print(e) # The recipient agent has errored out. We will rewind the conversation and try again. previous_message = ( self._messages[-2] @@ -270,19 +270,6 @@ def send_message( finally: self._status = AgentStatus.IDLE - def send_message_async( - self, - recipient_name: str, - message: str, - sender_name: str = USER_MEMBER_NAME, - ): - async def send_message_coroutine(): - return self.send_message( - recipient_name=recipient_name, message=message, sender_name=sender_name - ) - - return asyncio.run(send_message_coroutine()) - def reset_memory(self): self._messages.clear() for member in self.members: diff --git a/bondai/agents/group_chat/user_proxy.py b/bondai/agents/group_chat/user_proxy.py index 07a7e0f..305463f 100644 --- a/bondai/agents/group_chat/user_proxy.py +++ b/bondai/agents/group_chat/user_proxy.py @@ -15,7 +15,12 @@ class UserProxy(EventMixin, ConversationMember): - def __init__(self, persona: str | None = None, parse_recipients: bool = True): + def __init__( + self, + persona: str | None = None, + parse_recipients: bool = True, + auto_exit: bool = False, + ): EventMixin.__init__( self, allowed_events=[ @@ -32,6 +37,7 @@ def __init__(self, persona: str | None = None, parse_recipients: bool = True): ) self._status = AgentStatus.IDLE self._parse_recipients = parse_recipients + self._auto_exit = auto_exit def send_message( self, @@ -67,7 +73,7 @@ def send_message( cprint("\n" + agent_message.message + "\n", "white") - if not 
agent_message.require_response: + if not agent_message.require_response or self._auto_exit: agent_message.success = True agent_message.cost = 0.0 agent_message.completed_at = datetime.now() diff --git a/bondai/memory/memory_manager.py b/bondai/memory/memory_manager.py index 279b580..78afc62 100644 --- a/bondai/memory/memory_manager.py +++ b/bondai/memory/memory_manager.py @@ -4,7 +4,6 @@ from bondai.util import load_local_resource from .archival.datasources import ( ArchivalMemoryDataSource, - InMemoryArchivalMemoryDataSource, PersistentArchivalMemoryDataSource, ) from .archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool @@ -19,7 +18,6 @@ ) from .core.datasources import ( CoreMemoryDataSource, - InMemoryCoreMemoryDataSource, PersistentCoreMemoryDataSource, ) from .core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool @@ -35,10 +33,10 @@ def __init__( core_memory_datasource: CoreMemoryDataSource | None = None, conversation_memory_datasource: ConversationMemoryDataSource | None = None, archival_memory_datasource: ArchivalMemoryDataSource | None = None, - prompt_builder: Callable[..., str] = JinjaPromptBuilder( - DEFAULT_PROMPT_TEMPLATE - ), + prompt_builder: Callable[..., str] | None = None, ): + if prompt_builder is None: + prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE) self._core_memory_datasource = core_memory_datasource self._conversation_memory_datasource = conversation_memory_datasource self._archival_memory_datasource = archival_memory_datasource diff --git a/tests/conversational/hierarchical_conversation.py b/tests/conversational/hierarchical_conversation.py index 3cdc3d1..c79699d 100644 --- a/tests/conversational/hierarchical_conversation.py +++ b/tests/conversational/hierarchical_conversation.py @@ -1,6 +1,6 @@ from bondai.models.openai import get_total_cost, OpenAILLM, OpenAIModelNames from bondai.tools.file import FileWriteTool -from bondai.agents import ConversationalAgent +from bondai.agents import 
ConversationalAgent, ConversationMemberEventNames from bondai.agents.group_chat import ( GroupConversation, TeamConversationConfig, @@ -8,34 +8,28 @@ ) llm = OpenAILLM(OpenAIModelNames.GPT4_0613) - -user_proxy = UserProxy() +user_proxy = UserProxy(auto_exit=True) agent_a1 = ConversationalAgent( name="A1", instructions="You are a team leader A1, your team consists of A2, A3. You can talk to your team members as well as the other team leader B1, whose team member is B2. Your team members have the values for x and y.", - # llm=llm ) agent_a2 = ConversationalAgent( name="A2", instructions="You are team member A2, you know the secret value of x but not y, x = 9. Tell others x to cooperate.", - # llm=llm ) agent_a3 = ConversationalAgent( name="A3", instructions="You are team member A3, You know the secret value of y but not x, y = 5. Tell others y to cooperate.", - # llm=llm ) agent_b1 = ConversationalAgent( name="B1", instructions="You are a team leader B1, your team consists of B2. You can talk to your team members as wel as the other team leader A1, whose team members are A2, A3.", - # llm=llm ) agent_b2 = ConversationalAgent( name="B2", instructions="You are team member B2. Your task is to find out the value of x and y from the other agents and compute the product. Once you have the answer you must save the value to a file named 'answer.txt' and share the answer with the user", tools=[FileWriteTool()], - # llm=llm ) conversation = GroupConversation( @@ -47,8 +41,12 @@ ) ) +conversation.on( + ConversationMemberEventNames.MESSAGE_RECEIVED, + lambda _, m: print(f"{m.sender_name} to {m.recipient_name}: {m.message}"), +) + conversation.send_message( agent_b2.name, "Find the product of x and then notify the user. 
The other agents know x and y.", ) -print(f"Total Cost: {get_total_cost()}") diff --git a/website/docs/agent-memory/agent-memory.md b/website/docs/agent-memory/agent-memory.md new file mode 100644 index 0000000..44f7079 --- /dev/null +++ b/website/docs/agent-memory/agent-memory.md @@ -0,0 +1,15 @@ +--- +sidebar_position: 5 +--- + +# Memory Management + +Memory Management in BondAI is inspired by the tiered memory approach detailed in the [MemGPT: Towards LLMs as Operating Systems](https://arxiv.org/pdf/2310.08560.pdf) paper. This system mirrors operating systems' memory hierarchies, enhancing large language models' (LLMs) ability to handle extensive contexts and complex conversations. The memory system in BondAI consists of: + +- **Core Memory**: Directly integrated into the agent's system prompt, this memory system provides immediate access to essential, current information relevant to ongoing tasks but is limited in size. + +- **Conversation Memory**: Captures the complete history of conversational interactions, allowing agents to use keyword search to reference past dialogues. + +- **Archival Memory**: Effectively limitless in size, it stores extensive historical data and information. Using semantic search, enabled by the `faiss` library, Archival Memory allows agents to easily access extremely large datasets via what is effectively an implicit RAG pipeline. + +All of these memory systems are automatically managed by the **MemoryManager** class which automatically equips BondAI agents with the necessary tools for searching and editing their memory systems. Additionally, the **MemoryManager** is responsible for updating the Agent's system prompt to ensure the appropriate information is included. 
diff --git a/website/docs/agent-memory/archival-memory.md b/website/docs/agent-memory/archival-memory.md new file mode 100644 index 0000000..b3a7f5e --- /dev/null +++ b/website/docs/agent-memory/archival-memory.md @@ -0,0 +1,117 @@ +--- +sidebar_position: 3 +--- + +# Archival Memory + +Archival Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), represents an advanced memory layer that enables semantic search over a virtually infinite memory space. It utilizes embeddings and the faiss library to store and retrieve large volumes of data, making it particularly suitable for extensive historical information, comprehensive data sets, and long-term memory retention. This memory layer allows BondAI agents to access information beyond the immediate conversation or core memory. + +# ArchivalMemoryDataSource +**bondai.memory.ArchivalMemoryDataSource** + +The ArchivalMemoryDataSource class is an abstract base class defining the interface for archival memory. It allows for the insertion of content and provides a semantic search mechanism to retrieve relevant information based on query embeddings. + +``` +class ArchivalMemoryDataSource(ABC): + @property + @abstractmethod + def size(self) -> int: + pass + + @abstractmethod + def insert(self, content: str): + pass + + @abstractmethod + def insert_bulk(self, content: List[str]): + pass + + @abstractmethod + def search(self, query: str, page: int = 0) -> List[str]: + pass + + @abstractmethod + def clear(self): + pass +``` + + +### Key Features + +- **Semantic Search**: Leverages embeddings for deep semantic search, offering precise and relevant results. +- **Vast Memory Capacity**: Suitable for large-scale data storage, effectively handling extensive information. +- **Dynamic Data Management**: Supports insertion, bulk insertion, and deletion of memory content. 
+ + +# InMemoryArchivalMemoryDataSource +**bondai.memory.InMemoryArchivalMemoryDataSource** + +The InMemoryArchivalMemoryDataSource class provides an in-memory implementation of ArchivalMemoryDataSource. This variant is designed for temporary storage and fast access to archival data, primarily used in testing or non-persistent applications. + +``` +class InMemoryArchivalMemoryDataSource(ArchivalMemoryDataSource): + def __init__(self, embedding_model: EmbeddingModel | None = None, page_size=10): + ... +``` + +### Usage Example + +```python +from bondai.memory.archival.datasources import InMemoryArchivalMemoryDataSource +from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames + +# Initialize an In-Memory Archival Memory Data Source +in_memory_archival = InMemoryArchivalMemoryDataSource( + embedding_model=OpenAIEmbeddingModel(OpenAIModelNames.TEXT_EMBEDDING_ADA_002) +) + +# Insert and search content +in_memory_archival.insert("Temporary archival data") +results = in_memory_archival.search("archival data") +print(results) +``` + +### Parameters + +- **embedding_model**: (EmbeddingModel): Model used for creating content embeddings. +- **page_size (int)**: Number of search results returned per page. + + +# PersistentArchivalMemoryDataSource +**bondai.memory.PersistentArchivalMemoryDataSource** + +PersistentArchivalMemoryDataSource is a concrete implementation of ArchivalMemoryDataSource. It stores data persistently, ensuring the archival memory is retained across sessions. + +``` +class PersistentArchivalMemoryDataSource(ArchivalMemoryDataSource): + def __init__( + self, + file_path: str = "./.memory/archival-memory.json", + embedding_model: EmbeddingModel | None = None, + page_size=10, + ): + ... 
+``` + +### Usage Example + +```python +from bondai.memory.archival.datasources import PersistentArchivalMemoryDataSource +from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames + +# Initialize a Persistent Archival Memory Data Source +archival_memory = PersistentArchivalMemoryDataSource( + embedding_model=OpenAIEmbeddingModel(OpenAIModelNames.TEXT_EMBEDDING_ADA_002) +) + +# Insert and search content +archival_memory.insert("Historical data on global trends") +results = archival_memory.search("global trends") +print(results) +``` + +### Parameters + +- **file_path (str)**: File path for storing archival memory data. +- **embedding_model (EmbeddingModel)**: Model used for creating content embeddings. +- **page_size (int)**: Number of search results returned per page. diff --git a/website/docs/agent-memory/conversation-memory.md b/website/docs/agent-memory/conversation-memory.md new file mode 100644 index 0000000..75447ae --- /dev/null +++ b/website/docs/agent-memory/conversation-memory.md @@ -0,0 +1,117 @@ +--- +sidebar_position: 2 +--- + +# Conversation Memory + +Conversation Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), assists with maintaining a coherent and continuous dialogue with users. It stores the complete history of interactions and messages, allowing agents to reference previous conversations and provide more relevant and personalized responses. This memory layer is crucial for tasks that require recalling past interactions that may no longer fit inside the LLM context window. + +# ConversationMemoryDataSource +**bondai.memory.ConversationMemoryDataSource** + +The ConversationMemoryDataSource class is an abstract base class in BondAI that defines the interface for conversation memory management. It outlines methods for adding, removing, searching, and clearing conversation messages, facilitating dynamic interaction history management. 
+ +``` +class ConversationMemoryDataSource(ABC): + @property + @abstractmethod + def messages(self) -> List[AgentMessage]: + pass + + @abstractmethod + def add(self, message: AgentMessage): + pass + + @abstractmethod + def remove(self, message: AgentMessage): + pass + + def remove_after(self, timestamp: datetime, inclusive: bool = True): + pass + + @abstractmethod + def search( + self, + query: str, + start_date: datetime = None, + end_date: datetime = None, + page: int = 0, + ) -> List[str]: + pass + + @abstractmethod + def clear(self): + pass +``` + + +### Key Features + +- **Dynamic Interaction History**: Stores and manages the history of conversations between agents and users. +- **Search Functionality**: Provides methods to search through past messages based on queries or date ranges. +- **Message Management**: Offers functions to add new messages, remove specific messages, and clear the entire history. + + +# InMemoryConversationMemoryDataSource +**bondai.memory.InMemoryConversationMemoryDataSource** + +The InMemoryConversationMemoryDataSource class is an implementation of ConversationMemoryDataSource that stores conversation history in memory. This variant is suitable for temporary or testing environments where persistence of conversation history is not necessary. + +``` +class InMemoryConversationMemoryDataSource(ConversationMemoryDataSource): + def __init__(self, page_size=10): + ... +``` + +### Usage Example + +```python +from bondai.memory.conversation.datasources import InMemoryConversationMemoryDataSource + +# Initialize an In-Memory Conversation Memory Data Source +conversation_memory = InMemoryConversationMemoryDataSource() + +# Add messages +conversation_memory.add(ConversationMessage(message="My dog's name is Max.")) + +# Search messages +results = conversation_memory.search('dog') +print(results) +``` + +### Parameters + +- **page_size (int)**: Determines the number of messages to return per page during search operations. 
+ + +# PersistentConversationMemoryDataSource +**bondai.memory.PersistentConversationMemoryDataSource** + +The PersistentConversationMemoryDataSource class offers a persistent approach to storing conversation history. It saves the interaction data to a file, ensuring that conversation history is maintained even after the agent or application restarts. + +``` +class PersistentConversationMemoryDataSource(InMemoryConversationMemoryDataSource): + def __init__( + self, + file_path: str = "./.memory/conversation-memory.json", + page_size=10 + ): + ... +``` + +### Usage Example + +```python +from bondai.memory.conversation.datasources import PersistentConversationMemoryDataSource + +# Initialize a Persistent Conversation Memory Data Source +persistent_memory = PersistentConversationMemoryDataSource() + +# Adding a message automatically saves it to disk +persistent_memory.add(ConversationMessage(message="Persistent message")) +``` + +### Parameters + +- **file_path (str)**: Path to the file where conversation history is stored. +- **page_size (int)**: The number of messages to display per page in search results. diff --git a/website/docs/agent-memory/core-memory.md b/website/docs/agent-memory/core-memory.md new file mode 100644 index 0000000..9a8f71a --- /dev/null +++ b/website/docs/agent-memory/core-memory.md @@ -0,0 +1,104 @@ +--- +sidebar_position: 1 +--- + +# Core Memory + +Core Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), serves as a primary memory layer for retaining and accessing critical information relevant to an agent's ongoing tasks and user interactions. It acts as a dynamic, accessible storage that agents use to maintain continuity and context in their activities. The data stored in Core Memory is always available to the Agent via the system prompt. As such, it is important that the amount of information stored in Core Memory is limited. 
When using a CoreMemoryDataSource with a [MemoryManager](./memory-manager.md), Agents are automatically given access to a set of tools that allow editing of their Core Memory. By leveraging Core Memory, BondAI agents can maintain an understanding of the user's needs and preferences, adapt to changing requirements, and provide more personalized and effective responses + +# CoreMemoryDataSource +**bondai.memory.CoreMemoryDataSource** + +The CoreMemoryDataSource class in BondAI is an abstract class that describes the interface for managing core memory. It defines the structure and methods that any concrete core memory data source must implement. This makes implementation of custom DataSources straightforward (i.e. Databases). + +``` +class CoreMemoryDataSource(ABC): + @property + @abstractmethod + def sections(self) -> List[str]: + pass + + @abstractmethod + def get(self, section: str) -> str: + pass + + @abstractmethod + def set(self, section: str, content: str) -> None: + pass +``` + + +## Key Features + +- **Sectioned Memory**: Divides memory into sections for organized storage and retrieval. +- **Persistent and In-Memory Variants**: Offers flexibility in memory persistence, catering to different operational needs. +- **Memory Management**: Agents can interact with Core Memory via get/set tools, allowing them to store and retrieve task-relevant data. + + +# InMemoryCoreMemoryDataSource +**bondai.memory.InMemoryCoreMemoryDataSource** + +The InMemoryCoreMemoryDataSource class in BondAI is an implementation of the CoreMemoryDataSource interface that stores core memory data in memory. This class is suitable for scenarios where persistent storage of memory data is not required, such as temporary or test environments. 
+ +``` +class InMemoryCoreMemoryDataSource(CoreMemoryDataSource): + def __init__( + self, + sections: Dict[str, str] | None = None, + max_section_size: int = 1024 + ): +``` + +## Usage Example + +```python +from bondai.memory.core.datasources import InMemoryCoreMemoryDataSource + +# Initialize an In-Memory Core Memory Data Source +core_memory = InMemoryCoreMemoryDataSource({ + "user": "Name is George. Lives in New York. Has a dog named Max." +}) + +print(core_memory.get('user')) +``` + +## Parameters + +- **sections (Dict[str, str])**: A dictionary specifying the initial sections and their content. +- **max_section_size (int)**: The maximum size of content that can be stored in each section. + + +# PersistentCoreMemoryDataSource +**bondai.memory.PersistentCoreMemoryDataSource** + +The PersistentCoreMemoryDataSource class is a concrete implementation of CoreMemoryDataSource in BondAI that provides persistent storage for core memory data, allowing the information to be retained across different sessions and agent restarts. + +``` +class PersistentCoreMemoryDataSource(CoreMemoryDataSource): + def __init__( + self, + file_path: str = "./.memory/core-memory.json", + sections: Dict[str, str] | None = None, + max_section_size: int = 1024, + ): +``` + +## Usage Example + +```python +from bondai.memory.core.datasources import PersistentCoreMemoryDataSource + +# Initialize a Persistent Core Memory Data Source +core_memory = PersistentCoreMemoryDataSource() + +# Set and retrieve data from a specific section +core_memory.set('user', 'User information') +print(core_memory.get('user')) +``` + +## Parameters + +- **file_path (str)**: The file path where the core memory data is stored. +- **sections (Dict[str, str])**: A dictionary specifying the initial sections and their content. +- **max_section_size (int)**: The maximum size of content that can be stored in each section. 
+ diff --git a/website/docs/agent-memory/memory-manager.md b/website/docs/agent-memory/memory-manager.md new file mode 100644 index 0000000..faa7f07 --- /dev/null +++ b/website/docs/agent-memory/memory-manager.md @@ -0,0 +1,50 @@ +--- +sidebar_position: 4 +--- + +# MemoryManager + +The MemoryManager class in BondAI is designed to orchestrate the memory management strategy across different types of memory data sources. It is used to manage the memory requirements of BondAI agents, including ConversationalAgent and ReAct Agents. + +```python +class MemoryManager: + def __init__( + self, + core_memory_datasource: CoreMemoryDataSource | None = None, + conversation_memory_datasource: ConversationMemoryDataSource | None = None, + archival_memory_datasource: ArchivalMemoryDataSource | None = None, + prompt_builder: Callable[..., str] | None = None, + ): + +``` + +# Usage Example + +```python +from bondai.agents import ConversationalAgent +from bondai.memory import MemoryManager, PersistentCoreMemoryDataSource, PersistentConversationMemoryDataSource, PersistentArchivalMemoryDataSource + +# Initialize the memory manager with persistent datasources +memory_manager = MemoryManager( + core_memory_datasource=PersistentCoreMemoryDataSource(), + conversation_memory_datasource=PersistentConversationMemoryDataSource(), + archival_memory_datasource=PersistentArchivalMemoryDataSource() +) + +# Configure an Agent to use this MemoryManager +agent = ConversationalAgent(memory_manager=memory_manager) +``` + +# Key Features + +- Manages different types of memory: core, conversation, and archival. +- Provides a unified interface for memory operations across different memory types. +- Automatically manages Agent access to memory systems via LLM tools. +- Updates the Agent system prompt to ensure it's always updated with the latest information. + +# Parameters + +- **core_memory_datasource**: Instance of CoreMemoryDataSource for core memory operations. 
+- **conversation_memory_datasource**: Instance of ConversationMemoryDataSource for managing conversation memory. +- **archival_memory_datasource**: Instance of ArchivalMemoryDataSource for long-term memory storage and retrieval. +- **prompt_builder**: Callable for customizing memory-related prompt sections. These are dynamically inserted into the Agent system prompt at runtime. diff --git a/website/docs/agents/agents.md b/website/docs/agents/agents.md new file mode 100644 index 0000000..74c6ba5 --- /dev/null +++ b/website/docs/agents/agents.md @@ -0,0 +1,9 @@ +--- +sidebar_position: 3 +--- + +# Agents in BondAI + +Agents in BondAI, encompassing both [ConversationalAgent](./conversational-agent.md) and [ReAct Agents](./react-agent.md), draw inspiration from recent research into LLM/AI Agent architectures. The ConversationalAgent, influenced by the [AutoGen framework](https://arxiv.org/abs/2308.08155), excels in both individual interactions and the development of Multi-Agent Systems (MAS), where multiple agents collaborate to resolve complex tasks. These systems surpass the capabilities of single-agent models in certain classes of problems, offering customizable, conversational, and human-integrated solutions across various fields like coding and decision-making. + +ReAct Agents, based on the [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/pdf/2210.03629.pdf) paper, integrate reasoning and action in LLMs. This approach allows agents to generate reasoning traces and task-specific actions, enhancing their decision-making abilities. By interfacing with external environments and data sources, ReAct Agents excel in complex problem-solving and reasoning tasks, showcasing significant advancements over traditional methods. Together, these agent types in BondAI showcase a blend of advanced AI interactions, problem-solving prowess, and application versatility. 
\ No newline at end of file diff --git a/website/docs/agents/conversational-agent.md b/website/docs/agents/conversational-agent.md new file mode 100644 index 0000000..f6e4146 --- /dev/null +++ b/website/docs/agents/conversational-agent.md @@ -0,0 +1,73 @@ +--- +sidebar_position: 2 +--- + +# Conversational Agents + + + +# ConversationalAgent +**bondai.agents.ConversationalAgent** + +The ConversationalAgent in BondAI is inspired by the AutoGen framework as described in the paper [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155). The ConversationalAgent class in BondAI supports single agent interactions with an end user as well as the development of complex Multi-Agent Systems (MAS). MAS are a novel approach to developing LLM applications by employing multiple agents that communicate with each other to solve tasks. These agents are highly customizable, can engage in conversations, and allow for seamless human participation. This multi-agent system, unlike traditional models that rely on a single LLM agent, enables a more diverse and efficient approach to task resolution. It utilizes the strengths of LLMs while addressing their limitations through collaborative agent interactions and human input. This approach is particularly beneficial for a wide range of applications, including coding, mathematics, and online decision-making, by leveraging the power of multiple agents for complex problem-solving and improved reasoning capabilities + +```python +class ConversationalAgent(Agent, ConversationMember): + def __init__( + self, + llm: LLM | None = None, + ... 
+ enable_conversation_tools: bool = True, + enable_conversational_content_responses: bool = True, + enable_exit_conversation: bool = True, + quiet: bool = True, + ): + +``` + +## Usage Example + +```python +from bondai.agents import ConversationalAgent +from bondai.models.openai import OpenAILLM, OpenAIModelNames + +# Initialize the conversational agent +conv_agent = ConversationalAgent(llm=OpenAILLM(OpenAIModelNames.GPT4_0613)) + +# Configure and run the conversational agent +response = conv_agent.send_message("Hello, how can I assist you today?") +``` + +## Key Features + +- Supports response streaming +- Specializes in conversational interactions. +- Supports asynchronous messaging. +- Extends Agent's capabilities with conversation-specific tools. +- Customizable persona and instructions for interactions (embedded in system prompt). +- Event-driven architecture with additional conversation-specific events. + +## Parameters + +- Inherits all parameters from [Agent](./react-agent.md). +- **enable_conversation_tools**: Flag to enable conversation-specific tools. +- **enable_conversational_content_responses**: Flag to enable responses based on conversational content. +- **enable_exit_conversation**: Flag to enable the functionality to exit a conversation. +- **quiet**: Controls verbosity, inherited from Agent. + +## Methods + +- Inherits all methods from [Agent](./react-agent.md). +- **send_message_async(message: str | ConversationMessage, sender_name: str = 'user', group_members: List[ConversationMember] | None = None, group_messages: List[AgentMessage] | None = None, max_attempts: int = 3, require_response: bool = True)**: Sends a message asynchronously. Allows specification of the message, sender name, group members, group messages, maximum send attempts, and whether a response is required. 
+- **send_message(message: str | ConversationMessage, sender_name: str = 'user', group_members: List[ConversationMember] | None = None, group_messages: List[AgentMessage] | None = None, max_attempts: int = 3, require_response: bool = True)**: Sends a message synchronously and processes the response. Accepts the same parameters as send_message_async. + +## Conversational Events + +The ConversationalAgent class in BondAI surfaces several key events relevant to conversation handling. These events provide hooks for custom behaviors or additional processing during different stages of a conversation. Here's a list of these events: + +- **message_received**: Triggered when a message is received by the agent. This event can be used to execute actions upon the receipt of a new message. +- **message_completed**: Occurs when the agent successfully processes and completes a message. It is useful for post-processing or logging after a message exchange. +- **message_error**: Fired when there is an error in processing a message. This event allows for handling exceptions or errors that occur during message processing. +- **conversation_exited**: Triggered when the agent exits a conversation. This can be used to clean up or reset the agent's state at the end of a conversation. + +These events enhance the ConversationalAgent's capabilities, allowing for a more dynamic and responsive conversational flow, and providing opportunities for custom handling and integration in conversational applications. \ No newline at end of file diff --git a/website/docs/agents/react-agent.md b/website/docs/agents/react-agent.md new file mode 100644 index 0000000..4bcd49b --- /dev/null +++ b/website/docs/agents/react-agent.md @@ -0,0 +1,102 @@ +--- +sidebar_position: 1 +--- + +# ReAct Agents + +ReAct Agents in BondAI are based on research findings in the [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/pdf/2210.03629.pdf) paper. 
The ReAct architecture bridges reasoning and actions in large language models (LLMs) via the use of tools that are able to interact with the Agent's environment. This methodology enables LLMs to generate both reasoning traces and task-specific actions in an intertwined manner, enhancing the interaction between the two. Reasoning traces aid the model in inducing, tracking, and updating action plans and handling exceptions, while the action component allows the model to interface with external sources like knowledge bases for additional information. ReAct demonstrates improved performance in various tasks, notably overcoming issues in chain-of-thought reasoning and outperforming other methods in decision-making benchmarks. + +# Agent +**bondai.agents.Agent** + +ReAct Agents are implemented using the Agent class in BondAI. + +```python +class Agent: + def __init__( + self, + llm: LLM | None = None, + embedding_model: EmbeddingModel | None = None, + tools: List[Tool] | None = None, + quiet: bool = True, + allowed_events: List[str] | None = None, + messages: List[AgentMessage] | None = None, + system_prompt_sections: List[Callable[[], str]] | None = None, + system_prompt_builder: Callable[..., str] = None, + message_prompt_builder: Callable[..., str] = None, + memory_manager: MemoryManager | None = None, + max_context_length: int = None, + max_context_pressure_ratio: float = 0.8, + max_tool_retries: int = 3, + max_tool_response_tokens=2000, + enable_context_compression: bool = False, + enable_final_answer_tool: bool = True, + ): +``` + +## Usage Example + +```python +from bondai.agents import Agent +from bondai.models.openai import OpenAILLM, OpenAIModelNames + +# Initialize the agent +agent = Agent(llm=OpenAILLM(OpenAIModelNames.GPT4_0613)) + +# Add tools and configure the agent +agent.add_tool(custom_tool) + +# Run the agent for a specific task +result = agent.run(task="Answer customer queries") + +``` + +## Key Features +- Event-driven architecture. 
+- Integration with large language models (LLMs).
+- Integrated embedding models (semantic search).
+- Tool management and execution.
+- Context and message handling.
+- Memory management.
+- Context compression capabilities.
+
+## Parameters
+
+- **llm**: Instance of an LLM implementation (i.e. OpenAI GPT-N)
+- **embedding_model**: Embedding model instance for handling embeddings.
+- **tools**: List of Tool instances that the agent can use.
+- **quiet**: Boolean flag for silent operation. Defaults to 'True'.
+- **messages**: List of AgentMessage instances representing the agent's message memory.
+- **system_prompt_sections**: List of callables that return sections of the system prompt. These are dynamically injected into the system prompt at runtime.
+- **system_prompt_builder**: Callable for building the system prompt.
+- **message_prompt_builder**: Callable for formatting messages.
+- **memory_manager**: Instance of MemoryManager for memory management.
+- **max_context_length**: Maximum allowed context length. This defaults to 95% of the LLM's maximum context size.
+- **max_context_pressure_ratio**: Maximum context pressure allowed before context compression occurs. This defaults to 80% of the `max_context_length`.
+- **max_tool_retries**: Maximum number of retries for tool execution.
+- **max_tool_response_tokens**: Maximum number of tokens allowed for tool outputs. This defaults to 2000.
+- **enable_context_compression**: Flag to enable/disable context compression.
+- **enable_final_answer_tool**: Flag to include the FinalAnswerTool by default which allows the Agent to exit once it has completed its task.
+
+## Methods
+
+- **id**: Property returning the unique identifier of the agent. No parameters.
+- **status**: Property indicating the current status of the agent. No parameters.
+- **tools**: Property listing the tools available to the agent. No parameters.
+- **clear_messages**: Clears the agent's message history. No parameters.
+- **add_tool(tool: Tool)**: Adds a tool to the agent's toolset.
+- **remove_tool(tool_name: str)**: Removes a tool from the agent's toolset based on the tool_name.
+- **to_dict**: Converts the agent's state into a dictionary. No parameters.
+- **save_state**: Saves the current state of the agent. Optional parameter file_path: str for specifying the file path to save the state.
+- **load_state(state: Dict)**: Loads the agent's state from a state dictionary.
+- **run(task: str, max_steps: int = None, max_budget: float = None)**: Executes the agent's primary functionality for a task with optional parameters max_steps and max_budget.
+- **run_async(task: str, max_steps: int = None, max_budget: float = None)**: Starts the agent's execution in a separate thread for a task with optional parameters max_steps and max_budget.
+- **stop(timeout=10)**: Gracefully stops the agent's execution with a timeout duration in seconds.
+
+## Agent Events
+
+- **tool_selected**: Occurs when a tool within the agent's toolkit is selected for use. It allows for actions or logging upon tool activation.
+- **tool_error**: Fired when an error occurs during the execution of a tool. This event facilitates error handling and debugging of tool-related issues.
+- **tool_completed**: Triggered upon the successful completion of a tool's operation. Useful for post-processing steps or confirmation of task completion.
+- **streaming_content_updated**: This is fired as new data chunks arrive from the LLM for a content response. This is very useful for streaming responses to an end user.
+- **streaming_function_udpated**: This is fired as new data chunks are received from the LLM for a function selection. This allows for tool data logging without waiting for the LLM to finish its response.
\ No newline at end of file diff --git a/website/docs/api-spec/_category_.json b/website/docs/api-spec/_category_.json index 625e6ca..3c9e2a5 100644 --- a/website/docs/api-spec/_category_.json +++ b/website/docs/api-spec/_category_.json @@ -1,6 +1,6 @@ { "label": "API Specification", - "position": 6, + "position": 7, "link": { "type": "generated-index" } diff --git a/website/docs/azure.md b/website/docs/azure.md index e695525..cd668b4 100644 --- a/website/docs/azure.md +++ b/website/docs/azure.md @@ -1,5 +1,5 @@ --- -sidebar_position: 8 +sidebar_position: 11 --- # Azure OpenAI Services diff --git a/website/docs/cli.md b/website/docs/cli.md index 54cd9c1..b58c6d8 100644 --- a/website/docs/cli.md +++ b/website/docs/cli.md @@ -1,5 +1,5 @@ --- -sidebar_position: 3 +sidebar_position: 9 --- import googleLogo from './img/google-logo.png' diff --git a/website/docs/docker.md b/website/docs/docker.md index e3598d0..4e36611 100644 --- a/website/docs/docker.md +++ b/website/docs/docker.md @@ -1,8 +1,8 @@ --- -sidebar_position: 7 +sidebar_position: 10 --- -# BondAI With Docker +# Using Docker ## BondAI Docker Image diff --git a/website/docs/examples/_category_.json b/website/docs/examples/_category_.json index ba98efc..580c90d 100644 --- a/website/docs/examples/_category_.json +++ b/website/docs/examples/_category_.json @@ -1,6 +1,6 @@ { "label": "Examples", - "position": 5, + "position": 8, "link": { "type": "generated-index" } diff --git a/website/docs/multi-agent-systems/examples.md b/website/docs/multi-agent-systems/examples.md new file mode 100644 index 0000000..75d8a9c --- /dev/null +++ b/website/docs/multi-agent-systems/examples.md @@ -0,0 +1,110 @@ +--- +sidebar_position: 1 +--- + +# Multi-Agent Architectures + +## Example 1: Flat Multi-Agent Architecture + +In this example architecture, multiple ConversationalAgents are configured, each specializing in different aspects of customer support (e.g., technical, billing, general queries). 
They are managed under a GroupConversation system. This is an example of a flat conversational architecture where all agents are able to communicate directly with each other. +
+```python
+from bondai.agents import ConversationalAgent
+from bondai.agents.group_chat import GroupConversation
+
+# Initialize multiple agents for different support aspects
+tech_support = ConversationalAgent(name="TechSupport")
+billing_support = ConversationalAgent(name="BillingSupport")
+general_support = ConversationalAgent(name="GeneralSupport")
+
+# Create a group conversation with these agents
+support_team = GroupConversation(
+    conversation_members=[
+        tech_support,
+        billing_support,
+        general_support
+    ]
+)
+
+# Code to route user queries to the appropriate agent
+user_query = "How do I reset my password?"
+response = support_team.send_message(tech_support.name, user_query)
+print(response.message)
+```
+
+## Example 2: Hierarchical Conversational Architecture
+
+This example illustrates a hierarchical conversational architecture, characterized by structured agent interactions within a group. In this setup, the GroupConversation is configured using TeamConversationConfig to create distinct teams within the conversation.
+
+- **Agent Configuration**: Three ConversationalAgents are initialized—`team_leader`, `coding_expert`, and `design_expert`. Each agent has a specific role, where the team leader can coordinate and delegate tasks between the two experts.
+
+- **Group Setup**: The GroupConversation is organized into teams: one team includes the `team_leader` and `coding_expert`, and another consists of the `team_leader` and `design_expert`. This configuration ensures that the team leader can communicate with both experts, but direct communication between the coding and design experts is not possible.
+
+- **Conversation Dynamics**: When the conversation starts, a message is sent to the `team_leader`. 
The hierarchical structure allows the team leader to relay information to the experts, orchestrate their collaboration, and provide combined insights from both agents. +
+```python
+from bondai.agents import ConversationalAgent
+from bondai.agents.group_chat import GroupConversation, TeamConversationConfig
+
+# Initialize team agents
+team_leader = ConversationalAgent(name="TeamLeader")
+coding_expert = ConversationalAgent(name="CodingExpert")
+design_expert = ConversationalAgent(name="DesignExpert")
+
+# Create a group conversation
+problem_solving_team = GroupConversation(
+    conversation_config=TeamConversationConfig(
+        [team_leader, coding_expert],
+        [team_leader, design_expert]
+    )
+)
+
+# Simulate a problem-solving session
+problem_description = "Develop a user-friendly app interface."
+response = problem_solving_team.send_message(team_leader.name, problem_description)
+print(response.message)
+```
+
+
+## Example 3: Multi-Team, Hierarchical Conversational Architecture
+
+This example showcases a complex hierarchical architecture, involving multiple agents organized into distinct teams, each with specialized roles. In this model, communication channels are both vertical (within each team) and horizontal (across teams via team leaders).
+
+- **Agent Configuration**: The architecture involves a `product_manager`, two team leaders (`eng_leader` and `design_leader`), and various experts (`coding_expert`, `qa_expert`, `visual_designer`, `ux_designer`). The `product_manager` serves as a central figure overseeing the entire project.
+
+- **Group Setup**: This configuration organizes the agents into three teams: an **engineering team** (led by `eng_leader`), a **design team** (led by `design_leader`), and a **management team** consisting of the `product_manager` and both team leaders. 
+ +- **Conversation Dynamics**: When a conversation is initiated, a message is sent to the `product_manager`, the product manager can facilitate the conversation by engaging with both team leaders, who in turn can collaborate with their respective team members. + + +```python +from bondai.agents import ConversationalAgent +from bondai.agents.group_chat import GroupConversation, TeamConversationConfig + +# Initialize product manager agent +product_manager = ConversationalAgent(name="ProductManager") + +# Initialize team 1 agents +eng_leader = ConversationalAgent(name="EngTeamLeader") +coding_expert = ConversationalAgent(name="CodingExpert") +qa_expert = ConversationalAgent(name="QAExpert") + +# Initialize team 2 agents +design_leader = ConversationalAgent(name="DesignTeamLeader") +visual_designer = ConversationalAgent(name="VisualDesigner") +ux_designer = ConversationalAgent(name="UXDesigner") + +# Create a group conversation +problem_solving_team = GroupConversation( + conversation_config=TeamConversationConfig( + [eng_leader, coding_expert, qa_expert], + [design_leader, visual_designer, ux_designer], + [product_manager, eng_leader, design_leader] + ) +) + +# Simulate a problem-solving session +problem_description = "Develop a user-friendly app interface." +response = problem_solving_team.send_message(product_manager.name, problem_description) +print(response.message) +``` \ No newline at end of file diff --git a/website/docs/multi-agent-systems/group-conversation.md b/website/docs/multi-agent-systems/group-conversation.md new file mode 100644 index 0000000..acce786 --- /dev/null +++ b/website/docs/multi-agent-systems/group-conversation.md @@ -0,0 +1,65 @@ +--- +sidebar_position: 2 +--- + + +# GroupConversation + +The GroupConversation class in BondAI facilitates the creation and management of conversations involving multiple agents, inspired by advanced multi-agent system research. 
+ + +```python +class GroupConversation(EventMixin, Runnable): + def __init__( + self, + conversation_members: List[ConversationMember] | None = None, + conversation_config: BaseGroupConversationConfig | None = None, + filter_recipient_messages: bool = False, + ): +``` + +## Usage Example + +```python +from bondai.agents import ConversationalAgent +from bondai.agents.group_chat import GroupConversation + +# Initialize conversation members +members = [ConversationalAgent(...), ConversationalAgent(...)] + +# Create a group conversation +group_conversation = GroupConversation(conversation_members=members) + +# Conduct a group conversation +group_conversation.send_message("MemberName", "Hello, let's discuss.") +``` + +## Key Features +- Event-driven architecture. +- Manages multi-agent conversations. +- Supports dynamic interaction among multiple conversation participants. +- Facilitates complex conversational flows and decision-making processes. +- Allows for conversations with both predefined and dynamically determined members. + + +## Parameters + +- **conversation_members**: List of ConversationMember instances (i.e. UserProxy or ConversationalAgent) participating in the conversation. +- **conversation_config**: Configuration settings for managing group conversation dynamics. +- **filter_recipient_messages**: Boolean flag to determine whether the message history shown to each conversation member is inclusive of the entire group conversation or just the messages sent to/from that conversation member. + +## Methods + +- **id**: Property returning the unique identifier of the group conversation. No parameters. +- **status**: Property indicating the current status of the group conversation. No parameters. +- **members**: Property listing the conversation members participating in the group. No parameters. +- **remove_messages_after(timestamp: datetime, inclusive: bool = True)**: Removes messages from the conversation history that occurred after a specific timestamp. 
+- **send_message(recipient_name: str, message: str, sender_name: str = USER_MEMBER_NAME, require_response: bool = True)**: Sends a message within the group conversation. +- **reset_memory**: Clears the message history for all conversation members. + +## Group Conversation Events + +- **message_received**: Triggered when a message is received by a member of the conversation. +- **message_error**: Fired when an error occurs in message processing within the group. +- **message_completed**: Occurs when a message has been successfully processed by a member of the conversation. +- **conversation_exited**: Triggered when a member exits the conversation. diff --git a/website/docs/multi-agent-systems/multi-agent-systems.md b/website/docs/multi-agent-systems/multi-agent-systems.md new file mode 100644 index 0000000..915d68d --- /dev/null +++ b/website/docs/multi-agent-systems/multi-agent-systems.md @@ -0,0 +1,15 @@ +--- +sidebar_position: 4 +--- + +# Multi-Agent Systems + +Multi-Agent Systems (MAS) in BondAI represent a sophisticated approach to developing interactive, collaborative AI applications. At the core of MAS in BondAI are three primary classes: [ConversationalAgent](../agents/conversational-agent.md), [GroupConversation](./group-conversation.md), and [TeamConversationConfig](./team-conversation-config.md). + +**ConversationalAgent** acts as the fundamental building block of MAS, embodying individual agents with specific roles or expertise. These agents can engage in dialogues, process information, and perform tasks based on their programming and interactions. + +**GroupConversation** is the framework that orchestrates communication among multiple agents. It allows various ConversationalAgents to interact within a shared conversational space, enabling information exchange, collaborative problem-solving, and decision-making processes. This class manages the dynamics of the conversation, ensuring coherent interactions among all participating agents. 
+ +**TeamConversationConfig** is crucial for structuring the conversation architecture within a MAS. It defines how agents are grouped and how they can communicate with each other. This configuration can set up hierarchical structures, dividing agents into teams or layers, and determining the flow of information between them. It plays a pivotal role in managing complex conversations where different agents contribute distinct insights or skills towards a common goal. + +Together, these classes enable the development of complex MAS architectures in BondAI, where agents can work in unison or independently, mimicking real-world team dynamics and collaborative environments. This system opens up possibilities for applications requiring nuanced interactions and emergent intelligence. \ No newline at end of file diff --git a/website/docs/multi-agent-systems/team-conversation-config.md b/website/docs/multi-agent-systems/team-conversation-config.md new file mode 100644 index 0000000..62bc73c --- /dev/null +++ b/website/docs/multi-agent-systems/team-conversation-config.md @@ -0,0 +1,49 @@ +--- +sidebar_position: 3 +--- + + +# TeamConversationConfig + +The TeamConversationConfig class in BondAI structures the conversations in a Multi-Agent System, enabling complex conversational patterns among different agents. + + +```python +class TeamConversationConfig(BaseGroupConversationConfig): + def __init__(self, *args: List[ConversationMember]): + ... +``` + +## Usage Example + +```python +from bondai.agents import ConversationalAgent +from bondai.agents.group_chat import GroupConversation, TeamConversationConfig + +# Initialize team 1 +agent1 = ConversationalAgent(...) +agent2 = ConversationalAgent(...) +team1 = [agent1, agent2] + +# Initialize team 2 +agent3 = ConversationalAgent(...) +agent4 = ConversationalAgent(...) 
+team2 = [agent3, agent4] + +# Allow agent1 and agent 3 to communicate +team3 = [agent1, agent3] + +# Configure teams +team_config = TeamConversationConfig(team1, team2, team3) + +# Use in GroupConversation +group_conversation = GroupConversation(conversation_config=team_config) +``` + + +## Key Features + +- Facilitates hierarchical and structured team-based conversations. +- Allows the creation of any number of teams with specific member agents. +- Supports dynamic interactions within and across teams. +- Enhances the control over communication flow in multi-agent setups. diff --git a/website/docs/tools/_category_.json b/website/docs/tools/_category_.json index 9f0b121..c540408 100644 --- a/website/docs/tools/_category_.json +++ b/website/docs/tools/_category_.json @@ -1,6 +1,6 @@ { "label": "Tools", - "position": 4, + "position": 6, "link": { "type": "generated-index" }