Skip to content

Commit

Permalink
Simplifying Detect decorator. Added more documentation. (#20)
Browse files Browse the repository at this point in the history
Adding the Aimon Rely README, images, the postman collection, a simple client and examples.

A few small changes for error handling in the client and the example application.

Getting the Aimon API key from the streamlit app

updating README

Updating langchain example gif

Updating API endpoint

Adding V2 API with support for conciseness, completeness and toxicity checks (#1)

* Adding V2 API with support for conciseness, completeness and toxicity checks.

* Removing prints and updating config for the example application.

* Updating README

---------

Updating postman collection

Fixed the simple aimon client's handling of batch requests.

Updated postman collection. Added support for a user_query parameter in the input data dictionary.

Updating readme

Fixed bug in the example app

Uploading client code

Adding more convenience APIs

Fixing bug in create_dataset

Added Github actions config to publish to PyPI. Cleaned up dependencies and updated documentation.

Fixing langchain example

Fixing doc links

Formatting changes

Changes for aimon-rely

* Adding instruction adherence and hallucination v0.2 to the client

Updating git ignore

Adding more to gitignore

Removing .idea files

* Fixing doc string

* Updating documentation

* Updating Client to use V3 API

* Fixing test

* Updating tests

* Updating documentation in the client

* Adding .streamlit dir to .gitignore

* initial version of decorators for syntactic sugar

* A few more changes

* updating analyze and detect decorators

* Adding new notebooks

* Fixing bug in analyze decorator

* Updating Detect decorator to make it simpler. Adding Metaflow example. Adding documentation for the chatbot.

---------

Co-authored-by: Preetam Joshi <preetam@aimon.ai>
  • Loading branch information
pjoshi30 and Preetam Joshi authored Jul 29, 2024
1 parent 642c883 commit 44342ad
Show file tree
Hide file tree
Showing 10 changed files with 181 additions and 109 deletions.
6 changes: 5 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -23,4 +23,8 @@ lib64
# Installer logs
pip-log.txt

.streamlit/*
.streamlit
.streamlit/
.metaflow
.metaflow/
.ipynb_checkpoints
2 changes: 1 addition & 1 deletion aimon/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,5 +80,5 @@
# Some of our exported symbols are builtins which we can't set attributes for.
pass

from .decorators.detect import DetectWithContextQuery, DetectWithContextQueryInstructions, DetectWithQueryFuncReturningContext, DetectWithQueryInstructionsFuncReturningContext
from .decorators.detect import Detect
from .decorators.analyze import Analyze, Application, Model
114 changes: 27 additions & 87 deletions aimon/decorators/detect.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,105 +3,45 @@
from .common import AimonClientSingleton


class DetectWithQueryFuncReturningContext(object):
class Detect:
DEFAULT_CONFIG = {'hallucination': {'detector_name': 'default'}}

def __init__(self, api_key=None, config=None):
def __init__(self, values_returned, api_key=None, config=None):
"""
:param values_returned: A list of values in the order returned by the decorated function
Acceptable values are 'generated_text', 'context', 'user_query', 'instructions'
"""
self.client = AimonClientSingleton.get_instance(api_key)
self.config = config if config else self.DEFAULT_CONFIG
self.values_returned = values_returned
if self.values_returned is None or len(self.values_returned) == 0:
raise ValueError("Values returned by the decorated function must be specified")

def __call__(self, func):
@wraps(func)
def wrapper(user_query, *args, **kwargs):
result, context = func(user_query, *args, **kwargs)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)

if result is None or context is None:
raise ValueError("Result and context must be returned by the decorated function")
# Handle the case where the result is a single value
if not isinstance(result, tuple):
result = (result,)

data_to_send = [{
"user_query": user_query,
"context": context,
"generated_text": result,
"config": self.config
}]
# Create a dictionary mapping output names to results
result_dict = {name: value for name, value in zip(self.values_returned, result)}

aimon_response = self.client.inference.detect(body=data_to_send)[0]
return result, context, aimon_response

return wrapper


class DetectWithQueryInstructionsFuncReturningContext(DetectWithQueryFuncReturningContext):
def __call__(self, func):
@wraps(func)
def wrapper(user_query, instructions, *args, **kwargs):
result, context = func(user_query, instructions, *args, **kwargs)

if result is None or context is None:
raise ValueError("Result and context must be returned by the decorated function")

data_to_send = [{
"user_query": user_query,
"context": context,
"generated_text": result,
"instructions": instructions,
"config": self.config
}]

aimon_response = self.client.inference.detect(body=data_to_send)[0]
return result, context, aimon_response

return wrapper


# Another class but does not include instructions in the wrapper call
class DetectWithContextQuery(object):
DEFAULT_CONFIG = {'hallucination': {'detector_name': 'default'}}

def __init__(self, api_key=None, config=None):
self.client = AimonClientSingleton.get_instance(api_key)
self.config = config if config else self.DEFAULT_CONFIG

def __call__(self, func):
@wraps(func)
def wrapper(context, user_query, *args, **kwargs):
result = func(context, user_query, *args, **kwargs)
aimon_payload = {}
if 'generated_text' in result_dict:
aimon_payload['generated_text'] = result_dict['generated_text']
if 'context' in result_dict:
aimon_payload['context'] = result_dict['context']
if 'user_query' in result_dict:
aimon_payload['user_query'] = result_dict['user_query']
if 'instructions' in result_dict:
aimon_payload['instructions'] = result_dict['instructions']

if result is None:
raise ValueError("Result must be returned by the decorated function")

data_to_send = [{
"context": context,
"user_query": user_query,
"generated_text": result,
"config": self.config
}]
data_to_send = [aimon_payload]

aimon_response = self.client.inference.detect(body=data_to_send)[0]
return result, aimon_response
return result + (aimon_response,)

return wrapper


class DetectWithContextQueryInstructions(DetectWithContextQuery):
def __call__(self, func):
@wraps(func)
def wrapper(context, user_query, instructions, *args, **kwargs):
result = func(context, user_query, instructions, *args, **kwargs)

if result is None:
raise ValueError("Result must be returned by the decorated function")

data_to_send = [{
"context": context,
"user_query": user_query,
"generated_text": result,
"instructions": instructions,
"config": self.config
}]

aimon_response = self.client.inference.detect(body=data_to_send)[0]
return result, aimon_response

return wrapper

36 changes: 36 additions & 0 deletions examples/chatbot/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# AIMon Chatbot Demo

This is a simple chatbot demo that uses AIMon to check responses to user queries.
The chatbot is built using LLamaIndex. This chatbot application intentionally crawls a [single webpage](http://paulgraham.com/worked.html).
This way we can demonstrate how AIMon's hallucination detector works when the chatbot is asked questions that are not
related to the webpage, in which case it is likely to answer out of its own learned knowledge.

## Setup

### Installation

Install the required packages from the `requirements.txt` file specified in this directory.

```bash
pip install -r requirements.txt
```

### API Keys

You will need to specify AIMon and OpenAI API keys in a `secrets.toml` file inside the
`.streamlit` directory.

```toml
openai_key = "YOUR_OPENAI_API_KEY"
aimon_api_key = "YOUR_AIMON_API_KEY"
```

### Running the Chatbot

The chatbot is a streamlit app. You can run it using this command:

```bash
streamlit run chatbot.py
```


Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,17 @@
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.readers.web import SimpleWebPageReader
from aimon import DetectWithQueryInstructionsFuncReturningContext
from aimon import Detect
from aimon import AuthenticationError
import logging
import os

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
st.set_page_config(page_title="AIMon Chatbot Demo", page_icon="🦙", layout="centered", initial_sidebar_state="auto",
st.set_page_config(page_title="AIMon Chatbot Demo", layout="centered", initial_sidebar_state="auto",
menu_items=None)

aimon_config = {'hallucination': {'detector_name': 'default'}, 'instruction_adherence': {'detector_name': 'default'}}
detect = DetectWithQueryInstructionsFuncReturningContext(st.secrets.aimon_api_key, aimon_config)
detect = Detect(values_returned=['context', 'user_query', 'instructions', 'generated_text'], api_key=st.secrets.aimon_api_key, config=aimon_config)


@st.cache_resource(show_spinner=False)
Expand Down Expand Up @@ -85,7 +85,7 @@ def split_into_paragraphs(text):
def am_chat(usr_prompt, instructions):
response = st.session_state.chat_engine.chat(usr_prompt)
context = get_source_docs(response)
return response.response, context
return context, usr_prompt, instructions, response.response


def execute():
Expand Down Expand Up @@ -136,7 +136,7 @@ def execute():
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
if cprompt:
response, context, am_res = am_chat(cprompt, instructions)
context, usr_prompt, instructions, response, am_res = am_chat(cprompt, instructions)
message = {"role": "assistant", "content": response}
am_res_json = am_res.to_json()
st.write(response)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
llama-index
llama-index-readers-web
streamlit
openai
aimon

34 changes: 34 additions & 0 deletions examples/metaflow/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# AIMon Metaflow Demo

This is a simple Metaflow flow that generates a summary of an input document.
It uses AIMon to check the quality of the generated summary.
The summarizer is built using Langchain.

## Setup

### Installation

Install the required packages from the `requirements.txt` file specified in this directory.

```bash
pip install -r requirements.txt
```

### API Keys

You will need to provide your AIMon and OpenAI API keys via the environment variables shown below.

```bash
export OPENAI_KEY=YOUR_OPENAI_API
export AIMON_API_KEY=YOUR_AIMON_API
```

### Running the flow

The flow can be run using the following command:

```bash
python summarization_flow.py run
```


6 changes: 6 additions & 0 deletions examples/metaflow/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
langchain
langchain-community
metaflow
aimon
openai

60 changes: 60 additions & 0 deletions examples/metaflow/summarization_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from metaflow import FlowSpec, step
from langchain_community.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
from aimon import Detect
import os

detect = Detect(values_returned=['context', 'generated_text'], config={"hallucination": {"detector_name": "default"}})

class SummarizeFlow(FlowSpec):
    """Metaflow flow that summarizes a document and checks the result with AIMon."""

    @step
    def start(self):
        """Load the source document, summarize it, and print AIMon's verdict."""
        # Load your document here
        self.document = """
        Your document text goes here. Replace this text with the actual content you want to summarize.
        """

        # The @detect wrapper appends the AIMon detection response to the
        # (context, generated_text) tuple returned by summarize().
        context, summary, aimon_res = self.summarize(self.document)

        # Print the summary
        print("Summary:")
        print(summary)

        # Print the AIMon result
        print("AIMon hallucination detection:")
        print(aimon_res.hallucination)

        self.next(self.end)

    @detect
    def summarize(self, context):
        """Summarize *context* with a LangChain chain.

        Returns ``(context, generated_text)`` in the order declared by the
        module-level ``detect`` decorator's ``values_returned``.
        """
        # Split the source text and wrap the first few chunks as Documents.
        splitter = CharacterTextSplitter()
        chunks = splitter.split_text(context)
        documents = [Document(page_content=chunk) for chunk in chunks[:3]]

        # NOTE(review): assumes OPENAI_KEY is set — os.getenv returns None otherwise.
        model = OpenAI(temperature=0, api_key=os.getenv("OPENAI_KEY"))

        # Build the summarization chain and run it over the documents.
        chain = load_summarize_chain(model)
        return context, chain.run(documents)

    @step
    def end(self):
        """Terminal step of the flow."""
        print("Flow completed.")

# Instantiating the flow hands control to Metaflow's CLI runner
# (invoked as `python summarization_flow.py run`, per the README).
if __name__ == "__main__":
    SummarizeFlow()

Original file line number Diff line number Diff line change
Expand Up @@ -90,42 +90,32 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 11,
"id": "bcdddfa8-43c7-446a-9337-3ad0f16a015e",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/preetamjoshi/projects/aimon/pj_aimon_rely/examples/chbt/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The class `OpenAI` was deprecated in LangChain 0.0.10 and will be removed in 0.3.0. An updated version of the class exists in the langchain-openai package and should be used instead. To use it run `pip install -U langchain-openai` and import as `from langchain_openai import OpenAI`.\n",
" warn_deprecated(\n",
"/Users/preetamjoshi/projects/aimon/pj_aimon_rely/examples/chbt/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 0.3.0. Use invoke instead.\n",
" warn_deprecated(\n"
]
},
{
"data": {
"text/plain": [
"[(' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be beneficial for developers. This new version includes features like async support and improved error handling.',\n",
"[(' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be valuable for developers. This new version includes features like async support and improved error handling.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" ('\\n\\nTo configure the Acme python client, environment variables must be set up and dependencies must be installed. Detailed instructions for both basic and advanced setups can be found in the official documentation.',\n",
" ('\\n\\nTo configure the Acme python client, follow the official documentation which includes setting up environment variables and installing dependencies for both basic and advanced setups.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" (' The Acme python client is compatible with Python 3.6+ and multiple databases, including MySQL, PostgreSQL, and MongoDB. It is also suitable for cross-language projects with Node.js.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" (' The Acme python client may have installation, package conflicts, and connectivity issues. Troubleshooting involves checking the Python environment, dependencies, and log files, with specific error resolutions available in the online help section.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" (' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be beneficial for developers. This new version includes features like async support and improved error handling.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" ('\\n\\nTo configure the Acme python client, environment variables must be set up and dependencies must be installed. Detailed instructions for both basic and advanced setups can be found in the official documentation.',\n",
" ('\\n\\nTo configure the Acme python client, follow the official documentation which includes setting up environment variables and installing dependencies for both basic and advanced setups.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" (' The Acme python client is compatible with Python 3.6+ and multiple databases, including MySQL, PostgreSQL, and MongoDB. It is also suitable for cross-language projects with Node.js.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
" (' The Acme python client may have installation, package conflicts, and connectivity issues. Troubleshooting involves checking the Python environment, dependencies, and log files, with specific error resolutions available in the online help section.',\n",
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.'))]"
]
},
"execution_count": 6,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
Expand Down

0 comments on commit 44342ad

Please sign in to comment.