Skip to content

Commit

Permalink
Updating SDK to handle instructions in evaluation and continuous monitoring. (#28)
Browse files Browse the repository at this point in the history

* Initial commit

Adding the Aimon Rely README, images, the postman collection, a simple client and examples.

A few small changes for error handling in the client and the example application.

Getting the Aimon API key from the streamlit app

updating README

Updating langchain example gif

Updating API endpoint

Adding V2 API with support for conciseness, completeness and toxicity checks (#1)

* Adding V2 API with support for conciseness, completeness and toxicity checks.

* Removing prints and updating config for the example application.

* Updating README

---------

Co-authored-by: Preetam Joshi <info@aimon.ai>

Updating postman collection

Fixed the simple aimon client's handling of batch requests.

Updated postman collection. Added support for a user_query parameter in the input data dictionary.

Updating readme

Fixed bug in the example app

Uploading client code

Adding more convenience APIs

Fixing bug in create_dataset

Added Github actions config to publish to PyPI. Cleaned up dependencies and updated documentation.

Fixing langchain example

Fixing doc links

Formatting changes

Changes for aimon-rely

* Adding instruction adherence and hallucination v0.2 to the client

Updating git ignore

Adding more to gitignore

Removing .idea files

* Fixing doc string

* Updating documentation

* Updating Client to use V3 API

* Fixing test

* Updating tests

* Updating documentation in the client

* Adding .streamlit dir to .gitignore

* initial version of decorators for syntactic sugar

* A few more changes

* updating analyze and detect decorators

* Adding new notebooks

* Fixing bug in analyze decorator

* Updating Detect decorator to make it simpler. Adding Metaflow example. Adding documentation for the chatbot.

* fixing chatbot example

* Fixed issue in detect decorator. Improved code organization.

* fixed typo

* Updated the decorators with a cleaner interface. Added a metaflow analyze example.

* Updated version

* Updated Notebook

* Fixing context parsing issue with analyze_eval decorator

* Updating application to production in the analyze_prod decorator

* Updating SDK to handle instructions in evaluation and continuous monitoring.

* Deleting old notebook

* Fixing usability issues in the chatbot. Organizing examples a bit better.

---------

Co-authored-by: Preetam Joshi <info@aimon.ai>
  • Loading branch information
pjoshi30 and Preetam Joshi authored Aug 11, 2024
1 parent a1d4ddc commit 70e0b50
Show file tree
Hide file tree
Showing 32 changed files with 528 additions and 674 deletions.
4 changes: 1 addition & 3 deletions aimon/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from . import types
from ._types import NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes
from ._utils import file_from_path
Expand Down Expand Up @@ -62,7 +60,7 @@
"DEFAULT_MAX_RETRIES",
"DEFAULT_CONNECTION_LIMITS",
"DefaultHttpxClient",
"DefaultAsyncHttpxClient"
"DefaultAsyncHttpxClient",
]

_setup_logging()
Expand Down
71 changes: 29 additions & 42 deletions aimon/_base_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,16 +124,14 @@ def __init__(
self,
*,
url: URL,
) -> None:
...
) -> None: ...

@overload
def __init__(
self,
*,
params: Query,
) -> None:
...
) -> None: ...

def __init__(
self,
Expand Down Expand Up @@ -166,8 +164,7 @@ def has_next_page(self) -> bool:
return False
return self.next_page_info() is not None

def next_page_info(self) -> Optional[PageInfo]:
...
def next_page_info(self) -> Optional[PageInfo]: ...

def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body]
...
Expand Down Expand Up @@ -903,8 +900,7 @@ def request(
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
) -> _StreamT:
...
) -> _StreamT: ...

@overload
def request(
Expand All @@ -914,8 +910,7 @@ def request(
remaining_retries: Optional[int] = None,
*,
stream: Literal[False] = False,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
def request(
Expand All @@ -926,8 +921,7 @@ def request(
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
...
) -> ResponseT | _StreamT: ...

def request(
self,
Expand Down Expand Up @@ -1049,6 +1043,7 @@ def _request(
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

def _retry_request(
Expand Down Expand Up @@ -1090,6 +1085,7 @@ def _process_response(
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
origin = get_origin(cast_to) or cast_to

Expand All @@ -1107,6 +1103,7 @@ def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

Expand All @@ -1120,6 +1117,7 @@ def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
Expand Down Expand Up @@ -1152,8 +1150,7 @@ def get(
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
def get(
Expand All @@ -1164,8 +1161,7 @@ def get(
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT:
...
) -> _StreamT: ...

@overload
def get(
Expand All @@ -1176,8 +1172,7 @@ def get(
options: RequestOptions = {},
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
...
) -> ResponseT | _StreamT: ...

def get(
self,
Expand All @@ -1203,8 +1198,7 @@ def post(
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
def post(
Expand All @@ -1217,8 +1211,7 @@ def post(
files: RequestFiles | None = None,
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT:
...
) -> _StreamT: ...

@overload
def post(
Expand All @@ -1231,8 +1224,7 @@ def post(
files: RequestFiles | None = None,
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
...
) -> ResponseT | _StreamT: ...

def post(
self,
Expand Down Expand Up @@ -1465,8 +1457,7 @@ async def request(
*,
stream: Literal[False] = False,
remaining_retries: Optional[int] = None,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
async def request(
Expand All @@ -1477,8 +1468,7 @@ async def request(
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
remaining_retries: Optional[int] = None,
) -> _AsyncStreamT:
...
) -> _AsyncStreamT: ...

@overload
async def request(
Expand All @@ -1489,8 +1479,7 @@ async def request(
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
remaining_retries: Optional[int] = None,
) -> ResponseT | _AsyncStreamT:
...
) -> ResponseT | _AsyncStreamT: ...

async def request(
self,
Expand Down Expand Up @@ -1610,6 +1599,7 @@ async def _request(
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

async def _retry_request(
Expand Down Expand Up @@ -1649,6 +1639,7 @@ async def _process_response(
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
origin = get_origin(cast_to) or cast_to

Expand All @@ -1666,6 +1657,7 @@ async def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

Expand All @@ -1679,6 +1671,7 @@ async def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
Expand All @@ -1701,8 +1694,7 @@ async def get(
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
async def get(
Expand All @@ -1713,8 +1705,7 @@ async def get(
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT:
...
) -> _AsyncStreamT: ...

@overload
async def get(
Expand All @@ -1725,8 +1716,7 @@ async def get(
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
...
) -> ResponseT | _AsyncStreamT: ...

async def get(
self,
Expand All @@ -1750,8 +1740,7 @@ async def post(
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT:
...
) -> ResponseT: ...

@overload
async def post(
Expand All @@ -1764,8 +1753,7 @@ async def post(
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT:
...
) -> _AsyncStreamT: ...

@overload
async def post(
Expand All @@ -1778,8 +1766,7 @@ async def post(
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
...
) -> ResponseT | _AsyncStreamT: ...

async def post(
self,
Expand Down
29 changes: 12 additions & 17 deletions aimon/_compat.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import pydantic
from pydantic.fields import FieldInfo

from ._types import StrBytesIntFloat
from ._types import IncEx, StrBytesIntFloat

_T = TypeVar("_T")
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
Expand Down Expand Up @@ -133,17 +133,20 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
def model_dump(
model: pydantic.BaseModel,
*,
exclude: IncEx = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
) -> dict[str, Any]:
if PYDANTIC_V2:
return model.model_dump(
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
),
Expand All @@ -159,22 +162,19 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
# generic models
if TYPE_CHECKING:

class GenericModel(pydantic.BaseModel):
...
class GenericModel(pydantic.BaseModel): ...

else:
if PYDANTIC_V2:
# there no longer needs to be a distinction in v2 but
# we still have to create our own subclass to avoid
# inconsistent MRO ordering errors
class GenericModel(pydantic.BaseModel):
...
class GenericModel(pydantic.BaseModel): ...

else:
import pydantic.generics

class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
...
class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...


# cached properties
Expand All @@ -193,26 +193,21 @@ class typed_cached_property(Generic[_T]):
func: Callable[[Any], _T]
attrname: str | None

def __init__(self, func: Callable[[Any], _T]) -> None:
...
def __init__(self, func: Callable[[Any], _T]) -> None: ...

@overload
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
...
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...

@overload
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
...
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...

def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
raise NotImplementedError()

def __set_name__(self, owner: type[Any], name: str) -> None:
...
def __set_name__(self, owner: type[Any], name: str) -> None: ...

# __set__ is not defined at runtime, but @cached_property is designed to be settable
def __set__(self, instance: object, value: _T) -> None:
...
def __set__(self, instance: object, value: _T) -> None: ...
else:
try:
from functools import cached_property as cached_property
Expand Down
Loading

0 comments on commit 70e0b50

Please sign in to comment.