diff --git a/docs/assets/thumbnails/langchain_lcel.png b/docs/assets/thumbnails/langchain_lcel.png index f4dd469..f74a6bb 100644 Binary files a/docs/assets/thumbnails/langchain_lcel.png and b/docs/assets/thumbnails/langchain_lcel.png differ diff --git a/docs/assets/thumbnails/langchain_streaming_lcel_with_memory.png b/docs/assets/thumbnails/langchain_streaming_lcel_with_memory.png new file mode 100644 index 0000000..5fd5266 Binary files /dev/null and b/docs/assets/thumbnails/langchain_streaming_lcel_with_memory.png differ diff --git a/docs/assets/videos/langchain_lcel.mp4 b/docs/assets/videos/langchain_lcel.mp4 index ba51cd0..936faab 100644 Binary files a/docs/assets/videos/langchain_lcel.mp4 and b/docs/assets/videos/langchain_lcel.mp4 differ diff --git a/docs/assets/videos/langchain_streaming_lcel_with_memory.mp4 b/docs/assets/videos/langchain_streaming_lcel_with_memory.mp4 new file mode 100644 index 0000000..613433a Binary files /dev/null and b/docs/assets/videos/langchain_streaming_lcel_with_memory.mp4 differ diff --git a/docs/examples/langchain/langchain_streaming_lcel_with_memory.py b/docs/examples/langchain/langchain_streaming_lcel_with_memory.py new file mode 100644 index 0000000..abafea5 --- /dev/null +++ b/docs/examples/langchain/langchain_streaming_lcel_with_memory.py @@ -0,0 +1,56 @@ +""" +Demonstrates how to use the `ChatInterface` to create a chatbot using +[LangChain Expression Language](https://python.langchain.com/docs/expression_language/) (LCEL) +with streaming and memory. +""" + +from operator import itemgetter + +import panel as pn +from langchain.memory import ConversationSummaryBufferMemory +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.runnables import RunnableLambda, RunnablePassthrough +from langchain_openai import ChatOpenAI + +pn.extension() + +SYSTEM_PROMPT = "Try to be a silly comedian."
+ + +async def callback(contents, user, instance): + message = "" + inputs = {"input": contents} + async for token in chain.astream(inputs): + message += token + yield message + memory.save_context(inputs, {"output": message}) + + +model = ChatOpenAI(model="gpt-3.5-turbo") +memory = ConversationSummaryBufferMemory(return_messages=True, llm=model) +prompt = ChatPromptTemplate.from_messages( + [ + ("system", SYSTEM_PROMPT), + MessagesPlaceholder(variable_name="history"), + ("human", "{input}"), + ] +) +output_parser = StrOutputParser() +chain = ( + RunnablePassthrough.assign( + history=RunnableLambda(memory.load_memory_variables) | itemgetter("history") + ) + | prompt + | model + | output_parser +) + +chat_interface = pn.chat.ChatInterface( + pn.chat.ChatMessage( + "Offer a topic and ChatGPT will try to be funny!", user="System" + ), + callback=callback, + callback_user="ChatGPT", +) +chat_interface.servable() diff --git a/tests/ui/user.py b/tests/ui/user.py index 5fef18c..b64d0e0 100644 --- a/tests/ui/user.py +++ b/tests/ui/user.py @@ -175,6 +175,14 @@ def langchain_lcel(page: Page): page.wait_for_timeout(5000) +def langchain_streaming_lcel_with_memory(page: Page): + chat = ChatInterface(page) + chat.send("Remember this number: 8. Be concise.") + page.wait_for_timeout(10000) + chat.send("What number did I just ask you to remember?") + page.wait_for_timeout(10000) + + def mistral_and_llama(page: Page): chat = ChatInterface(page) chat.send("What do you think about HoloViz in a single sentence?")