import os
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
prompt = ChatPromptTemplate(
    [
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{text}"),
    ]
)
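# ConversationBufferMemory keeps the full turn-by-turn history and feeds it back
# into the prompt through the MessagesPlaceholder whose variable_name matches memory_key.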
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
OPENAIKEY = os.getenv("OPENAI_API_KEY")  # consider a more descriptive variable name
llmparam = ChatOpenAI(
    model="gpt-4o",
    temperature=0,
    max_tokens=None,
    timeout=30,  # set a reasonable timeout
    max_retries=2,
    api_key=OPENAIKEY,
    base_url="https://api.openai-proxy.org/v1",
)
legacy_chain = LLMChain(
    llm=llmparam,
    prompt=prompt,
    memory=memory,
)
legacy_result = legacy_chain.invoke({"text": "my name is bob"})
print(legacy_result)
print(legacy_result["text"])
legacy_result = legacy_chain.invoke({"text": "my age is 23"})
print(legacy_result)
print(legacy_result["text"])
legacy_result = legacy_chain.invoke({"text": "what was my name"})
print(legacy_result["text"])
legacy_result = legacy_chain.invoke({"text": "what was my age ? "})
print(legacy_result["text"])
legacy_result = legacy_chain.invoke({"text": "what was my father's name ?"})
print(legacy_result["text"])
Output:
{
    "text": "Hi Bob! Nice to meet you. How can I assist you today? 😊",
    "chat_history": [
        {
            "type": "HumanMessage",
            "content": "my name is bob",
            "additional_kwargs": {},
            "response_metadata": {}
        },
        {
            "type": "AIMessage",
            "content": "Hi Bob! Nice to meet you. How can I assist you today? 😊",
            "additional_kwargs": {},
            "response_metadata": {}
        }
    ]
}
Hi Bob! Nice to meet you. How can I assist you today? 😊
{
    "text": "That's awesome, Bob! 23 is a great age—you're in the prime of exploring life and chasing your dreams. Is there anything specific you'd like to talk about or work on today? 😊",
    "chat_history": [
        {
            "type": "HumanMessage",
            "content": "my name is bob",
            "additional_kwargs": {},
            "response_metadata": {}
        },
        {
            "type": "AIMessage",
            "content": "Hi Bob! Nice to meet you. How can I assist you today? 😊",
            "additional_kwargs": {},
            "response_metadata": {}
        },
        {
            "type": "HumanMessage",
            "content": "my age is 23",
            "additional_kwargs": {},
            "response_metadata": {}
        },
        {
            "type": "AIMessage",
            "content": "That's awesome, Bob! 23 is a great age—you're in the prime of exploring life and chasing your dreams. Is there anything specific you'd like to talk about or work on today? 😊",
            "additional_kwargs": {},
            "response_metadata": {}
        }
    ]
}
That’s awesome, Bob! 23 is a great age—you’re in the prime of exploring life and chasing your dreams.
Is there anything specific you’d like to talk about or work on today? 😊
Your name is Bob! 😊
Your age is 23! 😊
You haven’t mentioned your father’s name yet, Bob! Feel free to share if you’d like me to know. 😊
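To see what ConversationBufferMemory has accumulated at this point, you can read it back directly. A minimal sketch, assuming it runs right after the invocations above (load_memory_variables is the standard accessor on the legacy memory classes):

history = memory.load_memory_variables({})
# With return_messages=True this yields the raw message objects under memory_key
for msg in history["chat_history"]:
    print(type(msg).__name__, "->", msg.content)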
import uuid
import os
from IPython.display import Image, display
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
# Define a new graph
workflow = StateGraph(state_schema=MessagesState)

OPENAIKEY = os.getenv("OPENAI_API_KEY")

# Define a chat model
model = ChatOpenAI(
    model="gpt-4o",
    temperature=0,
    max_tokens=None,
    timeout=30,  # set a reasonable timeout
    max_retries=2,
    api_key=OPENAIKEY,
    base_url="https://api.openai-proxy.org/v1",
)
# Define the function that calls the model
def call_model(state: MessagesState):
    response = model.invoke(state["messages"])
    # The returned message gets appended to the existing message list in state
    return {"messages": response}

# Define the single model node and the edge from START
workflow.add_edge(START, "model")
workflow.add_node("model", call_model)

# Adding memory is straightforward in LangGraph!
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)
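# Optional sketch: the IPython.display imports above suggest rendering the graph.
# draw_mermaid_png() is available on compiled LangGraph graphs; it typically needs
# a notebook environment (and, by default, network access to a Mermaid renderer).
display(Image(app.get_graph().draw_mermaid_png()))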
# The thread id is a unique key that identifies this particular conversation.
# We'll just generate a random uuid here. This enables a single application
# to manage conversations among multiple users.
thread_id = uuid.uuid4()
config = {"configurable": {"thread_id": thread_id}}
input_message = HumanMessage(content="hi! I'm bob")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()
# Here, let's confirm that the AI remembers our name!
input_message = HumanMessage(content="what was my name?")
for event in app.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()
Output:
================================ Human Message =================================
hi! I'm bob
================================== Ai Message ==================================
Hi Bob! 👋 How’s it going?
================================ Human Message =================================
what was my name?
================================== Ai Message ==================================
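Because MemorySaver keys checkpoints by thread_id, switching to a new thread starts a fresh conversation. A minimal sketch reusing the app above (the thread id is, as before, just a random uuid):

other_config = {"configurable": {"thread_id": uuid.uuid4()}}  # a brand-new thread
input_message = HumanMessage(content="what was my name?")
for event in app.stream({"messages": [input_message]}, other_config, stream_mode="values"):
    event["messages"][-1].pretty_print()
# This thread has no prior checkpoints, so the model cannot recall the name here.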