Using LangChain together with a large model, we can develop a translation assistant that solves this problem directly. The code is as follows:
from langchain_ollama import ChatOllama
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
import langchain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

class BuildTranslationAssistant:
    def __init__(self):
        print("Loading model...")
        # Load the local model; the callback handler streams its output to stdout
        self.llm = ChatOllama(model="qwen2.5:7b", callbacks=[StreamingStdOutCallbackHandler()])
        print("Model loaded successfully...")
        # Build the prompt
        template = "You are a translation assistant. Translate the user's input from {input_language} directly into {output_language}."
        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        human_template = "{text}"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        self.chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

    def translate(self, text):
        print("Starting translation...")
        print("User input:\n", text)
        # Enable debug mode so the details of each call are printed
        # (in newer LangChain versions: from langchain.globals import set_debug; set_debug(True))
        langchain.debug = True
        # langchain.verbose = True
        # set_debug(True)
        # Non-streaming alternative; removing .content would print the entire message object:
        # print("AI:\n", self.llm.invoke(self.chat_prompt.format_prompt(input_language="English", output_language="Chinese", text=text).to_messages()).content)
        # Streaming output is handled by the callback handler, so there is no need to print the content here
        print("AI:")
        self.llm.invoke(self.chat_prompt.format_prompt(input_language="English", output_language="Chinese", text=text).to_messages())
        print("Translation finished...")

if __name__ == "__main__":
    assistant = BuildTranslationAssistant()
    text = "Gemma is a lightweight family of models from Google built on Gemini technology. The Gemma 3 models are multimodal—processing text and images—and feature a 128K context window with support for over 140 languages. Available in 1B, 4B, 12B, and 27B parameter sizes, they excel in tasks like question answering, summarization, and reasoning, while their compact design allows deployment on resource-limited devices."
    assistant.translate(text)
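
For contrast, streaming does not have to go through a callback handler: the model's stream() method yields message chunks that can be printed as they arrive. The sketch below is a minimal alternative under the same assumptions as the listing above (a locally running Ollama server with the qwen2.5:7b model pulled); translate_stream and its language parameters are illustrative names, not part of the original code.

from langchain_ollama import ChatOllama
from langchain.prompts.chat import ChatPromptTemplate

# A minimal sketch: stream tokens by iterating over stream() instead of
# registering StreamingStdOutCallbackHandler. Assumes a local Ollama server
# with the qwen2.5:7b model available; translate_stream is a hypothetical helper.
llm = ChatOllama(model="qwen2.5:7b")
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a translation assistant. Translate the user's input from {input_language} directly into {output_language}."),
    ("human", "{text}"),
])

def translate_stream(text, input_language="English", output_language="Chinese"):
    messages = prompt.format_prompt(
        input_language=input_language, output_language=output_language, text=text
    ).to_messages()
    # stream() yields message chunks; print each chunk's content as it arrives
    for chunk in llm.stream(messages):
        print(chunk.content, end="", flush=True)
    print()

translate_stream("Gemma is a lightweight family of models from Google.")

This keeps the printing logic in one place, which can be easier to follow than a callback when all you need is output on stdout.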