langchain-chat-with-milvus/milvus_question.py

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.llms import OpenAI
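
# Both OpenAIEmbeddings and the OpenAI LLM wrapper read the API key from the
# OPENAI_API_KEY environment variable; it must be set before running this script.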
question = input("Enter your question: ")
question += " reply in spoken language "
# question = "Why does this yarn job fail, and how do I fix it? Reply in Chinese"
MILVUS_HOST = "127.0.0.1"
MILVUS_PORT = "19530"
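# 19530 is Milvus's default gRPC port; these values assume a local deployment.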
# Prepare the embedding model
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
vector_db: Milvus = Milvus(
    embedding_function=embeddings,
    connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT},
    collection_name="todos",
)
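# The "todos" collection is assumed to exist already, populated by a separate
# ingestion step; similarity_search will fail if it is missing.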
print("正在从向量数据库中搜索...")
docs = vector_db.similarity_search(query=question)
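# Dump the retrieved documents to a file for inspection.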
with open("question_docs.txt", "w") as f:
    f.write(str(docs))
print("正在调用 LLM...")
# Build a map-reduce QA-with-sources chain over the retrieved documents
chain = load_qa_with_sources_chain(
    OpenAI(temperature=0),
    chain_type="map_reduce",
    return_intermediate_steps=False,
    verbose=False,
)
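# chain_type="map_reduce" answers the question against each document
# separately, then reduces the partial answers into one response; this keeps
# each LLM call small at the cost of extra calls.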
output = chain({"input_documents": docs, "question": question}, return_only_outputs=True)
print("回复:" + output["output_text"])